the-stack_106_28307 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
def crop(data, offsets, crop_shape):
def indexOf(shape, index):
result = []
for dim in reversed(shape):
result.append(index % dim)
            index = index // dim  # integer division keeps indices as ints under Python 3
return result[::-1]
result = []
for i, value in enumerate(data.flatten()):
index = indexOf(data.shape, i)
selected = True
if len(index) == len(offsets):
for j, offset in enumerate(offsets):
selected = selected and index[j] >= offset and index[
j] < crop_shape[j] + offset
if selected:
result.append(value)
return np.array(result).reshape(crop_shape)
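# Hedged reference example (not part of the original test file): the helper above
# keeps the block starting at `offsets` with extent `crop_shape`, e.g.
#   data = np.arange(16).reshape(4, 4)
#   crop(data, [1, 1], [2, 2])  # -> [[5, 6], [9, 10]]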
class TestCropTensorOp(OpTest):
def setUp(self):
self.op_type = "crop_tensor"
self.shape_by_input = False
self.offset_by_input = False
self.unk_dim_idx = -1
self.attrs = {}
self.initTestCase()
if self.shape_by_input:
self.inputs = {
'X': np.random.random(self.x_shape).astype("float64"),
'Shape': np.array(self.crop_shape).astype("int32")
}
else:
self.attrs['shape'] = self.crop_shape
self.inputs = {
'X': np.random.random(self.x_shape).astype("float64"),
}
if self.offset_by_input:
self.inputs['Offsets'] = np.array(self.offsets).astype('int32')
else:
self.attrs['offsets'] = self.offsets
crop_shape = [val for val in self.crop_shape]
for i in range(len(self.crop_shape)):
if self.crop_shape[i] == -1:
crop_shape[i] = self.x_shape[i] - self.offsets[i]
self.outputs = {'Out': crop(self.inputs['X'], self.offsets, crop_shape)}
def initTestCase(self):
self.x_shape = (10, 10)
self.crop_shape = [2, 2]
self.offsets = [1, 2]
def test_check_output(self):
self.check_output(check_eager=True)
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True)
class TestCase1(TestCropTensorOp):
def initTestCase(self):
self.x_shape = (100)
self.crop_shape = [64]
self.offsets = [13]
class TestCase2(TestCropTensorOp):
def initTestCase(self):
self.x_shape = (12, 24)
self.crop_shape = [-1, 8]
self.offsets = [0, 0]
class TestCase3(TestCropTensorOp):
def initTestCase(self):
self.x_shape = (4, 8, 16)
self.crop_shape = [2, 2, 3]
self.offsets = [1, 5, 3]
self.shape_by_input = True
class TestCase4(TestCropTensorOp):
def initTestCase(self):
self.x_shape = (8, 3, 6, 6)
self.crop_shape = [-1, 3, -1, 4]
self.offsets = [0, 0, 1, 0]
self.shape_by_input = True
class TestCase5(TestCropTensorOp):
def initTestCase(self):
self.x_shape = (2, 4, 5, 8, 8)
self.crop_shape = [1, 1, 2, 4, 4]
self.offsets = [1, 0, 0, 2, 2]
self.offset_by_input = True
class TestCase6(TestCropTensorOp):
def initTestCase(self):
self.x_shape = (2, 2, 4, 4, 4, 2)
self.crop_shape = [1, 1, 4, 2, 2, 2]
self.offsets = [0, 0, 0, 0, 0, 0]
self.shape_by_input = True
self.offset_by_input = True
class TestCropTensorOpTensorAttr(OpTest):
def setUp(self):
self.op_type = "crop_tensor"
self.OffsetsTensor = False
self.ShapeTensor = True
self.attrs = {}
self.initTestCase()
if self.ShapeTensor:
shape_tensor = []
for index, ele in enumerate(self.crop_shape):
shape_tensor.append(("x" + str(index), np.ones(
(1)).astype('int32') * ele))
self.inputs = {
'X': np.random.random(self.x_shape).astype("float64"),
'ShapeTensor': shape_tensor
}
self.attrs['shape'] = self.shape_attr
if self.OffsetsTensor:
offsets_tensor = []
for index, ele in enumerate(self.offsets):
offsets_tensor.append(("x" + str(index), np.ones(
(1)).astype('int32') * ele))
self.inputs = {
'X': np.random.random(self.x_shape).astype("float64"),
'OffsetsTensor': offsets_tensor
}
self.attrs['offsets'] = self.offsets_attr
self.attrs['shape'] = self.crop_shape
self.attrs['offsets'] = self.offsets
crop_shape = [val for val in self.crop_shape]
for i in range(len(self.crop_shape)):
if self.crop_shape[i] == -1:
crop_shape[i] = self.x_shape[i] - self.offsets[i]
self.outputs = {'Out': crop(self.inputs['X'], self.offsets, crop_shape)}
def initTestCase(self):
self.x_shape = (10, 10)
self.crop_shape = (2, 2)
self.offsets = [1, 2]
self.shape_attr = [0, 0]
def test_check_output(self):
self.check_output(check_eager=True)
def test_check_grad_normal(self):
self.check_grad(["X"], "Out", check_eager=True)
class TestCropTensorOpTensorAttrCase1(TestCropTensorOpTensorAttr):
def initTestCase(self):
self.x_shape = (16, 8, 32)
self.crop_shape = [-1, -1, 3]
self.offsets = [1, 5, 3]
self.shape_attr = [-1, -1, 3]
class TestCropTensorOpTensorAttrCase2(TestCropTensorOpTensorAttr):
def initTestCase(self):
self.x_shape = (4, 8, 16, 8)
self.crop_shape = [2, 2, 3, 4]
self.offsets = [1, 5, 3, 0]
self.shape_attr = [0, 0, 3, 4]
class TestCropTensorOpTensorAttrCase3(TestCropTensorOpTensorAttr):
def initTestCase(self):
self.x_shape = (16, 8, 32)
self.crop_shape = [2, 2, 3]
self.offsets = [1, 5, 3]
self.offsets_attr = [-1, -1, 3]
self.ShapeTensor = False
self.OffsetsTensor = True
class TestCropTensorOpTensorAttrCase4(TestCropTensorOpTensorAttr):
def initTestCase(self):
self.x_shape = (16, 8, 32)
self.crop_shape = [2, 2, 3]
self.shape_attr = [0, 2, 3]
self.offsets = [1, 5, 3]
self.offsets_attr = [-1, -1, 3]
self.OffsetsTensor = True
class TestCropTensorException(unittest.TestCase):
def test_exception(self):
input1 = fluid.data(name="input1", shape=[2, 3, 6, 6], dtype="float32")
input2 = fluid.data(name="input2", shape=[2, 3, 6, 6], dtype="float16")
dim = fluid.data(name='dim', shape=[1], dtype='int32')
offset = fluid.data(name='offset', shape=[1], dtype='int32')
def attr_shape_type():
out = paddle.crop(input1, shape=3)
def attr_shape_dtype():
out = paddle.crop(input1, shape=[2, 2.0, 3, 3])
def attr_shape_value1():
out = paddle.crop(input1, shape=[2, -2, dim, 3])
def attr_shape_value2():
out = paddle.crop(input1, shape=[2, 0, dim, 3])
def attr_offsets_type():
out = paddle.crop(input1, shape=[2, 2, 3, 3], offsets=0)
def attr_offsets_dtype():
out = paddle.crop(
input1, shape=[2, 2, 3, 3], offsets=[0, 1.0, 0, 0])
def attr_offsets_value():
out = paddle.crop(
input1, shape=[2, 2, 3, 3], offsets=[0, -1, offset, 0])
def input_dtype():
out = paddle.crop(input2, shape=[2, 2, 3, 3])
self.assertRaises(TypeError, attr_shape_type)
self.assertRaises(TypeError, attr_shape_dtype)
self.assertRaises(ValueError, attr_shape_value1)
self.assertRaises(ValueError, attr_shape_value2)
self.assertRaises(TypeError, attr_offsets_type)
self.assertRaises(TypeError, attr_offsets_dtype)
self.assertRaises(ValueError, attr_offsets_value)
self.assertRaises(TypeError, input_dtype)
if __name__ == '__main__':
import paddle
paddle.enable_static()
unittest.main()
|
the-stack_106_28311 | #
# Copyright 2019 AXA Group Operations S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
PORT = 8888
class PostHandler(BaseHTTPRequestHandler):
def do_POST(self):
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
json_data = json.loads(post_data)
new_json_data = process_data(json_data)
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps(new_json_data).encode('utf8'))
def run(server_class=HTTPServer, handler_class=PostHandler, port=PORT):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
print('Starting httpd on port {}'.format(port))
httpd.serve_forever()
def process_data(data):
# Modify JSON as you want
return data
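# Hedged usage sketch (not part of the original file): a client that posts JSON to
# the handler above and returns the echoed (possibly modified) payload. Assumes the
# server is running locally on PORT and that the third-party `requests` package is
# available; the example payload is illustrative only.
def example_client(payload=None, port=PORT):
    import requests  # assumed dependency, needed only for this sketch
    payload = payload if payload is not None else {"hello": "world"}
    response = requests.post('http://localhost:{}'.format(port), json=payload)
    return response.json()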
if __name__ == "__main__":
from sys import argv
if len(argv) == 2:
run(port=int(argv[1]))
else:
run()
|
the-stack_106_28312 | '''
Experiment: 3-axis accelerometer
Version: v1.0
Date: 2019.4
Author: 01Studio
Description: Read the accelerometer value of each axis (X, Y, Z) in code and show the readings on the OLED.
'''
import pyb
from machine import Pin,I2C
from ssd1306 import SSD1306_I2C
# initialize the relevant modules
i2c = I2C(sda=Pin("Y8"), scl=Pin("Y6"))
oled = SSD1306_I2C(128, 64, i2c, addr=0x3c)
accel = pyb.Accel()
while True:
    oled.fill(0)  # clear the screen
oled.text('01Studio', 0, 0)
oled.text('Accel test:',0,15)
    # read the x, y, z values and display them
oled.text('X:'+str(accel.x()),0,40)
oled.text('Y:'+str(accel.y()),44,40)
oled.text('Z:'+str(accel.z()),88,40)
oled.show()
    pyb.delay(1000)  # delay 1 s
|
the-stack_106_28313 | from zerver.lib.management import ZulipBaseCommand
from corporate.models import Plan, Coupon, Customer
from zproject.settings import get_secret
from typing import Any
import stripe
stripe.api_key = get_secret('stripe_secret_key')
class Command(ZulipBaseCommand):
help = """Script to add the appropriate products and plans to Stripe."""
def handle(self, *args: Any, **options: Any) -> None:
Customer.objects.all().delete()
Plan.objects.all().delete()
Coupon.objects.all().delete()
# Zulip Cloud offerings
product = stripe.Product.create(
name="Zulip Cloud Premium",
type='service',
statement_descriptor="Zulip Cloud Premium",
unit_label="user")
plan = stripe.Plan.create(
currency='usd',
interval='month',
product=product.id,
amount=800,
billing_scheme='per_unit',
nickname=Plan.CLOUD_MONTHLY,
usage_type='licensed')
Plan.objects.create(nickname=Plan.CLOUD_MONTHLY, stripe_plan_id=plan.id)
plan = stripe.Plan.create(
currency='usd',
interval='year',
product=product.id,
amount=8000,
billing_scheme='per_unit',
nickname=Plan.CLOUD_ANNUAL,
usage_type='licensed')
Plan.objects.create(nickname=Plan.CLOUD_ANNUAL, stripe_plan_id=plan.id)
coupon = stripe.Coupon.create(
duration='forever',
name='25% discount',
percent_off=25)
Coupon.objects.create(percent_off=25, stripe_coupon_id=coupon.id)
coupon = stripe.Coupon.create(
duration='forever',
name='85% discount',
percent_off=85)
Coupon.objects.create(percent_off=85, stripe_coupon_id=coupon.id)
|
the-stack_106_28316 | import subprocess
from flask import Flask,request, jsonify
from flask_restful import Api, Resource
from flask_cors import CORS
from webargs.flaskparser import parser, abort
import json
import time
import sys
from waitress import serve
from multiprocessing import Process, Queue
from concurrent.futures import TimeoutError
from pebble import ProcessPool, ProcessExpired
import logging
import functools
import numpy as np
import model_predict
import requests
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
some_queue = None
url_model = 'http://hbqweblog.com/ai/model/watermodel.zip'
url_get = 'http://hbqweblog.com/ai/ai_get_data_by_time.php?'
url_csv = 'http://sovigaz.hbqweblog.com/ai/tdata/'
path_model = 'watermodel.py'
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def download_url(url, save_path, chunk_size=128):
r = requests.get(url, stream=True)
with open(save_path, 'wb') as fd:
for chunk in r.iter_content(chunk_size=chunk_size):
fd.write(chunk)
def exists(path):
r = requests.head(path)
return r.status_code == requests.codes.ok
def write_json(data, filename='data.json'):
with open(filename,'w') as f:
json.dump(data, f, indent=4)
APP = Flask(__name__)
API = Api(APP)
CORS(APP)
@APP.route('/sm/restart/', methods=['GET'], endpoint='start_flaskapp')
def restart():
try:
some_queue.put("something")
print("Restarted successfully")
return("Quit")
except:
print("Failed in restart")
return "Failed"
def start_flaskapp(queue):
global some_queue
some_queue = queue
API.add_resource(FractionsResource, "/")
serve(APP, host='0.0.0.0', port=8080)
def long_function():
with ProcessPool(5) as pool:
data = [0, 1, 2, 3, 4]
future = pool.map(functools.partial(add_const, const=1), data, timeout=5)
iterator = future.result()
result=[]
while True:
try:
result.append(next(iterator))
except StopIteration:
break
except TimeoutError as error:
print("function took longer than %d seconds" % error.args[1])
return(result)
def long_function_without_pool():
data = [0, 1, 2, 3, 4]
result = list(map(functools.partial(add_const, const=1), data))
return(result)
def add_const(number, const=0):
time.sleep(5)
return number+const
class FractionsResource(Resource):
@APP.route('/sm',methods=['GET'])
def index():
return "Welcome to AI HBQsolution"
@APP.route('/',methods=['GET'])
def index1():
return "Welcome to AI HBQsolution"
@APP.route('/sm/ai_view',methods=['GET'])
def returnView():
try:
date = request.args.get('date', default = '', type = str)
id_device = request.args.get('ID', default = '', type = str)
if(id_device == '' and len(date) > 0):
return 'Please enter the ID'
data_info_json_file = "data/about_data.json"
with open(data_info_json_file) as json_file:
data = json.load(json_file)
temp = data['infor']
if(date == '' and id_device == ''):
return data
if(len(date) > 0):
data_out = [data_ for data_ in temp if data_['predictdate'] == date][0]
file_json_date = data_out['predictdate']
path_data = "data/"+id_device+"/"+file_json_date+".json"
with open(path_data) as json_file:
data_predict = json.load(json_file)
return jsonify({'output':data_predict})
if(len(id_device) > 0):
data_out = [data_ for data_ in temp if data_['id'] == id_device]
return jsonify({'infor':data_out})
except:
return "System Error 404"
@APP.route('/sm/ai_analyze',methods=['GET'])
def return_ws():
try:
id_device = request.args.get('ID', default = '', type = str)
index = request.args.get('index', default = '', type = str)
training_info_json_file = "model/about_model.json"
with open(training_info_json_file) as json_file:
data = json.load(json_file)
temp = data['infor']
if(id_device == ''):
return data
elif(len(index) > 0):
datas = [data_ for data_ in temp if data_['index'] == index][0]
weightfolder = datas['weightfolder']+"/"+"loss_data.json"
with open(weightfolder) as json_file:
loss_infor = json.load(json_file)
return loss_infor
else:
datas = [data_ for data_ in temp if data_['id'] == id_device]
return jsonify({'infor':datas})
except:
return "System Error 404"
@APP.route('/sm/ai_console',methods=['GET'])
def ai_console():
clear_text = request.args.get('clear', default = 0, type = int)
if(clear_text == 1):
logging.FileHandler(filename="status/predict_status.log", mode='w')
predict_status = open("status/predict_status.log","r")
return "<pre>"+predict_status.read()+"</pre>"
@APP.route('/sm/ai_run',methods=['GET'])
def returnPredict():
predict = request.args.get('run', default = 0, type = int)
id_device = request.args.get('ID', default = '', type = str)
date = request.args.get('date', default = '', type = str)
index = request.args.get('index', default = '', type = str)
logging.info("Request Predict Device:"+id_device)
logging.info("Request Predict Date:"+date)
training_info_json_file = "model/about_model.json"
path_id_device = 'model/'+id_device
check_id_device = os.path.exists(path_id_device)
if(check_id_device == False):
return jsonify({'error':"Device ID "+id_device+" not be trained"})
else:
try:
url_getdata = url_get+'&dev_id='+id_device+'&date='+date
url_getlink = url_csv+id_device+"/"
data_link = url_getlink+"datalink.json"
r = requests.get(data_link)
data_links = r.json()
sampling = data_links["sampling"]
row_datetime_name = data_links["rowname"][0]
row_pressure_name = data_links["rowname"][1]
row_flow_name = data_links["rowname"][2]
row_data = [row_datetime_name,row_pressure_name,row_flow_name]
except:
return jsonify({'error':"Device ID does not exist"})
with open(training_info_json_file) as json_file:
data = json.load(json_file)
temp = data['infor']
datas = [data_ for data_ in temp if data_['index'] == index][0]
lin = datas['lin']
lout = datas['lout']
path_info = datas['weightfolder']+"/"+"loss_data.json"
in_file = open(path_info, "r") # open file loss_data
loss_data = json.load(in_file)
loss_data = json.loads(loss_data)
mean_value = loss_data['mean']
std_value = loss_data['std']
path_w_f = datas['weightfolder']+"/WF.h5"
path_w_p = datas['weightfolder']+"/WP.h5"
parent_dir = "data/"
directory = id_device+"/"
path = os.path.join(parent_dir,directory)
check = os.path.exists(path)
if(check == False):
os.mkdir(path)
path_f = date+".json"
path_save = os.path.join(path,path_f)
name_model_json_file = "model/model_name.json"
with open(name_model_json_file) as json_file:
data = json.load(json_file)
model_name = data["name"]
if(predict == 1):
data_info_json_file = "data/about_data.json"
path_id = "data/"+id_device
path_date_predict = "data/"+id_device+"/"+date+".json"
check_info_json_file = os.path.exists(data_info_json_file)
status = "running"
if(check_info_json_file== False):
data = {'infor':[{'id':id_device,'predictdate':date,'model':model_name,'lin':lin,'lout':lout,'weightfolder':datas['weightfolder'],"status":status}]}
write_json(data,data_info_json_file)
else:
check_id = os.path.exists(path_id)
check_date_predict = os.path.exists(path_date_predict)
if(check_id == False or check_date_predict == False):
with open(data_info_json_file) as json_file:
data = json.load(json_file)
temp = data['infor']
type_file = {'id':id_device,'predictdate':date,'model':model_name,'lin':lin,'lout':lout,'weightfolder':datas['weightfolder'],"status":status}
temp.append(type_file)
write_json(data,data_info_json_file)
if(check_id == True or check_date_predict == True):
with open(data_info_json_file) as json_file:
data = json.load(json_file)
temp = data['infor']
get_status = [data_ for data_ in temp if data_['predictdate'] == date][0]
get_status['status'] = status
get_status['weightfolder'] = datas['weightfolder']
get_status['lin'] = lin
get_status['lout'] = lout
write_json(data,data_info_json_file)
forecast_flow,error_date = model_predict.run_prediction(type_feature=1,row_infor=row_data,
his=lin,target=lout,
path_weight=path_w_f,url_get=url_getdata,
means=mean_value,f_ex=sampling,stds=std_value,mean_std=True)
forecast_p,error_date = model_predict.run_prediction(type_feature=0,row_infor=row_data,
his=lin,target=lout,
path_weight=path_w_p,url_get=url_getdata,
means=mean_value,f_ex=sampling,stds=std_value,mean_std=False)
if(len(error_date) > 0):
with open(data_info_json_file) as json_file:
data = json.load(json_file)
temp = data['infor']
get_status = [data_ for data_ in temp if data_['predictdate'] == date][0]
get_status['status'] = "Error"
write_json(data,data_info_json_file)
return jsonify({'error_date':error_date})
with open(data_info_json_file) as json_file:
data = json.load(json_file)
temp = data['infor']
get_status = [data_ for data_ in temp if data_['predictdate'] == date][0]
get_status['status'] = "done"
write_json(data,data_info_json_file)
data_out = json.dumps({'status':date,'error':error_date,'flow':forecast_flow,'Pressure':forecast_p},cls=NumpyEncoder)
out_file = open(path_save, "w")
json.dump(data_out, out_file, indent = 6)
return "OK"
if __name__ == "__main__":
date_strftime_format = "%d-%b-%y %H:%M:%S"
message_format = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(filename="status/predict_status.log",
format=message_format,datefmt=date_strftime_format,
level=logging.INFO)
q = Queue()
p = Process(target=start_flaskapp, args=(q,))
p.start()
    while True:  # watching the queue: if there is no call then sleep, otherwise break
if q.empty():
time.sleep(1)
else:
break
p.terminate() #terminate flaskapp and then restart the app on subprocess
args = [sys.executable] + [sys.argv[0]]
    subprocess.call(args)
|
the-stack_106_28317 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals,unused-variable,unused-argument
"""cuda batch_matmul operators"""
import tvm
from tvm import autotvm
from tvm import te
from tvm.contrib import cublas
from tvm.autotvm.task.space import SplitEntity, OtherOptionEntity
from .. import nn, generic
from ..utils import traverse_inline, get_const_tuple, get_max_power2_factor
@autotvm.register_topi_compute("batch_matmul.cuda")
def batch_matmul(cfg, x, y, out_shape=None):
"""Compute conv2d with NCHW layout"""
return nn.batch_matmul(x, y)
@autotvm.register_topi_schedule("batch_matmul.cuda")
def schedule_batch_matmul(cfg, outs):
"""Schedule for batch_matmul
Parameters
----------
outs: Array of Tensor
The computation graph description of batch_matmul
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for the op.
"""
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def _schedule(cfg, op):
C = op.output(0)
A, B = s[C].op.input_tensors
_, M, N = get_const_tuple(C.shape)
AA = s.cache_read(A, "shared", [C])
AL = s.cache_read(AA, "local", [C])
BB = s.cache_read(B, "shared", [C])
BL = s.cache_read(BB, "local", [C])
CC = s.cache_write(C, "local")
if op not in s.outputs:
s[C].compute_inline()
C = s.outputs[0].output(0)
b, y, x = s[C].op.axis
(k,) = s[CC].op.reduce_axis
cfg.define_split("tile_y", y, num_outputs=3)
cfg.define_split("tile_x", x, num_outputs=3)
cfg.define_split("tile_k", k, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [8, 16, 32, 64])
target = tvm.target.Target.current()
if target.kind.name in ["nvptx", "rocm"]:
# llvm-based backends cannot do non-explicit unrolling
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
if cfg.is_fallback:
y_bn = get_max_power2_factor(M, 64)
x_bn = get_max_power2_factor(N, 64)
y_nthreads = min(y_bn, 8)
x_nthreads = min(x_bn, 8)
cfg["tile_x"] = SplitEntity([-1, x_nthreads, x_bn // x_nthreads])
cfg["tile_y"] = SplitEntity([-1, y_nthreads, y_bn // y_nthreads])
cfg["tile_k"] = SplitEntity([-1, 8])
cfg["auto_unroll_max_step"] = OtherOptionEntity(16)
by, ty, yi = cfg["tile_y"].apply(s, C, y)
bx, tx, xi = cfg["tile_x"].apply(s, C, x)
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
s[C].reorder(b, by, bx, ty, tx, yi, xi)
s[C].bind(b, te.thread_axis("blockIdx.z"))
s[C].bind(by, te.thread_axis("blockIdx.y"))
s[C].bind(bx, te.thread_axis("blockIdx.x"))
s[C].bind(ty, thread_y)
s[C].bind(tx, thread_x)
s[C].pragma(yi, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[C].pragma(yi, "unroll_explicit", cfg["unroll_explicit"].val)
s[CC].compute_at(s[C], tx)
_, yi, xi = s[CC].op.axis
ko, ki = cfg["tile_k"].apply(s, CC, k)
s[CC].reorder(ko, ki, yi, xi)
s[CC].pragma(ki, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[CC].pragma(ki, "unroll_explicit", cfg["unroll_explicit"].val)
s[AA].compute_at(s[CC], ko)
s[AL].compute_at(s[CC], ki)
s[BB].compute_at(s[CC], ko)
s[BL].compute_at(s[CC], ki)
_, y, k = s[AA].op.axis
ty, yi = s[AA].split(y, nparts=cfg["tile_y"].size[1])
tx, ki = s[AA].split(k, nparts=cfg["tile_x"].size[1])
s[AA].reorder(ty, tx, yi, ki)
s[AA].bind(ty, thread_y)
s[AA].bind(tx, thread_x)
s[AA].pragma(yi, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[AA].pragma(yi, "unroll_explicit", cfg["unroll_explicit"].val)
_, x, k = s[BB].op.axis
ty, xi = s[BB].split(x, nparts=cfg["tile_y"].size[1])
tx, ki = s[BB].split(k, nparts=cfg["tile_x"].size[1])
s[BB].bind(ty, thread_y)
s[BB].bind(tx, thread_x)
s[BB].reorder(ty, tx, xi, ki)
s[BB].pragma(xi, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
s[BB].pragma(xi, "unroll_explicit", cfg["unroll_explicit"].val)
def _callback(op):
if "batch_matmul" in op.tag:
_schedule(cfg, op)
traverse_inline(s, outs[0].op, _callback)
return s
@autotvm.register_topi_compute("batch_matmul_cublas.cuda")
def batch_matmul_cublas(cfg, x, y, out_shape=None):
"""Computes batch matrix multiplication of `x` and `y` when `x` and `y` are
data in batch.
Parameters
----------
x : tvm.te.Tensor
3-D with shape [batch, M, K]
y : tvm.te.Tensor
3-D with shape [batch, N, K]
out_shape : None
The output shape
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
b, m, k = get_const_tuple(x.shape)
b, n, k = get_const_tuple(y.shape)
if all([isinstance(s, int) for s in [b, m, n, k]]):
cfg.add_flop(b * m * k * n * 2)
return cublas.batch_matmul(x, y, False, True)
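# Shape note (sketch inferred from the docstring above, not original source): with
# x of shape (B, M, K) and y of shape (B, N, K), the cuBLAS call computes
# out[b] = x[b] @ y[b].T for every batch b; a NumPy reference would be
#   out = np.einsum('bmk,bnk->bmn', x, y)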
@autotvm.register_topi_schedule("batch_matmul_cublas.cuda")
def schedule_batch_matmul_cublas(_, outs):
"""Schedule batch_matmul operator using CUBLAS"""
return generic.schedule_extern(outs)
|
the-stack_106_28319 | # -*- coding: utf-8 -*-
import numpy as np
from matplotlib import pyplot as plt
from datetime import datetime
from matplotlib import dates
import seawater as sw
def plts(date,y):
"""
Plot a multi-year time series y as a function of time of year (rather than absolute time). The time series from each separate year is plotted in a different color, with months on the x-axis. Useful for visualizing seasonal patterns.
Inputs:
date - a list of datetime objects
y - a numpy array
"""
sdate = []
sy = []
ii = 0
for nn,t in enumerate(date):
sdate.append(datetime(1980,t.month,t.day,t.hour,t.minute,t.second,t.microsecond))
sy.append(y[nn])
# make sure this is not the last index in the date list
# and check whether year is about to change
        if nn < len(date) - 1:
            if date[nn].year != date[nn+1].year:
                plt.plot(sdate, sy)
                sdate = []
                sy = []
                ii = ii + 1
    # plot the final year's segment, which is never flushed inside the loop above
    plt.plot(sdate, sy)
# Set major x ticks on months
ax = plt.gca()
ax.xaxis.set_major_locator(dates.MonthLocator())
ax.xaxis.set_major_formatter(dates.DateFormatter('%b'))
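# Hedged usage sketch (synthetic data, not from the original module):
#   from datetime import timedelta
#   t = [datetime(2019, 1, 1) + timedelta(days=d) for d in range(730)]
#   y = np.sin(2 * np.pi * np.arange(730) / 365.25)
#   plts(t, y)  # two years drawn on a shared Jan-Dec axis, one line per year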
def TS_contours(SP_range,T_range,sigma_levels,**kwargs):
'''
Plot contours of density anomaly (sigma) on a T-S plot. Uses EOS-80 equation of state. If the T_range input is in-situ T, then the sigma-t values are contoured. If the temperature input is potential temperature (theta), then the sigma-theta values are contoured (see Stewart 2005 for definitions).
INPUTS:
SP_range: Practical salinity, minimum and maximum values
T_range: In-situ or potential temperature [C], minimum and maximum values
sigma_levels: density anomaly values to contour
**kwargs: these will be passed to the contour function, see http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.contour
RETURNS:
cs: the matplotlib.contour.QuadContourSet object returned by contour (can
be used as input to pyplot.clabel function, for example).
REQUIRES:
Seawater toolbox: https://pypi.python.org/pypi/seawater
Reference:
Stewart, R. H. (2005) Introduction to Physical Oceanography. http://oceanworld.tamu.edu/resources/ocng_textbook/chapter06/chapter06_05.htm
'''
smin = SP_range[0]
smax = SP_range[1]
tmin = T_range[0]
tmax = T_range[1]
sgrid = np.linspace(smin,smax,101)
tgrid = np.linspace(tmin,tmax,101)
sigma = np.nan*np.zeros((len(tgrid),len(sgrid)))
# Loop to fill in grid with densities
for i,s in enumerate(sgrid):
for j,t in enumerate(tgrid):
sigma[j,i]=sw.dens(s,t,0)-1000 # sigma-t
# contour and return contour object
cs = plt.contour(sgrid,tgrid,sigma,sigma_levels,**kwargs)
return cs
if __name__ == '__main__':
# demonstrate T_S_contours function
plt.figure()
cs = TS_contours([30,34],[10,20],np.arange(20,26,0.6),colors='k')
plt.clabel(cs,fmt='%1.1f',fontsize=10)
    plt.show()
|
the-stack_106_28320 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Pipeline for preprocessing the VCF files.
This pipeline is aimed to help the user to easily identify and further import
the malformed/incompatible VCF files to BigQuery. It generates two files as the
output:
- Report: A file that lists the conflicting headers, undefined header fields,
the suggested resolutions and malformed records.
- Resolved headers file: A VCF file that contains the resolved fields
definitions.
The report is generated in the ``report_path``, while the resolved headers file
is generated in ``resolved_headers_path`` if provided.
Run locally:
python -m gcp_variant_transforms.vcf_to_bq_preprocess \
--input_pattern <path to VCF file(s)> \
--report_path <local path to the report file> \
--resolved_headers_path <local path to the resolved headers file> \
--report_all_conflicts True
Run on Dataflow:
python -m gcp_variant_transforms.vcf_to_bq_preprocess \
--input_pattern <path to VCF file(s)>
--report_path <cloud path to the report file> \
--resolved_headers_path <cloud path to the resolved headers file> \
--report_all_conflicts True \
--project gcp-variant-transforms-test \
--job_name preprocess \
--staging_location "gs://integration_test_runs/staging" \
--temp_location "gs://integration_test_runs/temp" \
--runner DataflowRunner \
--setup_file ./setup.py
"""
from __future__ import absolute_import
import logging
import sys
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.options import pipeline_options
from gcp_variant_transforms import vcf_to_bq_common
from gcp_variant_transforms.beam_io import vcfio
from gcp_variant_transforms.libs import preprocess_reporter
from gcp_variant_transforms.options import variant_transform_options
from gcp_variant_transforms.transforms import filter_variants
from gcp_variant_transforms.transforms import infer_headers
from gcp_variant_transforms.transforms import merge_headers
from gcp_variant_transforms.transforms import merge_header_definitions
_COMMAND_LINE_OPTIONS = [variant_transform_options.PreprocessOptions]
def _get_inferred_headers(variants, # type: pvalue.PCollection
merged_header # type: pvalue.PCollection
):
# type: (...) -> (pvalue.PCollection, pvalue.PCollection)
inferred_headers = (variants
| 'FilterVariants' >> filter_variants.FilterVariants()
| ' InferHeaderFields' >>
infer_headers.InferHeaderFields(
pvalue.AsSingleton(merged_header),
allow_incompatible_records=True))
merged_header = (
(inferred_headers, merged_header)
| beam.Flatten()
| 'MergeHeadersFromVcfAndVariants' >> merge_headers.MergeHeaders(
allow_incompatible_records=True))
return inferred_headers, merged_header
def run(argv=None):
# type: (List[str]) -> (str, str)
"""Runs preprocess pipeline."""
logging.info('Command: %s', ' '.join(argv or sys.argv))
known_args, pipeline_args = vcf_to_bq_common.parse_args(argv,
_COMMAND_LINE_OPTIONS)
options = pipeline_options.PipelineOptions(pipeline_args)
pipeline_mode = vcf_to_bq_common.get_pipeline_mode(known_args.input_pattern)
with beam.Pipeline(options=options) as p:
headers = vcf_to_bq_common.read_headers(p, pipeline_mode, known_args)
merged_headers = vcf_to_bq_common.get_merged_headers(headers)
merged_definitions = (headers
| 'MergeDefinitions' >>
merge_header_definitions.MergeDefinitions())
if known_args.report_all_conflicts:
variants = p | 'ReadFromVcf' >> vcfio.ReadFromVcf(
known_args.input_pattern, allow_malformed_records=True)
malformed_records = variants | filter_variants.ExtractMalformedVariants()
inferred_headers, merged_headers = (_get_inferred_headers(variants,
merged_headers))
_ = (merged_definitions
| 'GenerateConflictsReport' >>
beam.ParDo(preprocess_reporter.generate_report,
known_args.report_path,
beam.pvalue.AsSingleton(merged_headers),
beam.pvalue.AsSingleton(inferred_headers),
beam.pvalue.AsIter(malformed_records)))
else:
_ = (merged_definitions
| 'GenerateConflictsReport' >>
beam.ParDo(preprocess_reporter.generate_report,
known_args.report_path,
beam.pvalue.AsSingleton(merged_headers)))
if known_args.resolved_headers_path:
vcf_to_bq_common.write_headers(merged_headers,
known_args.resolved_headers_path)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
|
the-stack_106_28321 | import numpy as np
import tensorflow as tf
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import tf_util
# -----------------
# Global Constants
# -----------------
NUM_HEADING_BIN = 12
NUM_SIZE_CLUSTER = 8 # one cluster for each type
NUM_OBJECT_POINT = 512
g_type2class={'Car':0, 'Van':1, 'Truck':2, 'Pedestrian':3,
'Person_sitting':4, 'Cyclist':5, 'Tram':6, 'Misc':7}
g_class2type = {g_type2class[t]:t for t in g_type2class}
g_type2onehotclass = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}
g_type_mean_size = {'Car': np.array([3.88311640418,1.62856739989,1.52563191462]),
'Van': np.array([5.06763659,1.9007158,2.20532825]),
'Truck': np.array([10.13586957,2.58549199,3.2520595]),
'Pedestrian': np.array([0.84422524,0.66068622,1.76255119]),
'Person_sitting': np.array([0.80057803,0.5983815,1.27450867]),
'Cyclist': np.array([1.76282397,0.59706367,1.73698127]),
'Tram': np.array([16.17150617,2.53246914,3.53079012]),
'Misc': np.array([3.64300781,1.54298177,1.92320313])}
g_mean_size_arr = np.zeros((NUM_SIZE_CLUSTER, 3)) # size clustrs
for i in range(NUM_SIZE_CLUSTER):
g_mean_size_arr[i,:] = g_type_mean_size[g_class2type[i]]
# -----------------
# TF Functions Helpers
# -----------------
def tf_gather_object_pc(point_cloud, mask, npoints=512):
''' Gather object point clouds according to predicted masks.
Input:
point_cloud: TF tensor in shape (B,N,C)
mask: TF tensor in shape (B,N) of 0 (not pick) or 1 (pick)
npoints: int scalar, maximum number of points to keep (default: 512)
Output:
object_pc: TF tensor in shape (B,npoint,C)
indices: TF int tensor in shape (B,npoint,2)
'''
def mask_to_indices(mask):
indices = np.zeros((mask.shape[0], npoints, 2), dtype=np.int32)
for i in range(mask.shape[0]):
pos_indices = np.where(mask[i,:]>0.5)[0]
# skip cases when pos_indices is empty
if len(pos_indices) > 0:
if len(pos_indices) > npoints:
choice = np.random.choice(len(pos_indices),
npoints, replace=False)
else:
choice = np.random.choice(len(pos_indices),
npoints-len(pos_indices), replace=True)
choice = np.concatenate((np.arange(len(pos_indices)), choice))
np.random.shuffle(choice)
indices[i,:,1] = pos_indices[choice]
indices[i,:,0] = i
return indices
indices = tf.py_func(mask_to_indices, [mask], tf.int32)
object_pc = tf.gather_nd(point_cloud, indices)
return object_pc, indices
def get_box3d_corners_helper(centers, headings, sizes):
""" TF layer. Input: (N,3), (N,), (N,3), Output: (N,8,3) """
#print '-----', centers
N = centers.get_shape()[0].value
l = tf.slice(sizes, [0,0], [-1,1]) # (N,1)
w = tf.slice(sizes, [0,1], [-1,1]) # (N,1)
h = tf.slice(sizes, [0,2], [-1,1]) # (N,1)
#print l,w,h
x_corners = tf.concat([l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2], axis=1) # (N,8)
y_corners = tf.concat([h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2], axis=1) # (N,8)
z_corners = tf.concat([w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2], axis=1) # (N,8)
corners = tf.concat([tf.expand_dims(x_corners,1), tf.expand_dims(y_corners,1), tf.expand_dims(z_corners,1)], axis=1) # (N,3,8)
#print x_corners, y_corners, z_corners
c = tf.cos(headings)
s = tf.sin(headings)
ones = tf.ones([N], dtype=tf.float32)
zeros = tf.zeros([N], dtype=tf.float32)
row1 = tf.stack([c,zeros,s], axis=1) # (N,3)
row2 = tf.stack([zeros,ones,zeros], axis=1)
row3 = tf.stack([-s,zeros,c], axis=1)
R = tf.concat([tf.expand_dims(row1,1), tf.expand_dims(row2,1), tf.expand_dims(row3,1)], axis=1) # (N,3,3)
#print row1, row2, row3, R, N
corners_3d = tf.matmul(R, corners) # (N,3,8)
corners_3d += tf.tile(tf.expand_dims(centers,2), [1,1,8]) # (N,3,8)
corners_3d = tf.transpose(corners_3d, perm=[0,2,1]) # (N,8,3)
return corners_3d
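# Reference sketch (assumption: heading is a rotation about the +Y axis, matching the
# rotation matrix R built above). For a single box the NumPy equivalent is:
#   x_c = [ l/2,  l/2, -l/2, -l/2,  l/2,  l/2, -l/2, -l/2]
#   y_c = [ h/2,  h/2,  h/2,  h/2, -h/2, -h/2, -h/2, -h/2]
#   z_c = [ w/2, -w/2, -w/2,  w/2,  w/2, -w/2, -w/2,  w/2]
#   R = [[ cos(t), 0, sin(t)],
#        [      0, 1,      0],
#        [-sin(t), 0, cos(t)]]
#   corners = (R @ np.vstack([x_c, y_c, z_c])).T + center   # (8, 3)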
def get_box3d_corners(center, heading_residuals, size_residuals):
""" TF layer.
Inputs:
center: (B,3)
heading_residuals: (B,NH)
size_residuals: (B,NS,3)
Outputs:
box3d_corners: (B,NH,NS,8,3) tensor
"""
batch_size = center.get_shape()[0].value
heading_bin_centers = tf.constant(np.arange(0,2*np.pi,2*np.pi/NUM_HEADING_BIN), dtype=tf.float32) # (NH,)
headings = heading_residuals + tf.expand_dims(heading_bin_centers, 0) # (B,NH)
    mean_sizes = tf.expand_dims(tf.constant(g_mean_size_arr, dtype=tf.float32), 0) # (1,NS,3)
    sizes = mean_sizes + size_residuals # (B,NS,3)
sizes = tf.tile(tf.expand_dims(sizes,1), [1,NUM_HEADING_BIN,1,1]) # (B,NH,NS,3)
headings = tf.tile(tf.expand_dims(headings,-1), [1,1,NUM_SIZE_CLUSTER]) # (B,NH,NS)
centers = tf.tile(tf.expand_dims(tf.expand_dims(center,1),1), [1,NUM_HEADING_BIN, NUM_SIZE_CLUSTER,1]) # (B,NH,NS,3)
N = batch_size*NUM_HEADING_BIN*NUM_SIZE_CLUSTER
corners_3d = get_box3d_corners_helper(tf.reshape(centers, [N,3]), tf.reshape(headings, [N]), tf.reshape(sizes, [N,3]))
return tf.reshape(corners_3d, [batch_size, NUM_HEADING_BIN, NUM_SIZE_CLUSTER, 8, 3])
def huber_loss(error, delta):
abs_error = tf.abs(error)
quadratic = tf.minimum(abs_error, delta)
linear = (abs_error - quadratic)
losses = 0.5 * quadratic**2 + delta * linear
return tf.reduce_mean(losses)
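# Formula note (sketch, not original source): the function above is the standard
# Huber loss averaged over all elements of `error`:
#   L(e) = 0.5 * e**2                    if |e| <= delta
#        = delta * (|e| - 0.5 * delta)   otherwise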
def parse_output_to_tensors(output, end_points):
''' Parse batch output to separate tensors (added to end_points)
Input:
output: TF tensor in shape (B,3+2*NUM_HEADING_BIN+4*NUM_SIZE_CLUSTER)
end_points: dict
Output:
end_points: dict (updated)
'''
batch_size = output.get_shape()[0].value
center = tf.slice(output, [0,0], [-1,3])
end_points['center_boxnet'] = center
heading_scores = tf.slice(output, [0,3], [-1,NUM_HEADING_BIN])
heading_residuals_normalized = tf.slice(output, [0,3+NUM_HEADING_BIN],
[-1,NUM_HEADING_BIN])
end_points['heading_scores'] = heading_scores # BxNUM_HEADING_BIN
end_points['heading_residuals_normalized'] = \
heading_residuals_normalized # BxNUM_HEADING_BIN (-1 to 1)
end_points['heading_residuals'] = \
heading_residuals_normalized * (np.pi/NUM_HEADING_BIN) # BxNUM_HEADING_BIN
size_scores = tf.slice(output, [0,3+NUM_HEADING_BIN*2],
[-1,NUM_SIZE_CLUSTER]) # BxNUM_SIZE_CLUSTER
size_residuals_normalized = tf.slice(output,
[0,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER], [-1,NUM_SIZE_CLUSTER*3])
size_residuals_normalized = tf.reshape(size_residuals_normalized,
[batch_size, NUM_SIZE_CLUSTER, 3]) # BxNUM_SIZE_CLUSTERx3
end_points['size_scores'] = size_scores
end_points['size_residuals_normalized'] = size_residuals_normalized
end_points['size_residuals'] = size_residuals_normalized * \
tf.expand_dims(tf.constant(g_mean_size_arr, dtype=tf.float32), 0)
return end_points
# --------------------------------------
# Shared subgraphs for v1 and v2 models
# --------------------------------------
def placeholder_inputs(batch_size, num_point):
''' Get useful placeholder tensors.
Input:
batch_size: scalar int
num_point: scalar int
Output:
TF placeholders for inputs and ground truths
'''
#pointclouds_pl = tf.placeholder(tf.float32,
# shape=(batch_size, num_point, 4))
pointclouds_pl = tf.placeholder(tf.float32,
shape=(batch_size, num_point, 4))
#pointclouds_pl_no_intensity= tf.placeholder(tf.float32,
# shape=(batch_size, num_point, 3)) # only for xyz coordinates
one_hot_vec_pl = tf.placeholder(tf.float32, shape=(batch_size, 3))
# labels_pl is for segmentation label
labels_pl = tf.placeholder(tf.int32, shape=(batch_size, num_point))
centers_pl = tf.placeholder(tf.float32, shape=(batch_size, 3))
heading_class_label_pl = tf.placeholder(tf.int32, shape=(batch_size,))
heading_residual_label_pl = tf.placeholder(tf.float32, shape=(batch_size,))
size_class_label_pl = tf.placeholder(tf.int32, shape=(batch_size,))
size_residual_label_pl = tf.placeholder(tf.float32, shape=(batch_size,3))
return pointclouds_pl, one_hot_vec_pl, labels_pl, centers_pl, \
heading_class_label_pl, heading_residual_label_pl, \
size_class_label_pl, size_residual_label_pl
def point_cloud_masking(point_cloud, logits, end_points, xyz_only=True):
''' Select point cloud with predicted 3D mask,
translate coordinates to the masked points centroid.
Input:
point_cloud: TF tensor in shape (B,N,C)
logits: TF tensor in shape (B,N,2)
end_points: dict
xyz_only: boolean, if True only return XYZ channels
Output:
object_point_cloud: TF tensor in shape (B,M,3)
for simplicity we only keep XYZ here
M = NUM_OBJECT_POINT as a hyper-parameter
mask_xyz_mean: TF tensor in shape (B,3)
'''
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
mask = tf.slice(logits,[0,0,0],[-1,-1,1]) < \
tf.slice(logits,[0,0,1],[-1,-1,1])
mask = tf.to_float(mask) # BxNx1
mask_count = tf.tile(tf.reduce_sum(mask,axis=1,keep_dims=True),
[1,1,3]) # Bx1x3
point_cloud_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3]) # BxNx3
mask_xyz_mean = tf.reduce_sum(tf.tile(mask, [1,1,3])*point_cloud_xyz,
axis=1, keep_dims=True) # Bx1x3
mask = tf.squeeze(mask, axis=[2]) # BxN
end_points['mask'] = mask
mask_xyz_mean = mask_xyz_mean/tf.maximum(mask_count,1) # Bx1x3
# Translate to masked points' centroid
point_cloud_xyz_stage1 = point_cloud_xyz - \
tf.tile(mask_xyz_mean, [1,num_point,1])
if xyz_only:
point_cloud_stage1 = point_cloud_xyz_stage1
else:
point_cloud_features = tf.slice(point_cloud, [0,0,3], [-1,-1,-1])
point_cloud_stage1 = tf.concat(\
[point_cloud_xyz_stage1, point_cloud_features], axis=-1)
num_channels = point_cloud_stage1.get_shape()[2].value
object_point_cloud, _ = tf_gather_object_pc(point_cloud_stage1,
mask, NUM_OBJECT_POINT)
object_point_cloud.set_shape([batch_size, NUM_OBJECT_POINT, num_channels])
return object_point_cloud, tf.squeeze(mask_xyz_mean, axis=1), end_points
def get_center_regression_net(object_point_cloud, one_hot_vec,
is_training, bn_decay, end_points):
''' Regression network for center delta. a.k.a. T-Net.
Input:
object_point_cloud: TF tensor in shape (B,M,C)
point clouds in 3D mask coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
Output:
predicted_center: TF tensor in shape (B,3)
'''
num_point = object_point_cloud.get_shape()[1].value
net = tf.expand_dims(object_point_cloud, 2)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv-reg1-stage1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv-reg2-stage1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv-reg3-stage1', bn_decay=bn_decay)
net = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool-stage1')
net = tf.squeeze(net, axis=[1,2])
net = tf.concat([net, one_hot_vec], axis=1)
net = tf_util.fully_connected(net, 256, scope='fc1-stage1', bn=True,
is_training=is_training, bn_decay=bn_decay)
net = tf_util.fully_connected(net, 128, scope='fc2-stage1', bn=True,
is_training=is_training, bn_decay=bn_decay)
predicted_center = tf_util.fully_connected(net, 3, activation_fn=None,
scope='fc3-stage1')
return predicted_center, end_points
def get_loss(mask_label, center_label, \
heading_class_label, heading_residual_label, \
size_class_label, size_residual_label, \
end_points, \
corner_loss_weight=10.0, \
box_loss_weight=1.0):
''' Loss functions for 3D object detection.
Input:
mask_label: TF int32 tensor in shape (B,N)
center_label: TF tensor in shape (B,3)
heading_class_label: TF int32 tensor in shape (B,)
heading_residual_label: TF tensor in shape (B,)
size_class_label: TF tensor int32 in shape (B,)
size_residual_label: TF tensor tensor in shape (B,)
end_points: dict, outputs from our model
corner_loss_weight: float scalar
box_loss_weight: float scalar
Output:
total_loss: TF scalar tensor
the total_loss is also added to the losses collection
'''
# 3D Segmentation loss
mask_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\
logits=end_points['mask_logits'], labels=mask_label))
tf.summary.scalar('3d mask loss', mask_loss)
# Center regression losses
center_dist = tf.norm(center_label - end_points['center'], axis=-1)
center_loss = huber_loss(center_dist, delta=2.0)
tf.summary.scalar('center loss', center_loss)
stage1_center_dist = tf.norm(center_label - \
end_points['stage1_center'], axis=-1)
stage1_center_loss = huber_loss(stage1_center_dist, delta=1.0)
tf.summary.scalar('stage1 center loss', stage1_center_loss)
# Heading loss
heading_class_loss = tf.reduce_mean( \
tf.nn.sparse_softmax_cross_entropy_with_logits( \
logits=end_points['heading_scores'], labels=heading_class_label))
tf.summary.scalar('heading class loss', heading_class_loss)
hcls_onehot = tf.one_hot(heading_class_label,
depth=NUM_HEADING_BIN,
on_value=1, off_value=0, axis=-1) # BxNUM_HEADING_BIN
heading_residual_normalized_label = \
heading_residual_label / (np.pi/NUM_HEADING_BIN)
heading_residual_normalized_loss = huber_loss(tf.reduce_sum( \
end_points['heading_residuals_normalized']*tf.to_float(hcls_onehot), axis=1) - \
heading_residual_normalized_label, delta=1.0)
tf.summary.scalar('heading residual normalized loss',
heading_residual_normalized_loss)
# Size loss
size_class_loss = tf.reduce_mean( \
tf.nn.sparse_softmax_cross_entropy_with_logits( \
logits=end_points['size_scores'], labels=size_class_label))
tf.summary.scalar('size class loss', size_class_loss)
scls_onehot = tf.one_hot(size_class_label,
depth=NUM_SIZE_CLUSTER,
on_value=1, off_value=0, axis=-1) # BxNUM_SIZE_CLUSTER
scls_onehot_tiled = tf.tile(tf.expand_dims( \
tf.to_float(scls_onehot), -1), [1,1,3]) # BxNUM_SIZE_CLUSTERx3
predicted_size_residual_normalized = tf.reduce_sum( \
end_points['size_residuals_normalized']*scls_onehot_tiled, axis=[1]) # Bx3
mean_size_arr_expand = tf.expand_dims( \
tf.constant(g_mean_size_arr, dtype=tf.float32),0) # 1xNUM_SIZE_CLUSTERx3
mean_size_label = tf.reduce_sum( \
scls_onehot_tiled * mean_size_arr_expand, axis=[1]) # Bx3
size_residual_label_normalized = size_residual_label / mean_size_label
size_normalized_dist = tf.norm( \
size_residual_label_normalized - predicted_size_residual_normalized,
axis=-1)
size_residual_normalized_loss = huber_loss(size_normalized_dist, delta=1.0)
tf.summary.scalar('size residual normalized loss',
size_residual_normalized_loss)
# Corner loss
# We select the predicted corners corresponding to the
# GT heading bin and size cluster.
corners_3d = get_box3d_corners(end_points['center'],
end_points['heading_residuals'],
end_points['size_residuals']) # (B,NH,NS,8,3)
gt_mask = tf.tile(tf.expand_dims(hcls_onehot, 2), [1,1,NUM_SIZE_CLUSTER]) * \
tf.tile(tf.expand_dims(scls_onehot,1), [1,NUM_HEADING_BIN,1]) # (B,NH,NS)
corners_3d_pred = tf.reduce_sum( \
tf.to_float(tf.expand_dims(tf.expand_dims(gt_mask,-1),-1)) * corners_3d,
axis=[1,2]) # (B,8,3)
heading_bin_centers = tf.constant( \
np.arange(0,2*np.pi,2*np.pi/NUM_HEADING_BIN), dtype=tf.float32) # (NH,)
heading_label = tf.expand_dims(heading_residual_label,1) + \
tf.expand_dims(heading_bin_centers, 0) # (B,NH)
heading_label = tf.reduce_sum(tf.to_float(hcls_onehot)*heading_label, 1)
mean_sizes = tf.expand_dims( \
tf.constant(g_mean_size_arr, dtype=tf.float32), 0) # (1,NS,3)
size_label = mean_sizes + \
tf.expand_dims(size_residual_label, 1) # (1,NS,3) + (B,1,3) = (B,NS,3)
size_label = tf.reduce_sum( \
tf.expand_dims(tf.to_float(scls_onehot),-1)*size_label, axis=[1]) # (B,3)
corners_3d_gt = get_box3d_corners_helper( \
center_label, heading_label, size_label) # (B,8,3)
corners_3d_gt_flip = get_box3d_corners_helper( \
center_label, heading_label+np.pi, size_label) # (B,8,3)
corners_dist = tf.minimum(tf.norm(corners_3d_pred - corners_3d_gt, axis=-1),
tf.norm(corners_3d_pred - corners_3d_gt_flip, axis=-1))
corners_loss = huber_loss(corners_dist, delta=1.0)
tf.summary.scalar('corners loss', corners_loss)
# Weighted sum of all losses
total_loss = mask_loss + box_loss_weight * (center_loss + \
heading_class_loss + size_class_loss + \
heading_residual_normalized_loss*20 + \
size_residual_normalized_loss*20 + \
stage1_center_loss + \
corner_loss_weight*corners_loss)
tf.add_to_collection('losses', total_loss)
return total_loss
|
the-stack_106_28324 | """Base classes and utilities for wiring."""
from typing import Dict, List, Tuple, TypeVar, Callable
wire_colors = {
12: "YE",
5: "RD",
48: "BU",
0: "BK",
-2: "WH",
1: "WH",
-1: "PK"
}
PinSpecs = List[Tuple[str, int]]
class Board:
"""A board with connectors. This is not an element in wireviz, but useful to track."""
def __init__(self, board_type: str):
"""Create a board with the given type."""
# Note: board_type is used because "type" is reserved.
self.connectors: Dict[str, Connector] = dict()
self.board_type = board_type
self.ordinal = 0
def add_connector(self, name: str, pinspecs: PinSpecs):
"""Add a new connector to this board, with the listed set of pins."""
new_connector = Connector(name, pinspecs, self)
self.connectors[name] = new_connector
def __repr__(self):
"""Output simple string representation of board for debugging."""
s = "Board " + self.board_type + ":\n"
for c in self.connectors.values():
s = s + str(c) + "\n"
return s
def __getitem__(self, key):
"""Allow indexing to be used to access connectors dictionary."""
return self.connectors[key]
def dump(self) -> dict:
"""Output this board in a form suitable for passing to yaml output."""
# Boards are not separate entities in wireviz, so just combine all connectors
o = dict()
for c in self.connectors.values():
o.update(c.dump())
return o
class Connector:
"""A single connector on a board."""
def __init__(self, name: str, pinspecs: PinSpecs, board: Board):
"""Create a connector with the given name and pin specifications on the given board.
Easier to use board.add_connector().
"""
self.name = name
self.pins: List[Pin] = []
self.board = board
for od, (pin_name, vc) in enumerate(pinspecs):
self.pins.append(Pin(pin_name, vc, self, od))
def __repr__(self):
"""Output simple string representation of connector for debugging."""
s = self.name + ":\n"
for pi, p in enumerate(self.pins):
s = s + str(pi) + ":" + str(p) + "\n"
return s
def __getitem__(self, key):
"""Allow indexing to be used to access pins."""
return self.pins[key]
def get_display_name(self) -> str:
"""Get the name of this connector to be shown in wireviz."""
if self.name != "":
return self.board.board_type + " " + str(self.board.ordinal) + " " + self.name
return self.board.board_type
def dump(self) -> dict:
"""Output this connector in a form suitable for passing ot yaml output."""
display_name = self.get_display_name()
pin_labels: List[str] = []
for p in self.pins:
pin_labels.append(p.name)
return {display_name: {"pincount": len(self.pins), "pinlabels": pin_labels}}
class Pin:
"""A pin on a connector.
In the case of a connector such as USB or RJ45, where the pin assignments
aren't relevant, may represent the whole connector.
"""
def __init__(self, name: str, vclass: int, connector: Connector, ordinal: int):
"""Initialize a pin with given name and voltage class, on the given board.
This should probably be done through addConnector() since the pin's position is needed.
"""
self.name = name
self.vclass = vclass
self.connector = connector
self.ordinal = ordinal
def __repr__(self):
"""Return simple string debug output for pin."""
return self.name + " " + str(self.vclass)
class Wire:
"""Represents a wire connecting two pins.
In reality electrical wires don't have a 'direction', but wireviz puts sources on the
left and destinations on the right, so we track them.
"""
def __init__(self, src: Pin, dest: Pin):
"""Initialize a wire connecting the two specified pins.
This isn't a lot of use outside of a System, so you probably want to use System.connect().
"""
if src.vclass != dest.vclass:
print("Connecting pins of different vclasses!")
print(src, dest)
self.src = src
self.dest = dest
def __eq__(self, other):
"""Wires that connect the same pins are equal, regardless of order."""
if other is not Wire:
return False
return self.connects_pins(other.src, other.dest)
def connects_pins(self, a: Pin, b: Pin) -> bool:
"""Return if the wire connects these two pins (in either direction)."""
if self.src == a and self.dest == b:
return True
if self.src == b and self.dest == a:
return True
return False
def connects_connectors(self, a: Connector, b: Connector) -> bool:
"""Return if the wire connects these two connectors (in either direction and any pin)."""
if self.src.connector == a and self.dest.connector == b:
return True
if self.src.connector == b and self.dest.connector == a:
return True
return False
class SerialLED(Board):
"""A basic serial LED."""
def __init__(self, name):
"""Initialize the LED with the WS2812 pinout."""
super().__init__("WS2812 " + name)
self.add_connector("", [
("DOUT", 5),
("DIN", 5),
("VCC", 5),
("NC", -1),
("VDD", 5),
("GND", 0)])
class Switch(Board):
"""A basic switch."""
def __init__(self, name):
"""Initialize the switch with + and - sides."""
super().__init__("SW " + name)
self.add_connector("", [
("+", 5),
("-", 5)])
class Coil(Board):
"""A basic coil/driver."""
def __init__(self, name):
"""Initialize the coil with + and - sides.
TODO: Flipper coils may have multiple negative sides for hold and flip.
"""
super().__init__("DR " + name)
self.add_connector("", [
("+", 48),
("-", 48)])
# pylint: disable=invalid-name
T = TypeVar("T")
class System:
"""A system of wires, boards and connectors."""
def __init__(self):
"""Initialize an empty system."""
self.boards: List[Board] = []
self.wires: List[Wire] = []
def add_board(self, board: Board):
"""Add a board to the system."""
# Set ordinal based on number of existing boards of same type.
o = sum([1 for b in self.boards if b.board_type == board.board_type])
board.ordinal = o
self.boards.append(board)
def connect(self, src: Pin, dest: Pin):
"""Add a wire between two pins."""
assert src.connector.board in self.boards
assert dest.connector.board in self.boards
nw = Wire(src, dest)
if nw not in self.wires:
self.wires.append(nw)
def daisy_chain_list(self, items: List[T], get_in: Callable[[T], Pin], get_out: Callable[[T], Pin]):
"""Daisy chains connections between arbitrary items that can calculate pins.
:param items The list of items, of any type.
:param get_in Function to apply to an item to get the input pin.
:param get_out Function to apply to an item to get the output pin.
"""
if len(items) < 2:
return
for index in range(1, len(items)):
self.connect(get_out(items[index - 1]), get_in(items[index]))
# pylint: disable=too-many-arguments
def daisy_chain_dict(self, items: Dict[int, T], get_in: Callable[[T], Pin], get_out: Callable[[T], Pin],
start: int, ladder: bool) -> bool:
"""Like daisy_chain_list but takes a dict and checks it for sequentiality in the process of daisy chaining.
Used for directly daisy chaining elements with specified numbers.
:param items The dictionary from numbers to items, of any type.
:param get_in Function to apply to an item to get the input pin.
:param get_out Function to apply to an item to get the output pin.
:param start Number to start accessing the dictionary from.
:param ladder If true, alternate chain connections are flipped to create a vertical ladder in wireviz.
:return True if all items in the dictionary were sequentially numbered. If not, the chain stops
at the first gap.
"""
if len(items) < 2:
return True
if start not in items:
return False
even = False
for index in range(start + 1, start + len(items)):
if index not in items:
return False
if even or not ladder:
self.connect(get_out(items[index - 1]), get_in(items[index]))
else:
self.connect(get_in(items[index]), get_out(items[index - 1]))
even = not even
return True
# pylint: disable=too-many-locals
def dump(self) -> dict:
"""Output this system in a format suitable for yaml output."""
# Output all connectors
connectors_dict = dict() # Connectors dictionary for YAML
for board in self.boards:
connectors_dict.update(board.dump())
# Calculate list of all pairs of connectors (NB not pins) connected by wires
pairs: List[Tuple[Connector, Connector]] = []
for wire in self.wires:
if (wire.src.connector, wire.dest.connector) not in pairs and \
(wire.dest.connector, wire.src.connector) not in pairs:
pairs.append((wire.src.connector, wire.dest.connector))
wire_dict = dict() # Wires dictionary for YAML
connection_list: list = [] # Connections list for YAML
wire_ordinal = 0 # Serial number for next wire
for (srcc, destc) in pairs:
# Find all wires that connect each pair
wires_this_pair: List[Wire] = [wire for wire in self.wires if wire.connects_connectors(srcc, destc)]
# Connect them into a single multi-thread "wire" for wireviz
# Calculate name for the wire based on current ordinal
compound_wire_name = "W" + str(wire_ordinal)
wire_ordinal += 1
# Wireviz requires three lists per wire set, matched in order: source pins, wire thread numbers,
# destination pins
src_pin_list = []
wire_list = []
dest_pin_list = []
# Wire colours for wire specifier
color_list = []
for x, awire in enumerate(wires_this_pair):
src_pin_list.append(awire.src.ordinal + 1)
wire_list.append(x + 1)
dest_pin_list.append(awire.dest.ordinal + 1)
color_list.append(wire_colors[awire.src.vclass])
# Weird wireviz format for the cables block: a list of single entry dictionaries of lists
connection_dict = [{srcc.get_display_name(): src_pin_list}, {compound_wire_name: wire_list},
{destc.get_display_name(): dest_pin_list}]
connection_list.append(connection_dict)
# Add entry to wires block
wire_dict.update({compound_wire_name: {"wirecount": len(wires_this_pair), "colors": color_list}})
return {"connectors": connectors_dict, "cables": wire_dict, "connections": connection_list}
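# A hedged sketch of the dictionary shape dump() returns (values illustrative), matching
# the wireviz YAML layout assembled above:
#
#   connectors:
#     <connector display name>: {...}        # one entry per Board.dump() result
#   cables:
#     W0: {wirecount: 2, colors: [<color>, <color>]}
#   connections:
#     - - {<src connector display name>: [1, 2]}
#       - {W0: [1, 2]}
#       - {<dest connector display name>: [1, 2]}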
|
the-stack_106_28325 | # File: S (Python 2.4)
from pandac.PandaModules import *
from direct.showbase.DirectObject import *
from direct.interval.IntervalGlobal import *
from direct.actor import Actor
from pirates.piratesbase import PiratesGlobals
from PooledEffect import PooledEffect
from EffectController import EffectController
import random
class SkeletonGlow(PooledEffect, EffectController):
def __init__(self, billboardOffset = 0.0):
PooledEffect.__init__(self)
EffectController.__init__(self)
self.setColorScaleOff()
self.setBillboardPointEye(billboardOffset)
        self.glowColor = Vec4(0.8, 0.8, 0.8, 1)
self.pulseRate = 1.0
self.glow = loader.loadModel('models/effects/skeletonHeart')
self.glow.setFogOff()
self.glow.setLightOff()
self.glow.setBin('fixed', 120)
self.glow.setColorScaleOff()
self.glow.reparentTo(self)
self.glow.setScale(1.0)
def createTracks(self):
randomness = random.random() / 20
        pulseUp = self.glow.scaleInterval(0.03 + randomness, 2, startScale = 1.3)
        pulseDown = self.glow.scaleInterval(0.03 + randomness, 1.3, startScale = 2)
        fadeIn = self.glow.colorInterval(0.03 + randomness, Vec4(1, 1, 1, 1), startColor = self.glowColor)
        fadeOut = self.glow.colorInterval(0.03 + randomness, self.glowColor, startColor = Vec4(1, 1, 1, 1))
self.track = Sequence(Wait(self.pulseRate), Parallel(fadeIn, pulseUp), Parallel(fadeOut, pulseDown))
def adjustHeartColor(self, hpPercent):
        # Check the lowest health band before the 0.5 band so the < 0.25 branch is reachable.
        if hpPercent >= 1.0:
            self.glow.find('**/+SwitchNode').node().setVisibleChild(0)
            self.pulseRate = 1.0
        elif hpPercent < 0.25:
            self.glow.find('**/+SwitchNode').node().setVisibleChild(2)
            self.pulseRate = 0.1
        elif hpPercent < 0.5:
            self.glow.find('**/+SwitchNode').node().setVisibleChild(1)
            self.pulseRate = 0.5
self.stop()
self.createTracks()
self.loop()
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
|
the-stack_106_28326 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.common.system.profiler import ProfilerFactory, GooglePProf
class ProfilerFactoryTest(unittest.TestCase):
def _assert_default_profiler_name(self, os_name, expected_profiler_name):
profiler_name = ProfilerFactory.default_profiler_name(MockPlatformInfo(os_name))
self.assertEqual(profiler_name, expected_profiler_name)
def test_default_profilers(self):
self._assert_default_profiler_name('mac', 'iprofiler')
self._assert_default_profiler_name('linux', 'perf')
self._assert_default_profiler_name('win32', None)
self._assert_default_profiler_name('freebsd', None)
def test_default_profiler_output(self):
host = MockSystemHost()
self.assertFalse(host.filesystem.exists("/tmp/output"))
# Default mocks are Mac, so iprofile should be default.
profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
self.assertTrue(host.filesystem.exists("/tmp/output"))
self.assertEqual(profiler._output_path, "/tmp/output/test.dtps")
# Linux defaults to perf.
host.platform.os_name = 'linux'
profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
self.assertEqual(profiler._output_path, "/tmp/output/test.data")
class GooglePProfTest(unittest.TestCase):
def test_pprof_output_regexp(self):
pprof_output = """
sometimes
there
is
junk before the total line
Total: 3770 samples
76 2.0% 2.0% 104 2.8% lookup (inline)
60 1.6% 3.6% 60 1.6% FL_SetPrevious (inline)
56 1.5% 5.1% 56 1.5% MaskPtr (inline)
51 1.4% 6.4% 222 5.9% WebCore::HTMLTokenizer::nextToken
42 1.1% 7.6% 47 1.2% WTF::Vector::shrinkCapacity
35 0.9% 8.5% 35 0.9% WTF::RefPtr::get (inline)
33 0.9% 9.4% 43 1.1% append (inline)
29 0.8% 10.1% 67 1.8% WTF::StringImpl::deref (inline)
29 0.8% 10.9% 100 2.7% add (inline)
28 0.7% 11.6% 28 0.7% WebCore::QualifiedName::localName (inline)
25 0.7% 12.3% 27 0.7% WebCore::Private::addChildNodesToDeletionQueue
24 0.6% 12.9% 24 0.6% __memcpy_ssse3_back
23 0.6% 13.6% 23 0.6% intHash (inline)
23 0.6% 14.2% 76 2.0% tcmalloc::FL_Next
23 0.6% 14.8% 95 2.5% tcmalloc::FL_Push
22 0.6% 15.4% 22 0.6% WebCore::MarkupTokenizerBase::InputStreamPreprocessor::peek (inline)
"""
expected_first_ten_lines = """ 76 2.0% 2.0% 104 2.8% lookup (inline)
60 1.6% 3.6% 60 1.6% FL_SetPrevious (inline)
56 1.5% 5.1% 56 1.5% MaskPtr (inline)
51 1.4% 6.4% 222 5.9% WebCore::HTMLTokenizer::nextToken
42 1.1% 7.6% 47 1.2% WTF::Vector::shrinkCapacity
35 0.9% 8.5% 35 0.9% WTF::RefPtr::get (inline)
33 0.9% 9.4% 43 1.1% append (inline)
29 0.8% 10.1% 67 1.8% WTF::StringImpl::deref (inline)
29 0.8% 10.9% 100 2.7% add (inline)
28 0.7% 11.6% 28 0.7% WebCore::QualifiedName::localName (inline)
"""
host = MockSystemHost()
profiler = GooglePProf(host, '/bin/executable', '/tmp/output')
self.assertEqual(profiler._first_ten_lines_of_profile(pprof_output), expected_first_ten_lines)
|
the-stack_106_28328 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import copy
import six
import warnings
import functools
from . import layers
from . import framework
from . import core
from . import name_scope
from .dygraph import base as imperative_base
__all__ = [
'set_gradient_clip', 'ErrorClipByValue', 'GradientClipByValue',
'GradientClipByNorm', 'GradientClipByGlobalNorm'
]
class BaseErrorClipAttr(object):
def __str__(self):
raise NotImplementedError()
def _append_clip_op(self, block, grad_name):
raise NotImplementedError()
class ErrorClipByValue(BaseErrorClipAttr):
"""
Clips tensor values to the range [min, max].
Given a tensor ``t`` (see Examples below), this operation clips its value \
to ``min`` and ``max`` inplace.
- Any values less than min are set to min.
- Any values greater than max are set to max.
Args:
max (float): The maximum value to clip by.
min (float, optional): The minimum value to clip by. if not set by user, \
will be set to ``-max`` by framework.
Examples:
.. code-block:: python
import paddle.fluid as fluid
BATCH_SIZE = 128
CLIP_MAX = 2e-6
CLIP_MIN = -1e-6
prog = fluid.framework.Program()
with fluid.program_guard(main_program=prog):
image = fluid.layers.data(
name='x', shape=[784], dtype='float32')
hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
predict = fluid.layers.fc(
input=hidden2, size=10, act='softmax')
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(cost)
prog_clip = prog.clone()
prog_clip.block(0).var(hidden1.name)._set_error_clip(
fluid.clip.ErrorClipByValue(
                    max=CLIP_MAX, min=CLIP_MIN))
"""
def __init__(self, max, min=None):
max = float(max)
if min is None:
min = -max
else:
min = float(min)
self.max = max
self.min = min
def __str__(self):
return "ByValue, min=%f, max=%f" % (self.min, self.max)
def _append_clip_op(self, block, grad_name):
clip_op_desc = block.desc.append_op()
clip_op_desc.set_type("clip")
clip_op_desc.set_input("X", [grad_name])
clip_op_desc.set_output("Out", [grad_name])
clip_op_desc._set_attr("min", self.min)
clip_op_desc._set_attr("max", self.max)
def error_clip_callback(block, context):
# the context is a grad_to_var map
grad_to_var = context
op_desc = block.desc.op(block.desc.op_size() - 1)
for grad_n in [n for n in op_desc.output_arg_names() if n in grad_to_var]:
fwd_var = block._var_recursive(grad_to_var[grad_n])
error_clip = getattr(fwd_var, "error_clip", None)
if not (error_clip is None or isinstance(error_clip,
BaseErrorClipAttr)):
raise TypeError(
"Variable's error_clip should be an instance of BaseErrorClipAttr or None."
)
if error_clip is not None:
error_clip._append_clip_op(block, grad_n)
class GradientClipBase(object):
def __init__(self, need_clip=None):
if need_clip is not None and not callable(need_clip):
            raise TypeError(
                "The type of need_clip must be function, and it can filter out "
                "parameters that don't need gradient clipping. This function must return "
                "True or False, and True means that clipping is required. Please refer to "
                "API documentation of GradientClipByGlobalNorm / GradientClipByNorm "
                "/ GradientClipByValue.")
self._need_clip_func = need_clip
def __str__(self):
raise NotImplementedError()
@imperative_base.no_grad
def _dygraph_clip(self, params_grads):
raise NotImplementedError
def _static_clip(self, params_grads):
raise NotImplementedError
def __call__(self, params_grads):
if framework.in_dygraph_mode():
return self._dygraph_clip(params_grads)
else:
for p, g in params_grads:
if getattr(p, 'gradient_clip_attr', None) is not None:
warnings.warn(
"'set_gradient_clip' will be ineffective, because you have "
"set 'grad_clip' in 'optimizer'. So, 'set_gradient_clip' "
"is redundant and you can remove it.")
break
return self._static_clip(params_grads)
def _process_context(self, context, param, grad):
raise NotImplementedError()
def _create_operators(self, param, grad):
raise NotImplementedError()
class GradientClipByValue(GradientClipBase):
"""
:alias_main: paddle.nn.GradientClipByValue
:alias: paddle.nn.GradientClipByValue,paddle.nn.clip.GradientClipByValue
:old_api: paddle.fluid.clip.GradientClipByValue
Limit the value of multi-dimensional Tensor :math:`X` to the range [min, max].
- Any values less than min are set to ``min``.
- Any values greater than max are set to ``max``.
The multi-dimensional Tensor :math:`X` is not passed from this class, but the gradients of all parameters in ``Program`` . If ``need_clip``
is not None, then only part of gradients can be selected for gradient clipping.
    Gradient clipping will take effect after being set in ``optimizer`` , see the document ``optimizer``
(for example: :ref:`api_fluid_optimizer_SGDOptimizer`).
Args:
max (float): The maximum value to clip by.
min (float, optional): The minimum value to clip by. if not set by user, it will be set to ``-max``
automatically. In this case, ``max`` must be greater than 0.
need_clip (function, optional): Type: function. This function accepts a ``Parameter`` and returns ``bool``
(True: the gradient of this ``Parameter`` need to be clipped, False: not need). Default: None,
and gradients of all parameters in the network will be clipped.
Examples:
.. code-block:: python
# use for Static mode
import paddle
import paddle.fluid as fluid
import numpy as np
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(
main_program=main_prog, startup_program=startup_prog):
image = fluid.data(
name='x', shape=[-1, 2], dtype='float32')
predict = fluid.layers.fc(input=image, size=3, act='relu') # Trainable parameters: fc_0.w.0, fc_0.b.0
loss = fluid.layers.mean(predict)
# Clip all parameters in network:
clip = fluid.clip.GradientClipByValue(min=-1, max=1)
# Clip a part of parameters in network: (e.g. fc_0.w_0)
            # pass a function (filter_func) to need_clip; filter_func receives a Parameter and returns bool
            # def filter_func(Parameter):
            # # It can be easily filtered by Parameter.name (name can be set in fluid.ParamAttr, and the default name is fc_0.w_0, fc_0.b_0)
            # return Parameter.name=="fc_0.w_0"
            # clip = fluid.clip.GradientClipByValue(min=-1, max=1, need_clip=filter_func)
sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip)
sgd_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
x = np.random.uniform(-100, 100, (10, 2)).astype('float32')
exe.run(startup_prog)
out = exe.run(main_prog, feed={'x': x}, fetch_list=loss)
# use for Dygraph mode
import paddle
import paddle.fluid as fluid
with fluid.dygraph.guard():
linear = fluid.dygraph.Linear(10, 10) # Trainable parameters:: linear_0.w.0, linear_0.b.0
inputs = fluid.layers.uniform_random([32, 10]).astype('float32')
out = linear(fluid.dygraph.to_variable(inputs))
loss = fluid.layers.reduce_mean(out)
loss.backward()
# Clip all parameters in network:
clip = fluid.clip.GradientClipByValue(min=-1, max=1)
# Clip a part of parameters in network: (e.g. linear_0.w_0)
                # pass a function (filter_func) to need_clip; filter_func receives a ParamBase and returns bool
                # def filter_func(ParamBase):
                # # It can be easily filtered by ParamBase.name (name can be set in fluid.ParamAttr, and the default name is linear_0.w_0, linear_0.b_0)
                # return ParamBase.name == "linear_0.w_0"
                # # Note: linear.weight and linear.bias can return the weight and bias of dygraph.Linear, respectively, and can be used to filter
                # return ParamBase.name == linear.weight.name
                # clip = fluid.clip.GradientClipByValue(min=-1, max=1, need_clip=filter_func)
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=0.1, parameter_list=linear.parameters(), grad_clip=clip)
sgd_optimizer.minimize(loss)
"""
def __init__(self, max, min=None, need_clip=None):
super(GradientClipByValue, self).__init__(need_clip)
if min is None:
assert (max > 0.0)
min = -max
self.max = float(max)
self.min = float(min)
def __str__(self):
return "Gradient Clip By Value, min = %f, max=%f" % (self.min, self.max)
@imperative_base.no_grad
def _dygraph_clip(self, params_grads):
params_and_grads = []
for p, g in params_grads:
if g is None:
continue
if self._need_clip_func is not None and not self._need_clip_func(p):
params_and_grads.append((p, g))
continue
new_grad = layers.clip(x=g, min=self.min, max=self.max)
params_and_grads.append((p, new_grad))
return params_and_grads
def _static_clip(self, params_grads):
params_and_grads = []
param_new_grad_name_dict = dict()
with framework.name_scope('gradient_clip'):
for p, g in params_grads:
if g is None:
continue
if self._need_clip_func is not None and not self._need_clip_func(
p):
params_and_grads.append((p, g))
continue
with p.block.program._optimized_guard([p, g]):
new_grad = layers.clip(x=g, min=self.min, max=self.max)
params_and_grads.append((p, new_grad))
param_new_grad_name_dict[p.name] = new_grad.name
_correct_clip_op_role_var(params_and_grads, param_new_grad_name_dict)
return params_and_grads
def _process_context(self, context, param, grad):
pass
def _create_operators(self, param, grad):
new_grad = layers.clip(x=grad, min=self.min, max=self.max)
return param, new_grad
class GradientClipByNorm(GradientClipBase):
"""
:alias_main: paddle.nn.GradientClipByNorm
:alias: paddle.nn.GradientClipByNorm,paddle.nn.clip.GradientClipByNorm
:old_api: paddle.fluid.clip.GradientClipByNorm
Limit the l2 norm of multi-dimensional Tensor :math:`X` to ``clip_norm`` .
- If the l2 norm of :math:`X` is greater than ``clip_norm`` , :math:`X` will be compressed by a ratio.
- If the l2 norm of :math:`X` is less than or equal to ``clip_norm`` , nothing will be done.
The multidimensional Tensor :math:`X` is not passed from this class, but the gradients of all parameters in ``Program`` . If ``need_clip``
is not None, then only part of gradients can be selected for gradient clipping.
    Gradient clipping will take effect after being set in ``optimizer`` , see the document ``optimizer``
(for example: :ref:`api_fluid_optimizer_SGDOptimizer`).
The clipping formula is:
.. math::
Out =
\\left \{
\\begin{aligned}
& X & & if (norm(X) \\leq clip\_norm) \\\\
& \\frac{clip\_norm*X}{norm(X)} & & if (norm(X) > clip\_norm) \\\\
\\end{aligned}
\\right.
where :math:`norm(X)` represents the L2 norm of :math:`X`.
.. math::
norm(X) = ( \\sum_{i=1}^{n}|x\_i|^2)^{ \\frac{1}{2}}
Args:
clip_norm(float): The maximum norm value.
need_clip (function, optional): Type: function. This function accepts a ``Parameter`` and returns ``bool``
(True: the gradient of this ``Parameter`` need to be clipped, False: not need). Default: None,
and gradients of all parameters in the network will be clipped.
Examples:
.. code-block:: python
# use for Static mode
import paddle
import paddle.fluid as fluid
import numpy as np
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(
main_program=main_prog, startup_program=startup_prog):
image = fluid.data(
name='x', shape=[-1, 2], dtype='float32')
predict = fluid.layers.fc(input=image, size=3, act='relu') # Trainable parameters: fc_0.w.0, fc_0.b.0
loss = fluid.layers.mean(predict)
# Clip all parameters in network:
clip = fluid.clip.GradientClipByNorm(clip_norm=1.0)
# Clip a part of parameters in network: (e.g. linear_0.w_0)
            # pass a function (filter_func) to need_clip; filter_func receives a Parameter and returns bool
            # def filter_func(Parameter):
            # # It can be easily filtered by Parameter.name (name can be set in fluid.ParamAttr, and the default name is fc_0.w_0, fc_0.b_0)
            # return Parameter.name=="fc_0.w_0"
            # clip = fluid.clip.GradientClipByNorm(clip_norm=1.0, need_clip=filter_func)
sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip)
sgd_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
x = np.random.uniform(-100, 100, (10, 2)).astype('float32')
exe.run(startup_prog)
out = exe.run(main_prog, feed={'x': x}, fetch_list=loss)
# use for Dygraph mode
import paddle
import paddle.fluid as fluid
with fluid.dygraph.guard():
linear = fluid.dygraph.Linear(10, 10) # Trainable: linear_0.w.0, linear_0.b.0
inputs = fluid.layers.uniform_random([32, 10]).astype('float32')
out = linear(fluid.dygraph.to_variable(inputs))
loss = fluid.layers.reduce_mean(out)
loss.backward()
# Clip all parameters in network:
clip = fluid.clip.GradientClipByNorm(clip_norm=1.0)
# Clip a part of parameters in network: (e.g. linear_0.w_0)
                # pass a function (filter_func) to need_clip; filter_func receives a ParamBase and returns bool
                # def filter_func(ParamBase):
                # # It can be easily filtered by ParamBase.name (name can be set in fluid.ParamAttr, and the default name is linear_0.w_0, linear_0.b_0)
                # return ParamBase.name == "linear_0.w_0"
                # # Note: linear.weight and linear.bias can return the weight and bias of dygraph.Linear, respectively, and can be used to filter
                # return ParamBase.name == linear.weight.name
                # clip = fluid.clip.GradientClipByNorm(clip_norm=1.0, need_clip=filter_func)
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=0.1, parameter_list=linear.parameters(), grad_clip=clip)
sgd_optimizer.minimize(loss)
"""
def __init__(self, clip_norm, need_clip=None):
super(GradientClipByNorm, self).__init__(need_clip)
self.clip_norm = float(clip_norm)
def __str__(self):
return "Gradient Clip By Norm, clip_norm=%f" % self.clip_norm
@imperative_base.no_grad
def _dygraph_clip(self, params_grads):
params_and_grads = []
for p, g in params_grads:
if g is None:
continue
if self._need_clip_func is not None and not self._need_clip_func(p):
params_and_grads.append((p, g))
continue
new_grad = layers.clip_by_norm(x=g, max_norm=self.clip_norm)
params_and_grads.append((p, new_grad))
return params_and_grads
def _static_clip(self, params_grads):
params_and_grads = []
with framework.name_scope('gradient_clip'):
param_new_grad_name_dict = dict()
for p, g in params_grads:
if g is None:
continue
if self._need_clip_func is not None and not self._need_clip_func(
p):
params_and_grads.append((p, g))
continue
with p.block.program._optimized_guard([p, g]):
new_grad = layers.clip_by_norm(x=g, max_norm=self.clip_norm)
param_new_grad_name_dict[p.name] = new_grad.name
params_and_grads.append((p, new_grad))
_correct_clip_op_role_var(params_and_grads, param_new_grad_name_dict)
return params_and_grads
def _process_context(self, context, param, grad):
pass
def _create_operators(self, param, grad):
new_grad = layers.clip_by_norm(x=grad, max_norm=self.clip_norm)
return param, new_grad
class GradientClipByGlobalNorm(GradientClipBase):
"""
:alias_main: paddle.nn.GradientClipByGlobalNorm
:alias: paddle.nn.GradientClipByGlobalNorm,paddle.nn.clip.GradientClipByGlobalNorm
:old_api: paddle.fluid.clip.GradientClipByGlobalNorm
Given a list of Tensor :math:`t\_list` , calculate the global norm for the elements of all tensors in
:math:`t\_list` , and limit it to ``clip_norm`` .
- If the global norm is greater than ``clip_norm`` , all elements of :math:`t\_list` will be compressed by a ratio.
- If the global norm is less than or equal to ``clip_norm`` , nothing will be done.
The list of Tensor :math:`t\_list` is not passed from this class, but the gradients of all parameters in ``Program`` . If ``need_clip``
is not None, then only part of gradients can be selected for gradient clipping.
    Gradient clipping will take effect after being set in ``optimizer`` , see the document ``optimizer``
(for example: :ref:`api_fluid_optimizer_SGDOptimizer`).
The clipping formula is:
.. math::
t\_list[i] = t\_list[i] * \\frac{clip\_norm}{\max(global\_norm, clip\_norm)}
where:
.. math::
global\_norm = \sqrt{\sum_{i=0}^{N-1}(l2norm(t\_list[i]))^2}
Args:
clip_norm (float): The maximum norm value.
group_name (str, optional): The group name for this clip. Default value is ``default_group``
need_clip (function, optional): Type: function. This function accepts a ``Parameter`` and returns ``bool``
(True: the gradient of this ``Parameter`` need to be clipped, False: not need). Default: None,
and gradients of all parameters in the network will be clipped.
Examples:
.. code-block:: python
# use for Static mode
import paddle
import paddle.fluid as fluid
import numpy as np
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(
main_program=main_prog, startup_program=startup_prog):
image = fluid.data(
name='x', shape=[-1, 2], dtype='float32')
predict = fluid.layers.fc(input=image, size=3, act='relu') # Trainable parameters: fc_0.w.0, fc_0.b.0
loss = fluid.layers.mean(predict)
# Clip all parameters in network:
clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)
# Clip a part of parameters in network: (e.g. fc_0.w_0)
            # pass a function (filter_func) to need_clip; filter_func receives a Parameter and returns bool
            # def filter_func(Parameter):
            # # It can be easily filtered by Parameter.name (name can be set in fluid.ParamAttr, and the default name is fc_0.w_0, fc_0.b_0)
            # return Parameter.name=="fc_0.w_0"
            # clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=filter_func)
sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip)
sgd_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
x = np.random.uniform(-100, 100, (10, 2)).astype('float32')
exe.run(startup_prog)
out = exe.run(main_prog, feed={'x': x}, fetch_list=loss)
# use for Dygraph mode
import paddle
import paddle.fluid as fluid
with fluid.dygraph.guard():
linear = fluid.dygraph.Linear(10, 10) # Trainable: linear_0.w.0, linear_0.b.0
inputs = fluid.layers.uniform_random([32, 10]).astype('float32')
out = linear(fluid.dygraph.to_variable(inputs))
loss = fluid.layers.reduce_mean(out)
loss.backward()
# Clip all parameters in network:
clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)
# Clip a part of parameters in network: (e.g. linear_0.w_0)
                # pass a function (filter_func) to need_clip; filter_func receives a ParamBase and returns bool
                # def filter_func(ParamBase):
                # # It can be easily filtered by ParamBase.name (name can be set in fluid.ParamAttr, and the default name is linear_0.w_0, linear_0.b_0)
                # return ParamBase.name == "linear_0.w_0"
                # # Note: linear.weight and linear.bias can return the weight and bias of dygraph.Linear, respectively, and can be used to filter
                # return ParamBase.name == linear.weight.name
                # clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=filter_func)
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=0.1, parameter_list=linear.parameters(), grad_clip=clip)
sgd_optimizer.minimize(loss)
"""
def __init__(self, clip_norm, group_name="default_group", need_clip=None):
super(GradientClipByGlobalNorm, self).__init__(need_clip)
self.clip_norm = float(clip_norm)
self.group_name = group_name
def __str__(self):
return "Gradient Clip By GlobalNorm, global_norm=%f" % (self.clip_norm)
@imperative_base.no_grad
def _dygraph_clip(self, params_grads):
params_and_grads = []
sum_square_list = []
for p, g in params_grads:
if g is None:
continue
if self._need_clip_func is not None and not self._need_clip_func(p):
continue
merge_grad = g
if g.type == core.VarDesc.VarType.SELECTED_ROWS:
merge_grad = layers.merge_selected_rows(g)
merge_grad = layers.get_tensor_from_selected_rows(merge_grad)
square = layers.square(merge_grad)
sum_square = layers.reduce_sum(square)
sum_square_list.append(sum_square)
        # all parameters have been filtered out
if len(sum_square_list) == 0:
return params_grads
global_norm_var = layers.concat(sum_square_list)
global_norm_var = layers.reduce_sum(global_norm_var)
global_norm_var = layers.sqrt(global_norm_var)
max_global_norm = layers.fill_constant(
shape=[1], dtype=global_norm_var.dtype, value=self.clip_norm)
clip_var = layers.elementwise_div(
x=max_global_norm,
y=layers.elementwise_max(
x=global_norm_var, y=max_global_norm))
for p, g in params_grads:
if g is None:
continue
if self._need_clip_func is not None and not self._need_clip_func(p):
params_and_grads.append((p, g))
continue
new_grad = layers.elementwise_mul(x=g, y=clip_var)
params_and_grads.append((p, new_grad))
return params_and_grads
def _static_clip(self, params_grads):
params_and_grads = []
sum_square_list = []
with framework.name_scope('gradient_clip'):
for p, g in params_grads:
if g is None:
continue
if self._need_clip_func is not None and not self._need_clip_func(
p):
continue
merge_grad = g
with p.block.program._optimized_guard([p, g]):
if g.type == core.VarDesc.VarType.SELECTED_ROWS:
merge_grad = layers.merge_selected_rows(g)
merge_grad = layers.get_tensor_from_selected_rows(
merge_grad)
square = layers.square(merge_grad)
sum_square = layers.reduce_sum(input=square)
sum_square_list.append(sum_square)
            # all parameters have been filtered out
if len(sum_square_list) == 0:
return params_grads
with p.block.program._optimized_guard([p, g]):
global_norm_var = layers.sums(sum_square_list)
global_norm_var = layers.sqrt(x=global_norm_var)
max_global_norm = layers.fill_constant(
shape=[1],
dtype=global_norm_var.dtype,
value=self.clip_norm)
scale_var = layers.elementwise_div(
x=max_global_norm,
y=layers.elementwise_max(
x=max_global_norm, y=global_norm_var))
param_new_grad_name_dict = dict()
for p, g in params_grads:
if g is None:
continue
if self._need_clip_func is not None and not self._need_clip_func(
p):
params_and_grads.append((p, g))
continue
with p.block.program._optimized_guard([p, g]):
new_grad = layers.elementwise_mul(x=g, y=scale_var)
param_new_grad_name_dict[p.name] = new_grad.name
params_and_grads.append((p, new_grad))
_correct_clip_op_role_var(params_and_grads, param_new_grad_name_dict)
return params_and_grads
def _process_context(self, context, param, grad):
if self.group_name not in context:
context[self.group_name] = []
context[self.group_name + "_clip_value"] = self.clip_norm
context[self.group_name + "_clip"] = layers.fill_constant(
shape=[1], dtype=grad.dtype, value=self.clip_norm)
else:
if not self.clip_norm == context[self.group_name + "_clip_value"]:
raise ValueError(
"All parameters' 'clip_norm' of a same group should be the same"
)
merge_grad = grad
if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
merge_grad = layers.merge_selected_rows(grad)
merge_grad = layers.get_tensor_from_selected_rows(merge_grad)
square = layers.square(merge_grad)
local_norm_var = layers.reduce_sum(input=square)
context[self.group_name].append(local_norm_var)
self.context = context
def _create_operators(self, param, grad):
group_scale_name = self.group_name + "_scale"
if group_scale_name not in self.context:
group_norm_var = layers.sums(input=self.context[self.group_name])
group_norm_var = layers.sqrt(x=group_norm_var)
clip_var = self.context[self.group_name + "_clip"]
group_scale_var = layers.elementwise_div(
x=clip_var,
y=layers.elementwise_max(
x=clip_var, y=group_norm_var))
assert group_scale_var.shape == (1, )
self.context[group_scale_name] = group_scale_var
new_grad = layers.elementwise_mul(
x=grad, y=self.context[group_scale_name])
return param, new_grad
@framework.dygraph_not_support
def set_gradient_clip(clip, param_list=None, program=None):
"""
:api_attr: Static Graph
Warning:
This API must be used after building network, and before ``minimize`` ,
and it may be removed in future releases, so it is not recommended.
It is recommended to set ``grad_clip`` when initializing the ``optimizer`` ,
this is a better method to clip gradient. There are three clipping strategies:
:ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
:ref:`api_fluid_clip_GradientClipByValue` .
To specify parameters that require gradient clip.
Args:
        clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
:ref:`api_fluid_clip_GradientClipByValue` ). Default value: None, and there is no
gradient clipping.
param_list (list(Variable), optional): Parameters that require gradient clip.
It can be a list of parameter or a list of parameter's name.
Default None, meaning that all parameters in the program will be included.
program (Program, optional): The program where parameters are located.
Default None, meaning that using :ref:`api_fluid_default_main_program` .
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
def network():
image = fluid.data(name='image', shape=[
None, 28], dtype='float32')
param_attr1 = fluid.ParamAttr("fc1_param")
fc1 = fluid.layers.fc(image, size=10, param_attr=param_attr1)
param_attr2 = fluid.ParamAttr("fc2_param")
fc2 = fluid.layers.fc(fc1, size=10, param_attr=param_attr2)
loss = fluid.layers.reduce_mean(fc2)
return loss
# network 1: clip all parameter gradient
with fluid.program_guard(fluid.Program(), fluid.Program()):
loss = network()
fluid.clip.set_gradient_clip(
fluid.clip.GradientClipByGlobalNorm(clip_norm=2.0))
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(loss)
# network 2: clip parameter gradient by name
with fluid.program_guard(fluid.Program(), fluid.Program()):
loss = network()
fluid.clip.set_gradient_clip(
fluid.clip.GradientClipByValue(min=-1.0, max=1.0),
param_list=["fc1_param", "fc2_param"])
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(loss)
# network 3: clip parameter gradient by value
with fluid.program_guard(fluid.Program(), fluid.Program()):
loss = network()
param_var1 = fluid.default_main_program().global_block().var("fc1_param")
param_var2 = fluid.default_main_program().global_block().var("fc2_param")
fluid.clip.set_gradient_clip(
fluid.clip.GradientClipByValue(min=-1.0, max=1.0),
param_list=[param_var1, param_var2])
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(loss)
# network 4: use 'set_gradient_clip' and 'optimize(grad_clip=clip)' together
with fluid.program_guard(fluid.Program(), fluid.Program()):
loss = network()
clip1 = fluid.clip.GradientClipByValue(min=-1.0, max=1.0)
clip2 = fluid.clip.GradientClipByNorm(clip_norm=1.0)
# Set the gradient clipping strategy: clip1
fluid.clip.set_gradient_clip(clip1)
# Set the gradient clipping strategy: clip2
sgd = fluid.optimizer.SGD(learning_rate=1e-3, grad_clip=clip2)
sgd.minimize(loss)
# 'set_gradient_clip' will not take effect when setting has a conflict,
# and the gradient clipping strategy will be 'clip2'
"""
    warnings.warn("Caution! 'set_gradient_clip' is not recommended "
                  "and may be deprecated in future! "
                  "We recommend a new strategy: set 'grad_clip' "
                  "when initializing the 'optimizer'. "
                  "This method can reduce mistakes; please "
                  "refer to the documentation of 'optimizer'.")
if not isinstance(clip, GradientClipBase):
raise TypeError(
"'clip' should be an instance of GradientClipBase's derived class")
if program is None:
program = framework.default_main_program()
for op in program.block(0).ops:
if 'op_namescope' in op.all_attrs() and "optimizer" in op.attr(
"op_namescope"):
warnings.warn(
"'minimize' has been invoked before, this will make 'set_gradient_clip' "
"be ineffective! Please invoke 'set_gradient_clip' before 'minimize'."
)
break
if param_list is None:
param_list = program.block(0).all_parameters()
if all(isinstance(elem, six.string_types) for elem in param_list):
param_list = [program.block(0).var(elem) for elem in param_list]
if not all(isinstance(elem, framework.Parameter) for elem in param_list):
raise TypeError(
"'param_list' should be a list of Parameter or basestring(parameter's name)."
)
for param in param_list:
param.gradient_clip_attr = copy.deepcopy(clip)
def append_gradient_clip_ops(param_grads):
context = dict()
for p, g in param_grads:
if g is None:
continue
with p.block.program._optimized_guard(
[p, g]), framework.name_scope('gradient_clip_@CLIP'):
clip_attr = getattr(p, 'gradient_clip_attr', None)
if clip_attr is None:
return param_grads
if not isinstance(clip_attr, GradientClipBase):
raise TypeError(
"clip attribute should be an instance of GradientClipBase")
clip_attr._process_context(context=context, param=p, grad=g)
res = []
param_new_grad_name_dict = dict()
for p, g in param_grads:
if g is None:
continue
with p.block.program._optimized_guard(
                [p, g]), framework.name_scope('gradient_clip_@CLIP'):
param, new_grad = clip_attr._create_operators(param=p, grad=g)
param_new_grad_name_dict[param.name] = new_grad.name
res.append([param, new_grad])
_correct_clip_op_role_var(res, param_new_grad_name_dict)
return res
# change wrong mapping relation between param & grad in clip op
# Note: This function is sensitive to the time cost of the network with gradient clipping
# and should not be changed easily. If you must change, please test the time cost.
def _correct_clip_op_role_var(params_grads, param_new_grad_name_dict):
block_id_list = []
if len(param_new_grad_name_dict) == 0:
return
for param, grad in params_grads:
if grad is None:
continue
block_id = param.block.idx
if block_id in block_id_list:
continue
block_id_list.append(block_id)
for op in param.block.program.global_block().ops:
if 'op_namescope' in op.all_attrs() and "gradient_clip" in op.attr(
"op_namescope") and op.attr('op_role_var'):
param_name = op.attr('op_role_var')[0]
if param_name in param_new_grad_name_dict:
correct_p_g = [
param_name, param_new_grad_name_dict[param_name]
]
op._set_attr('op_role_var', correct_p_g)
ClipByValue = GradientClipByValue
ClipByNorm = GradientClipByNorm
ClipByGlobalNorm = GradientClipByGlobalNorm
|
the-stack_106_28329 | #########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import uuid
import pytest
from integration_tests import AgentTestCase
from integration_tests.tests.utils import get_resource as resource
from integration_tests.tests.utils import wait_for_blueprint_upload
pytestmark = pytest.mark.group_dsl
class NamespacedScriptsTest(AgentTestCase):
def test_success_deploy_namespaced_blueprint_with_scripts(self):
basic_blueprint_path =\
resource('dsl/agent_tests/blueprint_with_scripts.yaml')
blueprint_id = 'imported_scripts'
self.client.blueprints.upload(basic_blueprint_path,
entity_id=blueprint_id)
wait_for_blueprint_upload(blueprint_id, self.client)
deployment_id = 'd{0}'.format(uuid.uuid4())
dsl_path = resource(
'dsl/agent_tests/blueprints/'
'blueprint_with_namespaced_blueprint_import.yaml')
_, execution_id = self.deploy_application(dsl_path,
deployment_id=deployment_id)
events = self.client.events.list(execution_id=execution_id,
sort='timestamp')
script_success_msg = "Task succeeded 'script_runner.tasks.run'"
script_success_events = [event['message'] for event in events
if script_success_msg == event['message']]
self.assertEqual(len(script_success_events), 1)
agent_success_msg = 'Agent created'
agent_success_events = [event['message'] for event in events
if agent_success_msg == event['message']]
self.assertEqual(len(agent_success_events), 1)
def test_success_deploy_namespaced_blueprint_with_local_scripts(self):
deployment_id = 'dep'
dsl_path = resource(
'dsl/agent_tests/'
'blueprint_with_namespaced_local_blueprint_import.yaml')
_, execution_id = self.deploy_application(dsl_path,
deployment_id=deployment_id)
events = self.client.events.list(execution_id=execution_id,
sort='timestamp')
script_success_msg = "Task succeeded 'script_runner.tasks.run'"
script_success_events = [event['message'] for event in events
if script_success_msg == event['message']]
self.assertEqual(len(script_success_events), 1)
agent_success_msg = 'Agent created'
agent_success_events = [event['message'] for event in events
if agent_success_msg == event['message']]
self.assertEqual(len(agent_success_events), 1)
|
the-stack_106_28330 | from six import iteritems, callable
from voluptuous import Schema, ALLOW_EXTRA
from typedtuple import TypedTupleType
from hieratic import Resource
class ItemResource(Resource):
def __init__(self, parent, name, engine_name, item_engine):
Resource.__init__(self, parent, name)
self.__engine_name = engine_name
self.__engine = item_engine
self.__data = None
self.__is_deleted = False
@property
def engine_name(self):
return self.__engine_name
@property
def engine(self):
return self.__engine
@classmethod
def set_data_class(cls, data_class):
if not issubclass(data_class, TypedTupleType):
            raise ValueError('data class must be a subclass of typedtuple.TypedTupleType.')
cls.__data_class = data_class
@classmethod
def get_data_class(cls):
return cls.__data_class
@staticmethod
def data_class(data_class):
def f(clazz):
clazz.set_data_class(data_class)
return clazz
return f
@classmethod
def get_persistence_converters(cls):
try:
cls.__persistence_converters
except AttributeError:
cls.__persistence_converters = {}
return cls.__persistence_converters
@classmethod
def register_persistence_converter(cls, engine_name, converter):
if not isinstance(converter, Schema) and not callable(converter):
converter = Schema(converter, extra=ALLOW_EXTRA)
cls.get_persistence_converters()[engine_name] = converter
@classmethod
def get_persistence_converter(cls, engine_name):
return cls.get_persistence_converters().get(engine_name)
@staticmethod
def persistence_converter(converters):
def f(clazz):
for k, v in iteritems(converters):
clazz.register_persistence_converter(k, v)
return clazz
return f
@staticmethod
def define(data_class, child_definitions=None, converters=None):
child_definitions = child_definitions or {}
converters = converters or {}
def f(clazz):
return ItemResource.persistence_converter(converters)(
Resource.children(child_definitions)(
ItemResource.data_class(data_class)(clazz)
)
)
return f
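    # A hedged usage sketch (class and field names below are hypothetical, not part of
    # this module): composing data_class, children and persistence converters in one step.
    # Plain dict converters are wrapped in a voluptuous Schema with extra=ALLOW_EXTRA.
    #
    #   class UserTuple(TypedTupleType):
    #       ...  # field definitions per the typedtuple library
    #
    #   @ItemResource.define(
    #       data_class=UserTuple,
    #       converters={'dynamodb': {'user_id': str}},
    #   )
    #   class UserItem(ItemResource):
    #       pass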
@property
def data(self):
if self.__data is None:
return self.get_data()
else:
return self.__data
def get_data(self):
self.__data = None if self.__is_deleted else self.get_data_class()(**self.engine.get_dict())
return self.__data
def update(self, patch=True, context=None, **kwargs):
primary_index = self.parent.get_index()
found_index_key = None
if primary_index.first_desc[0] in kwargs:
found_index_key = primary_index.first_desc[0]
elif primary_index.second_desc is not None and primary_index.second_desc[0] in kwargs:
found_index_key = primary_index.second_desc[0]
if found_index_key is not None:
raise ValueError('index attribute "{}" cannot be updated.'.format(found_index_key))
updates = kwargs
persistence_converter = self.get_persistence_converter(self.engine_name)
if persistence_converter is not None:
updates = persistence_converter(updates)
self.engine.update(primary_index, patch, context, updates)
self.get_data()
def delete(self, context=None):
self.engine.delete(self.parent.get_index(), context)
del self.__parent__[self.__name__]
self.__is_deleted = True
self.get_data()
|
the-stack_106_28331 | #!/usr/bin/env python3
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
import re
import sys
import nbformat
import pytest
from nbconvert.preprocessors import ExecutePreprocessor
# See https://www.blog.pythonlibrary.org/2018/10/16/testing-jupyter-notebooks/
EXAMPLES = os.path.join(os.path.dirname(os.path.dirname(__file__)), "docs", "examples")
SKIP = ("11-icoads.ipynb",)
MARS = (
    "04-source-mars.ipynb",
    "08-mars-odb.ipynb",
    "11-icoads.ipynb",
)
CDS = (
"03-source-cds.ipynb",
"06-era5-temperature.ipynb",
"05-high-lows.ipynb",
"14-gruan.ipynb",
"11-hurricane-database.ipynb",
)
TENSORFLOW = ("05-high-lows.ipynb",)
def notebooks_list():
notebooks = []
for path in os.listdir(EXAMPLES):
if re.match(r"^\d\d-.*\.ipynb$", path):
if "Copy" not in path:
notebooks.append(path)
return sorted(notebooks)
@pytest.mark.skipif(
int(os.environ.get("CLIMETLAB_SKIP_NOTEBOOKS_TESTS", 0)),
reason="CLIMETLAB_SKIP_NOTEBOOKS_TESTS not zero",
)
@pytest.mark.skipif(
    sys.platform == "win32", reason="Cannot execute notebooks on Windows"
)
@pytest.mark.parametrize("path", notebooks_list())
def test_notebook(path):
if path in SKIP:
pytest.skip("Notebook marked as 'skip'")
if path in MARS:
if not os.path.exists(os.path.expanduser("~/.ecmwfapirc")):
pytest.skip("No ~/.ecmwfapirc")
if path in CDS:
if not os.path.exists(os.path.expanduser("~/.cdsapirc")):
pytest.skip("No ~/.cdsapirc")
if path in TENSORFLOW:
if sys.version_info >= (3, 9):
pytest.skip("Tensorflow not yet ready on 3.9")
with open(os.path.join(EXAMPLES, path)) as f:
nb = nbformat.read(f, as_version=4)
proc = ExecutePreprocessor(timeout=60 * 60, kernel_name="python3")
proc.preprocess(nb, {"metadata": {"path": EXAMPLES}})
|
the-stack_106_28333 | from __future__ import division
from itertools import product
from collections import namedtuple
import numpy as np
from pgmpy.factors.base import BaseFactor
from pgmpy.extern import tabulate
from pgmpy.extern import six
from pgmpy.extern.six.moves import map, range, reduce, zip
from pgmpy.utils import StateNameInit, StateNameDecorator
State = namedtuple('State', ['var', 'state'])
class DiscreteFactor(BaseFactor):
"""
Base class for DiscreteFactor.
Public Methods
--------------
assignment(index)
get_cardinality(variable)
marginalize([variable_list])
normalize()
product(*DiscreteFactor)
reduce([variable_values_list])
"""
@StateNameInit()
def __init__(self, variables, cardinality, values):
"""
Initialize a factor class.
Defined above, we have the following mapping from variable
assignments to the index of the row vector in the value field:
+-----+-----+-----+-------------------+
| x1 | x2 | x3 | phi(x1, x2, x3)|
+-----+-----+-----+-------------------+
| x1_0| x2_0| x3_0| phi.value(0) |
+-----+-----+-----+-------------------+
| x1_0| x2_0| x3_1| phi.value(1) |
+-----+-----+-----+-------------------+
| x1_0| x2_1| x3_0| phi.value(2) |
+-----+-----+-----+-------------------+
| x1_0| x2_1| x3_1| phi.value(3) |
+-----+-----+-----+-------------------+
| x1_1| x2_0| x3_0| phi.value(4) |
+-----+-----+-----+-------------------+
| x1_1| x2_0| x3_1| phi.value(5) |
+-----+-----+-----+-------------------+
| x1_1| x2_1| x3_0| phi.value(6) |
+-----+-----+-----+-------------------+
| x1_1| x2_1| x3_1| phi.value(7) |
+-----+-----+-----+-------------------+
Parameters
----------
variables: list, array-like
List of variables in the scope of the factor.
cardinality: list, array_like
List of cardinalities of each variable. `cardinality` array must have a value
corresponding to each variable in `variables`.
values: list, array_like
List of values of factor.
A DiscreteFactor's values are stored in a row vector in the value
using an ordering such that the left-most variables as defined in
`variables` cycle through their values the fastest.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
>>> phi
<DiscreteFactor representing phi(x1:2, x2:2, x3:2) at 0x7f8188fcaa90>
>>> print(phi)
+------+------+------+-----------------+
| x1 | x2 | x3 | phi(x1,x2,x3) |
|------+------+------+-----------------|
| x1_0 | x2_0 | x3_0 | 1.0000 |
| x1_0 | x2_0 | x3_1 | 1.0000 |
| x1_0 | x2_1 | x3_0 | 1.0000 |
| x1_0 | x2_1 | x3_1 | 1.0000 |
| x1_1 | x2_0 | x3_0 | 1.0000 |
| x1_1 | x2_0 | x3_1 | 1.0000 |
| x1_1 | x2_1 | x3_0 | 1.0000 |
| x1_1 | x2_1 | x3_1 | 1.0000 |
+------+------+------+-----------------+
"""
if isinstance(variables, six.string_types):
raise TypeError("Variables: Expected type list or array like, got string")
values = np.array(values, dtype=float)
if len(cardinality) != len(variables):
raise ValueError("Number of elements in cardinality must be equal to number of variables")
if values.size != np.product(cardinality):
raise ValueError("Values array must be of size: {size}".format(
size=np.product(cardinality)))
if len(set(variables)) != len(variables):
            raise ValueError("Variable names cannot be the same")
self.variables = list(variables)
self.cardinality = np.array(cardinality, dtype=int)
self.values = values.reshape(self.cardinality)
def scope(self):
"""
Returns the scope of the factor.
Returns
-------
list: List of variable names in the scope of the factor.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], np.ones(12))
>>> phi.scope()
['x1', 'x2', 'x3']
"""
return self.variables
def get_cardinality(self, variables):
"""
Returns cardinality of a given variable
Parameters
----------
variables: list, array-like
A list of variable names.
Returns
-------
dict: Dictionary of the form {variable: variable_cardinality}
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi.get_cardinality(['x1'])
{'x1': 2}
>>> phi.get_cardinality(['x1', 'x2'])
{'x1': 2, 'x2': 3}
"""
if isinstance(variables, six.string_types):
raise TypeError("variables: Expected type list or array-like, got type str")
if not all([var in self.variables for var in variables]):
raise ValueError("Variable not in scope")
return {var: self.cardinality[self.variables.index(var)] for var in variables}
@StateNameDecorator(argument=None, return_val=True)
def assignment(self, index):
"""
Returns a list of assignments for the corresponding index.
Parameters
----------
index: list, array-like
List of indices whose assignment is to be computed
Returns
-------
list: Returns a list of full assignments of all the variables of the factor.
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['diff', 'intel'], [2, 2], np.ones(4))
>>> phi.assignment([1, 2])
[[('diff', 0), ('intel', 1)], [('diff', 1), ('intel', 0)]]
"""
index = np.array(index)
max_possible_index = np.prod(self.cardinality) - 1
if not all(i <= max_possible_index for i in index):
raise IndexError("Index greater than max possible index")
assignments = np.zeros((len(index), len(self.scope())), dtype=np.int)
rev_card = self.cardinality[::-1]
for i, card in enumerate(rev_card):
assignments[:, i] = index % card
index = index // card
assignments = assignments[:, ::-1]
return [[(key, val) for key, val in zip(self.variables, values)] for values in assignments]
def identity_factor(self):
"""
Returns the identity factor.
        Def: The identity factor of a factor has the same scope and cardinality as the original factor,
             but the values for all the assignments are 1. When the identity factor is multiplied with
             the factor, it returns the factor itself.
Returns
-------
DiscreteFactor: The identity factor.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi_identity = phi.identity_factor()
>>> phi_identity.variables
['x1', 'x2', 'x3']
>>> phi_identity.values
array([[[ 1., 1.],
[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.],
[ 1., 1.]]])
"""
return DiscreteFactor(self.variables, self.cardinality, np.ones(self.values.size))
def marginalize(self, variables, inplace=True):
"""
Modifies the factor with marginalized values.
Parameters
----------
variables: list, array-like
List of variables over which to marginalize.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi.marginalize(['x1', 'x3'])
>>> phi.values
array([ 14., 22., 30.])
>>> phi.variables
['x2']
"""
if isinstance(variables, six.string_types):
raise TypeError("variables: Expected type list or array-like, got type str")
phi = self if inplace else self.copy()
for var in variables:
if var not in phi.variables:
raise ValueError("{var} not in scope.".format(var=var))
var_indexes = [phi.variables.index(var) for var in variables]
index_to_keep = sorted(set(range(len(self.variables))) - set(var_indexes))
phi.variables = [phi.variables[index] for index in index_to_keep]
phi.cardinality = phi.cardinality[index_to_keep]
phi.values = np.sum(phi.values, axis=tuple(var_indexes))
if not inplace:
return phi
def maximize(self, variables, inplace=True):
"""
Maximizes the factor with respect to `variables`.
Parameters
----------
variables: list, array-like
List of variables with respect to which factor is to be maximized
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2], [0.25, 0.35, 0.08, 0.16, 0.05, 0.07,
... 0.00, 0.00, 0.15, 0.21, 0.09, 0.18])
>>> phi.variables
['x1','x2','x3']
>>> phi.maximize(['x2'])
>>> phi.variables
['x1', 'x3']
>>> phi.cardinality
array([3, 2])
>>> phi.values
array([[ 0.25, 0.35],
[ 0.05, 0.07],
[ 0.15, 0.21]])
"""
if isinstance(variables, six.string_types):
raise TypeError("variables: Expected type list or array-like, got type str")
phi = self if inplace else self.copy()
for var in variables:
if var not in phi.variables:
raise ValueError("{var} not in scope.".format(var=var))
var_indexes = [phi.variables.index(var) for var in variables]
index_to_keep = sorted(set(range(len(self.variables))) - set(var_indexes))
phi.variables = [phi.variables[index] for index in index_to_keep]
phi.cardinality = phi.cardinality[index_to_keep]
phi.values = np.max(phi.values, axis=tuple(var_indexes))
if not inplace:
return phi
def normalize(self, inplace=True):
"""
Normalizes the values of factor so that they sum to 1.
Parameters
----------
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi.values
array([[[ 0, 1],
[ 2, 3],
[ 4, 5]],
[[ 6, 7],
[ 8, 9],
[10, 11]]])
>>> phi.normalize()
>>> phi.variables
['x1', 'x2', 'x3']
>>> phi.cardinality
array([2, 3, 2])
>>> phi.values
array([[[ 0. , 0.01515152],
[ 0.03030303, 0.04545455],
[ 0.06060606, 0.07575758]],
[[ 0.09090909, 0.10606061],
[ 0.12121212, 0.13636364],
[ 0.15151515, 0.16666667]]])
"""
phi = self if inplace else self.copy()
phi.values = phi.values / phi.values.sum()
if not inplace:
return phi
@StateNameDecorator(argument='values', return_val=None)
def reduce(self, values, inplace=True):
"""
Reduces the factor to the context of given variable values.
Parameters
----------
values: list, array-like
A list of tuples of the form (variable_name, variable_state).
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi.reduce([('x1', 0), ('x2', 0)])
>>> phi.variables
['x3']
>>> phi.cardinality
array([2])
>>> phi.values
array([0., 1.])
"""
if isinstance(values, six.string_types):
raise TypeError("values: Expected type list or array-like, got type str")
if (any(isinstance(value, six.string_types) for value in values) or
not all(isinstance(state, (int, np.integer)) for var, state in values)):
raise TypeError("values: must contain tuples or array-like elements of the form "
"(hashable object, type int)")
phi = self if inplace else self.copy()
var_index_to_del = []
slice_ = [slice(None)] * len(self.variables)
for var, state in values:
var_index = phi.variables.index(var)
slice_[var_index] = state
var_index_to_del.append(var_index)
var_index_to_keep = sorted(set(range(len(phi.variables))) - set(var_index_to_del))
        # set difference is not guaranteed to maintain ordering
phi.variables = [phi.variables[index] for index in var_index_to_keep]
phi.cardinality = phi.cardinality[var_index_to_keep]
phi.values = phi.values[tuple(slice_)]
if not inplace:
return phi
def sum(self, phi1, inplace=True):
"""
DiscreteFactor sum with `phi1`.
Parameters
----------
phi1: `DiscreteFactor` instance.
DiscreteFactor to be added.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Example
-------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> phi1.sum(phi2, inplace=True)
>>> phi1.variables
['x1', 'x2', 'x3', 'x4']
>>> phi1.cardinality
array([2, 3, 2, 2])
>>> phi1.values
array([[[[ 0, 0],
[ 4, 6]],
[[ 0, 4],
[12, 18]],
[[ 0, 8],
[20, 30]]],
[[[ 6, 18],
[35, 49]],
[[ 8, 24],
[45, 63]],
[[10, 30],
[55, 77]]]])
"""
phi = self if inplace else self.copy()
if isinstance(phi1, (int, float)):
phi.values += phi1
else:
phi1 = phi1.copy()
# modifying phi to add new variables
extra_vars = set(phi1.variables) - set(phi.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi.values = phi.values[slice_]
phi.variables.extend(extra_vars)
new_var_card = phi1.get_cardinality(extra_vars)
phi.cardinality = np.append(phi.cardinality, [new_var_card[var] for var in extra_vars])
# modifying phi1 to add new variables
extra_vars = set(phi.variables) - set(phi1.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi1.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi1.values = phi1.values[slice_]
phi1.variables.extend(extra_vars)
# No need to modify cardinality as we don't need it.
# rearranging the axes of phi1 to match phi
for axis in range(phi.values.ndim):
exchange_index = phi1.variables.index(phi.variables[axis])
phi1.variables[axis], phi1.variables[exchange_index] = phi1.variables[exchange_index], \
phi1.variables[axis]
phi1.values = phi1.values.swapaxes(axis, exchange_index)
phi.values = phi.values + phi1.values
if not inplace:
return phi
def product(self, phi1, inplace=True):
"""
DiscreteFactor product with `phi1`.
Parameters
----------
phi1: `DiscreteFactor` instance
DiscreteFactor to be multiplied.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Example
-------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> phi1.product(phi2, inplace=True)
>>> phi1.variables
['x1', 'x2', 'x3', 'x4']
>>> phi1.cardinality
array([2, 3, 2, 2])
>>> phi1.values
array([[[[ 0, 0],
[ 4, 6]],
[[ 0, 4],
[12, 18]],
[[ 0, 8],
[20, 30]]],
[[[ 6, 18],
[35, 49]],
[[ 8, 24],
[45, 63]],
[[10, 30],
                 [55, 77]]]])
"""
phi = self if inplace else self.copy()
if isinstance(phi1, (int, float)):
phi.values *= phi1
else:
phi1 = phi1.copy()
# modifying phi to add new variables
extra_vars = set(phi1.variables) - set(phi.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi.values = phi.values[slice_]
phi.variables.extend(extra_vars)
new_var_card = phi1.get_cardinality(extra_vars)
phi.cardinality = np.append(phi.cardinality, [new_var_card[var] for var in extra_vars])
# modifying phi1 to add new variables
extra_vars = set(phi.variables) - set(phi1.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi1.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi1.values = phi1.values[slice_]
phi1.variables.extend(extra_vars)
# No need to modify cardinality as we don't need it.
# rearranging the axes of phi1 to match phi
for axis in range(phi.values.ndim):
exchange_index = phi1.variables.index(phi.variables[axis])
phi1.variables[axis], phi1.variables[exchange_index] = phi1.variables[exchange_index], \
phi1.variables[axis]
phi1.values = phi1.values.swapaxes(axis, exchange_index)
phi.values = phi.values * phi1.values
if not inplace:
return phi
def divide(self, phi1, inplace=True):
"""
DiscreteFactor division by `phi1`.
Parameters
----------
phi1 : `DiscreteFactor` instance
The denominator for division.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
        >>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5))
>>> phi1.divide(phi2)
>>> phi1.variables
['x1', 'x2', 'x3']
>>> phi1.cardinality
array([2, 3, 2])
>>> phi1.values
array([[[ 0. , 0.33333333],
[ 2. , 1. ],
[ 4. , 1.66666667]],
[[ 3. , 1.75 ],
[ 4. , 2.25 ],
[ 5. , 2.75 ]]])
"""
phi = self if inplace else self.copy()
phi1 = phi1.copy()
if set(phi1.variables) - set(phi.variables):
raise ValueError("Scope of divisor should be a subset of dividend")
# Adding extra variables in phi1.
extra_vars = set(phi.variables) - set(phi1.variables)
if extra_vars:
slice_ = [slice(None)] * len(phi1.variables)
slice_.extend([np.newaxis] * len(extra_vars))
phi1.values = phi1.values[slice_]
phi1.variables.extend(extra_vars)
# Rearranging the axes of phi1 to match phi
for axis in range(phi.values.ndim):
exchange_index = phi1.variables.index(phi.variables[axis])
phi1.variables[axis], phi1.variables[exchange_index] = phi1.variables[exchange_index], phi1.variables[axis]
phi1.values = phi1.values.swapaxes(axis, exchange_index)
phi.values = phi.values / phi1.values
        # For factor division, 0/0 is defined to be 0, while x/0 (x != 0) is undefined.
        # numpy naturally yields np.inf for x/0, which pgmpy keeps to represent those
        # cases; the NaN entries produced by 0/0 are reset to 0 below.
phi.values[np.isnan(phi.values)] = 0
if not inplace:
return phi
def copy(self):
"""
Returns a copy of the factor.
Returns
-------
DiscreteFactor: copy of the factor
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 3], np.arange(18))
>>> phi_copy = phi.copy()
>>> phi_copy.variables
['x1', 'x2', 'x3']
>>> phi_copy.cardinality
array([2, 3, 3])
>>> phi_copy.values
array([[[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8]],
[[ 9, 10, 11],
[12, 13, 14],
[15, 16, 17]]])
"""
# not creating a new copy of self.values and self.cardinality
        # because the __init__ method does that.
return DiscreteFactor(self.scope(), self.cardinality, self.values)
def is_valid_cpd(self):
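        # A CPD is valid when marginalizing out its own variable (self.scope()[0])
        # leaves every remaining entry approximately equal to 1, i.e. each
        # conditional distribution sums to one.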
return np.allclose(self.to_factor().marginalize(self.scope()[:1], inplace=False).values.flatten('C'),
np.ones(np.product(self.cardinality[:0:-1])),
atol=0.01)
def __str__(self):
return self._str(phi_or_p='phi', tablefmt='grid')
def _str(self, phi_or_p="phi", tablefmt="grid", print_state_names=True):
"""
Generate the string from `__str__` method.
Parameters
----------
phi_or_p: 'phi' | 'p'
'phi': When used for Factors.
'p': When used for CPDs.
print_state_names: boolean
If True, the user defined state names are displayed.
"""
string_header = list(map(lambda x: six.text_type(x), self.scope()))
string_header.append('{phi_or_p}({variables})'.format(phi_or_p=phi_or_p,
variables=','.join(string_header)))
value_index = 0
factor_table = []
for prob in product(*[range(card) for card in self.cardinality]):
if self.state_names and print_state_names:
prob_list = ["{var}({state})".format(
var=list(self.variables)[i], state=self.state_names[list(
self.variables)[i]][prob[i]])
for i in range(len(self.variables))]
else:
prob_list = ["{s}_{d}".format(s=list(self.variables)[i], d=prob[i])
for i in range(len(self.variables))]
prob_list.append(self.values.ravel()[value_index])
factor_table.append(prob_list)
value_index += 1
return tabulate(factor_table, headers=string_header, tablefmt=tablefmt, floatfmt=".4f")
def __repr__(self):
var_card = ", ".join(['{var}:{card}'.format(var=var, card=card)
for var, card in zip(self.variables, self.cardinality)])
return "<DiscreteFactor representing phi({var_card}) at {address}>".format(
address=hex(id(self)), var_card=var_card)
def __mul__(self, other):
return self.product(other, inplace=False)
def __rmul__(self, other):
return self.__mul__(other)
def __add__(self, other):
return self.sum(other, inplace=False)
def __radd__(self, other):
return self.__add__(other)
def __truediv__(self, other):
return self.divide(other, inplace=False)
__div__ = __truediv__
def __eq__(self, other):
if not (isinstance(self, DiscreteFactor) and isinstance(other, DiscreteFactor)):
return False
elif set(self.scope()) != set(other.scope()):
return False
else:
phi = other.copy()
for axis in range(self.values.ndim):
exchange_index = phi.variables.index(self.variables[axis])
phi.variables[axis], phi.variables[exchange_index] = (phi.variables[exchange_index],
phi.variables[axis])
phi.cardinality[axis], phi.cardinality[exchange_index] = (phi.cardinality[exchange_index],
phi.cardinality[axis])
phi.values = phi.values.swapaxes(axis, exchange_index)
if phi.values.shape != self.values.shape:
return False
elif not np.allclose(phi.values, self.values):
return False
elif not all(self.cardinality == phi.cardinality):
return False
else:
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
variable_hashes = [hash(variable) for variable in self.variables]
sorted_var_hashes = sorted(variable_hashes)
phi = self.copy()
for axis in range(phi.values.ndim):
exchange_index = variable_hashes.index(sorted_var_hashes[axis])
variable_hashes[axis], variable_hashes[exchange_index] = (variable_hashes[exchange_index],
variable_hashes[axis])
phi.cardinality[axis], phi.cardinality[exchange_index] = (phi.cardinality[exchange_index],
phi.cardinality[axis])
phi.values = phi.values.swapaxes(axis, exchange_index)
return hash(str(sorted_var_hashes) + str(phi.values) + str(phi.cardinality))
|
the-stack_106_28335 | #!/usr/bin/env python
"""
Copyright (C) 2013 Bo Zhu http://about.bozhu.me
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from Crypto.Cipher import AES
from Crypto.Util import Counter
from Crypto.Util.number import long_to_bytes, bytes_to_long
# GF(2^128) defined by 1 + a + a^2 + a^7 + a^128
# Please note the MSB is x0 and LSB is x127
def gf_2_128_mul(x, y):
assert x < (1 << 128)
assert y < (1 << 128)
res = 0
for i in range(127, -1, -1):
res ^= x * ((y >> i) & 1) # branchless
x = (x >> 1) ^ ((x & 1) * 0xE1000000000000000000000000000000)
assert res < 1 << 128
return res
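# gf_2_128_mul follows the bit-reflected GCM convention noted above: bit 127 of the
# integer encodes the coefficient of x^0, so the field element "1" is represented by
# 1 << 127 and the constant 0xE1000...000 applies the reduction 1 + x + x^2 + x^7 on a
# right shift. A quick sanity check (illustrative values only):
#     assert gf_2_128_mul(0x1234, 1 << 127) == 0x1234  # multiplying by "1" is the identity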
class InvalidInputException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return str(self.msg)
class InvalidTagException(Exception):
def __str__(self):
        return 'The authentication tag is invalid.'
# Galois/Counter Mode with AES-128 and 96-bit IV
class AES_GCM:
def __init__(self, master_key):
self.change_key(master_key)
def change_key(self, master_key):
if master_key >= (1 << 128):
raise InvalidInputException('Master key should be 128-bit')
self.__master_key = long_to_bytes(master_key, 16)
self.__aes_ecb = AES.new(self.__master_key, AES.MODE_ECB)
self.__auth_key = bytes_to_long(self.__aes_ecb.encrypt(b'\x00' * 16))
# precompute the table for multiplication in finite field
table = [] # for 8-bit
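        # pre_table[i][b] caches auth_key * (b << 8*i) in GF(2^128), so that
        # __times_auth_key can replace a full 128-bit multiply with 16
        # byte-indexed table lookups XORed together.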
for i in range(16):
row = []
for j in range(256):
row.append(gf_2_128_mul(self.__auth_key, j << (8 * i)))
table.append(tuple(row))
self.__pre_table = tuple(table)
self.prev_init_value = None # reset
def __times_auth_key(self, val):
res = 0
for i in range(16):
res ^= self.__pre_table[i][val & 0xFF]
val >>= 8
return res
def __ghash(self, aad, txt):
len_aad = len(aad)
len_txt = len(txt)
# padding
if 0 == len_aad % 16:
data = aad
else:
data = aad + b'\x00' * (16 - len_aad % 16)
if 0 == len_txt % 16:
data += txt
else:
data += txt + b'\x00' * (16 - len_txt % 16)
tag = 0
assert len(data) % 16 == 0
for i in range(len(data) // 16):
tag ^= bytes_to_long(data[i * 16: (i + 1) * 16])
tag = self.__times_auth_key(tag)
# print 'X\t', hex(tag)
tag ^= ((8 * len_aad) << 64) | (8 * len_txt)
tag = self.__times_auth_key(tag)
return tag
def encrypt(self, init_value, plaintext, auth_data=b''):
if init_value >= (1 << 96):
raise InvalidInputException('IV should be 96-bit')
        # a naive check for IV reuse
if init_value == self.prev_init_value:
raise InvalidInputException('IV must not be reused!')
self.prev_init_value = init_value
len_plaintext = len(plaintext)
# len_auth_data = len(auth_data)
if len_plaintext > 0:
counter = Counter.new(
nbits=32,
prefix=long_to_bytes(init_value, 12),
initial_value=2, # notice this
allow_wraparound=False)
aes_ctr = AES.new(self.__master_key, AES.MODE_CTR, counter=counter)
if 0 != len_plaintext % 16:
padded_plaintext = plaintext + \
b'\x00' * (16 - len_plaintext % 16)
else:
padded_plaintext = plaintext
ciphertext = aes_ctr.encrypt(padded_plaintext)[:len_plaintext]
else:
ciphertext = b''
auth_tag = self.__ghash(auth_data, ciphertext)
# print 'GHASH\t', hex(auth_tag)
auth_tag ^= bytes_to_long(self.__aes_ecb.encrypt(
long_to_bytes((init_value << 32) | 1, 16)))
# assert len(ciphertext) == len(plaintext)
assert auth_tag < (1 << 128)
return ciphertext, auth_tag
def decrypt(self, init_value, ciphertext, auth_tag, auth_data=b''):
if init_value >= (1 << 96):
raise InvalidInputException('IV should be 96-bit')
if auth_tag >= (1 << 128):
raise InvalidInputException('Tag should be 128-bit')
if auth_tag != self.__ghash(auth_data, ciphertext) ^ \
bytes_to_long(self.__aes_ecb.encrypt(
long_to_bytes((init_value << 32) | 1, 16))):
raise InvalidTagException
len_ciphertext = len(ciphertext)
if len_ciphertext > 0:
counter = Counter.new(
nbits=32,
prefix=long_to_bytes(init_value, 12),
initial_value=2,
allow_wraparound=True)
aes_ctr = AES.new(self.__master_key, AES.MODE_CTR, counter=counter)
if 0 != len_ciphertext % 16:
padded_ciphertext = ciphertext + \
b'\x00' * (16 - len_ciphertext % 16)
else:
padded_ciphertext = ciphertext
plaintext = aes_ctr.decrypt(padded_ciphertext)[:len_ciphertext]
else:
plaintext = b''
return plaintext
if __name__ == '__main__':
master_key = 0xfeffe9928665731c6d6a8f9467308308
plaintext = b'\xd9\x31\x32\x25\xf8\x84\x06\xe5' + \
b'\xa5\x59\x09\xc5\xaf\xf5\x26\x9a' + \
b'\x86\xa7\xa9\x53\x15\x34\xf7\xda' + \
b'\x2e\x4c\x30\x3d\x8a\x31\x8a\x72' + \
b'\x1c\x3c\x0c\x95\x95\x68\x09\x53' + \
b'\x2f\xcf\x0e\x24\x49\xa6\xb5\x25' + \
b'\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57' + \
b'\xba\x63\x7b\x39'
auth_data = b'\xfe\xed\xfa\xce\xde\xad\xbe\xef' + \
b'\xfe\xed\xfa\xce\xde\xad\xbe\xef' + \
b'\xab\xad\xda\xd2'
init_value = 0xcafebabefacedbaddecaf888
ciphertext = b'\x42\x83\x1e\xc2\x21\x77\x74\x24' + \
b'\x4b\x72\x21\xb7\x84\xd0\xd4\x9c' + \
b'\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0' + \
b'\x35\xc1\x7e\x23\x29\xac\xa1\x2e' + \
b'\x21\xd5\x14\xb2\x54\x66\x93\x1c' + \
b'\x7d\x8f\x6a\x5a\xac\x84\xaa\x05' + \
b'\x1b\xa3\x0b\x39\x6a\x0a\xac\x97' + \
b'\x3d\x58\xe0\x91'
auth_tag = 0x5bc94fbc3221a5db94fae95ae7121a47
print('plaintext:', hex(bytes_to_long(plaintext)))
my_gcm = AES_GCM(master_key)
encrypted, new_tag = my_gcm.encrypt(init_value, plaintext, auth_data)
print('encrypted:', hex(bytes_to_long(encrypted)))
print('auth tag: ', hex(new_tag))
try:
decrypted = my_gcm.decrypt(init_value, encrypted,
new_tag + 1, auth_data)
except InvalidTagException:
decrypted = my_gcm.decrypt(init_value, encrypted, new_tag, auth_data)
print('decrypted:', hex(bytes_to_long(decrypted)))
|
the-stack_106_28336 | """
Msgpack serializer support for reading and writing pandas data structures
to disk
portions of msgpack_numpy package, by Lev Givon were incorporated
into this module (and tests_packers.py)
License
=======
Copyright (c) 2013, Lev Givon.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Lev Givon nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import date, datetime, timedelta
from io import BytesIO
import os
import warnings
from dateutil.parser import parse
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.errors import PerformanceWarning
from pandas.util._move import (
BadMove as _BadMove,
move_into_mutable_buffer as _move_into_mutable_buffer,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64tz_dtype,
is_object_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas import ( # noqa:F401
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Float64Index,
Index,
Int64Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
Timestamp,
)
from pandas.core import internals
from pandas.core.arrays import DatetimeArray, IntervalArray, PeriodArray
from pandas.core.arrays.sparse import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.core.internals import BlockManager, _safe_reshape, make_block
from pandas.core.sparse.api import SparseDataFrame, SparseSeries
from pandas.io.common import _stringify_path, get_filepath_or_buffer
from pandas.io.msgpack import ExtType, Packer as _Packer, Unpacker as _Unpacker
# until we can pass this into our conversion functions,
# this is pretty hacky
compressor = None
def to_msgpack(path_or_buf, *args, **kwargs):
"""
msgpack (serialize) object to input file path
.. deprecated:: 0.25.0
to_msgpack is deprecated and will be removed in a future version.
It is recommended to use pyarrow for on-the-wire transmission of
pandas objects.
Parameters
----------
path_or_buf : string File path, buffer-like, or None
if None, return generated bytes
args : an object or objects to serialize
encoding : encoding for unicode objects
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
"""
warnings.warn(
"to_msgpack is deprecated and will be removed in a "
"future version.\n"
"It is recommended to use pyarrow for on-the-wire "
"transmission of pandas objects.",
FutureWarning,
stacklevel=3,
)
global compressor
compressor = kwargs.pop("compress", None)
append = kwargs.pop("append", None)
if append:
mode = "a+b"
else:
mode = "wb"
def writer(fh):
for a in args:
fh.write(pack(a, **kwargs))
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
try:
with open(path_or_buf, mode) as fh:
writer(fh)
except FileNotFoundError:
msg = "File b'{}' does not exist".format(path_or_buf)
raise FileNotFoundError(msg)
elif path_or_buf is None:
buf = BytesIO()
writer(buf)
return buf.getvalue()
else:
writer(path_or_buf)
def read_msgpack(path_or_buf, encoding="utf-8", iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
file path.
.. deprecated:: 0.25.0
read_msgpack is deprecated and will be removed in a future version.
It is recommended to use pyarrow for on-the-wire transmission of
pandas objects.
Parameters
----------
path_or_buf : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function) or
``StringIO``.
encoding : Encoding for decoding msgpack str type
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : same type as object stored in file
Notes
-----
read_msgpack is only guaranteed to be backwards compatible to pandas
0.20.3.
"""
warnings.warn(
"The read_msgpack is deprecated and will be removed in a "
"future version.\n"
"It is recommended to use pyarrow for on-the-wire "
"transmission of pandas objects.",
FutureWarning,
stacklevel=3,
)
path_or_buf, _, _, should_close = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
def read(fh):
unpacked_obj = list(unpack(fh, encoding=encoding, **kwargs))
if len(unpacked_obj) == 1:
return unpacked_obj[0]
if should_close:
try:
path_or_buf.close()
except IOError:
pass
return unpacked_obj
# see if we have an actual file
if isinstance(path_or_buf, str):
try:
with open(path_or_buf, "rb") as fh:
return read(fh)
except FileNotFoundError:
msg = "File b'{}' does not exist".format(path_or_buf)
raise FileNotFoundError(msg)
if isinstance(path_or_buf, bytes):
# treat as a binary-like
fh = None
try:
fh = BytesIO(path_or_buf)
return read(fh)
finally:
if fh is not None:
fh.close()
elif hasattr(path_or_buf, "read") and callable(path_or_buf.read):
# treat as a buffer like
return read(path_or_buf)
raise ValueError("path_or_buf needs to be a string file path or file-like")
dtype_dict = {
21: np.dtype("M8[ns]"),
"datetime64[ns]": np.dtype("M8[ns]"),
"datetime64[us]": np.dtype("M8[us]"),
22: np.dtype("m8[ns]"),
"timedelta64[ns]": np.dtype("m8[ns]"),
"timedelta64[us]": np.dtype("m8[us]"),
# this is platform int, which we need to remap to np.int64
# for compat on windows platforms
7: np.dtype("int64"),
"category": "category",
}
def dtype_for(t):
""" return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict.get(t, t)
c2f_dict = {"complex": np.float64, "complex128": np.float64, "complex64": np.float32}
# windows (32 bit) compat
if hasattr(np, "float128"):
c2f_dict["complex256"] = np.float128
def c2f(r, i, ctype_name):
"""
Convert strings to complex number instance with specified numpy type.
"""
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
def convert(values):
""" convert the numpy values to a list """
dtype = values.dtype
if is_categorical_dtype(values):
return values
elif is_object_dtype(dtype):
return values.ravel().tolist()
if needs_i8_conversion(dtype):
values = values.view("i8")
v = values.ravel()
if compressor == "zlib":
zlib = import_optional_dependency(
"zlib", extra="zlib is required when `compress='zlib'`."
)
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, zlib.compress(v))
elif compressor == "blosc":
blosc = import_optional_dependency(
"blosc", extra="zlib is required when `compress='blosc'`."
)
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))
# ndarray (on original dtype)
return ExtType(0, v.tostring())
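# convert() above and unconvert() below agree on ExtType code 0 as the carrier for raw
# (optionally zlib/blosc-compressed) ndarray bytes; a non-ExtType payload is treated as
# a latin-1 encoded string produced by an older writer.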
def unconvert(values, dtype, compress=None):
as_is_ext = isinstance(values, ExtType) and values.code == 0
if as_is_ext:
values = values.data
if is_categorical_dtype(dtype):
return values
elif is_object_dtype(dtype):
return np.array(values, dtype=object)
dtype = pandas_dtype(dtype).base
if not as_is_ext:
values = values.encode("latin1")
if compress:
if compress == "zlib":
zlib = import_optional_dependency(
"zlib", extra="zlib is required when `compress='zlib'`."
)
decompress = zlib.decompress
elif compress == "blosc":
blosc = import_optional_dependency(
"blosc", extra="zlib is required when `compress='blosc'`."
)
decompress = blosc.decompress
else:
raise ValueError("compress must be one of 'zlib' or 'blosc'")
try:
return np.frombuffer(
_move_into_mutable_buffer(decompress(values)), dtype=dtype
)
except _BadMove as e:
# Pull the decompressed data off of the `_BadMove` exception.
# We don't just store this in the locals because we want to
# minimize the risk of giving users access to a `bytes` object
# whose data is also given to a mutable buffer.
values = e.args[0]
if len(values) > 1:
# The empty string and single characters are memoized in many
# string creating functions in the capi. This case should not
# warn even though we need to make a copy because we are only
# copying at most 1 byte.
warnings.warn(
"copying data after decompressing; this may mean that"
" decompress is caching its result",
PerformanceWarning,
)
                # fall through to copying via np.frombuffer below
# Copy the bytes into a numpy array.
buf = np.frombuffer(values, dtype=dtype)
buf = buf.copy() # required to not mutate the original data
buf.flags.writeable = True
return buf
def encode(obj):
"""
Data encoder
"""
tobj = type(obj)
if isinstance(obj, Index):
if isinstance(obj, RangeIndex):
return {
"typ": "range_index",
"klass": obj.__class__.__name__,
"name": getattr(obj, "name", None),
"start": obj._range.start,
"stop": obj._range.stop,
"step": obj._range.step,
}
elif isinstance(obj, PeriodIndex):
return {
"typ": "period_index",
"klass": obj.__class__.__name__,
"name": getattr(obj, "name", None),
"freq": getattr(obj, "freqstr", None),
"dtype": obj.dtype.name,
"data": convert(obj.asi8),
"compress": compressor,
}
elif isinstance(obj, DatetimeIndex):
tz = getattr(obj, "tz", None)
# store tz info and data as UTC
if tz is not None:
tz = tz.zone
obj = obj.tz_convert("UTC")
return {
"typ": "datetime_index",
"klass": obj.__class__.__name__,
"name": getattr(obj, "name", None),
"dtype": obj.dtype.name,
"data": convert(obj.asi8),
"freq": getattr(obj, "freqstr", None),
"tz": tz,
"compress": compressor,
}
elif isinstance(obj, (IntervalIndex, IntervalArray)):
if isinstance(obj, IntervalIndex):
typ = "interval_index"
else:
typ = "interval_array"
return {
"typ": typ,
"klass": obj.__class__.__name__,
"name": getattr(obj, "name", None),
"left": getattr(obj, "left", None),
"right": getattr(obj, "right", None),
"closed": getattr(obj, "closed", None),
}
elif isinstance(obj, MultiIndex):
return {
"typ": "multi_index",
"klass": obj.__class__.__name__,
"names": getattr(obj, "names", None),
"dtype": obj.dtype.name,
"data": convert(obj.values),
"compress": compressor,
}
else:
return {
"typ": "index",
"klass": obj.__class__.__name__,
"name": getattr(obj, "name", None),
"dtype": obj.dtype.name,
"data": convert(obj.values),
"compress": compressor,
}
elif isinstance(obj, Categorical):
return {
"typ": "category",
"klass": obj.__class__.__name__,
"name": getattr(obj, "name", None),
"codes": obj.codes,
"categories": obj.categories,
"ordered": obj.ordered,
"compress": compressor,
}
elif isinstance(obj, Series):
if isinstance(obj, SparseSeries):
raise NotImplementedError("msgpack sparse series is not implemented")
# d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
# 'dtype': obj.dtype.name,
# 'index': obj.index,
# 'sp_index': obj.sp_index,
# 'sp_values': convert(obj.sp_values),
# 'compress': compressor}
# for f in ['name', 'fill_value', 'kind']:
# d[f] = getattr(obj, f, None)
# return d
else:
return {
"typ": "series",
"klass": obj.__class__.__name__,
"name": getattr(obj, "name", None),
"index": obj.index,
"dtype": obj.dtype.name,
"data": convert(obj.values),
"compress": compressor,
}
elif issubclass(tobj, NDFrame):
if isinstance(obj, SparseDataFrame):
raise NotImplementedError("msgpack sparse frame is not implemented")
# d = {'typ': 'sparse_dataframe',
# 'klass': obj.__class__.__name__,
# 'columns': obj.columns}
# for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
# d['data'] = dict([(name, ss)
# for name, ss in obj.items()])
# return d
else:
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
# the block manager
return {
"typ": "block_manager",
"klass": obj.__class__.__name__,
"axes": data.axes,
"blocks": [
{
"locs": b.mgr_locs.as_array,
"values": convert(b.values),
"shape": b.values.shape,
"dtype": b.dtype.name,
"klass": b.__class__.__name__,
"compress": compressor,
}
for b in data.blocks
],
}
elif (
isinstance(obj, (datetime, date, np.datetime64, timedelta, np.timedelta64))
or obj is NaT
):
if isinstance(obj, Timestamp):
tz = obj.tzinfo
if tz is not None:
tz = tz.zone
freq = obj.freq
if freq is not None:
freq = freq.freqstr
return {"typ": "timestamp", "value": obj.value, "freq": freq, "tz": tz}
if obj is NaT:
return {"typ": "nat"}
elif isinstance(obj, np.timedelta64):
return {"typ": "timedelta64", "data": obj.view("i8")}
elif isinstance(obj, timedelta):
return {
"typ": "timedelta",
"data": (obj.days, obj.seconds, obj.microseconds),
}
elif isinstance(obj, np.datetime64):
return {"typ": "datetime64", "data": str(obj)}
elif isinstance(obj, datetime):
return {"typ": "datetime", "data": obj.isoformat()}
elif isinstance(obj, date):
return {"typ": "date", "data": obj.isoformat()}
raise Exception("cannot encode this datetimelike object: {obj}".format(obj=obj))
elif isinstance(obj, Period):
return {"typ": "period", "ordinal": obj.ordinal, "freq": obj.freqstr}
elif isinstance(obj, Interval):
return {
"typ": "interval",
"left": obj.left,
"right": obj.right,
"closed": obj.closed,
}
elif isinstance(obj, BlockIndex):
return {
"typ": "block_index",
"klass": obj.__class__.__name__,
"blocs": obj.blocs,
"blengths": obj.blengths,
"length": obj.length,
}
elif isinstance(obj, IntIndex):
return {
"typ": "int_index",
"klass": obj.__class__.__name__,
"indices": obj.indices,
"length": obj.length,
}
elif isinstance(obj, np.ndarray):
return {
"typ": "ndarray",
"shape": obj.shape,
"ndim": obj.ndim,
"dtype": obj.dtype.name,
"data": convert(obj),
"compress": compressor,
}
elif isinstance(obj, np.number):
if np.iscomplexobj(obj):
return {
"typ": "np_scalar",
"sub_typ": "np_complex",
"dtype": obj.dtype.name,
"real": np.real(obj).__repr__(),
"imag": np.imag(obj).__repr__(),
}
else:
return {"typ": "np_scalar", "dtype": obj.dtype.name, "data": obj.__repr__()}
elif isinstance(obj, complex):
return {
"typ": "np_complex",
"real": np.real(obj).__repr__(),
"imag": np.imag(obj).__repr__(),
}
return obj
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get("typ")
if typ is None:
return obj
elif typ == "timestamp":
freq = obj["freq"] if "freq" in obj else obj["offset"]
return Timestamp(obj["value"], tz=obj["tz"], freq=freq)
elif typ == "nat":
return NaT
elif typ == "period":
return Period(ordinal=obj["ordinal"], freq=obj["freq"])
elif typ == "index":
dtype = dtype_for(obj["dtype"])
data = unconvert(obj["data"], dtype, obj.get("compress"))
return Index(data, dtype=dtype, name=obj["name"])
elif typ == "range_index":
return RangeIndex(obj["start"], obj["stop"], obj["step"], name=obj["name"])
elif typ == "multi_index":
dtype = dtype_for(obj["dtype"])
data = unconvert(obj["data"], dtype, obj.get("compress"))
data = [tuple(x) for x in data]
return MultiIndex.from_tuples(data, names=obj["names"])
elif typ == "period_index":
data = unconvert(obj["data"], np.int64, obj.get("compress"))
d = dict(name=obj["name"], freq=obj["freq"])
freq = d.pop("freq", None)
return PeriodIndex(PeriodArray(data, freq), **d)
elif typ == "datetime_index":
data = unconvert(obj["data"], np.int64, obj.get("compress"))
d = dict(name=obj["name"], freq=obj["freq"])
result = DatetimeIndex(data, **d)
tz = obj["tz"]
# reverse tz conversion
if tz is not None:
result = result.tz_localize("UTC").tz_convert(tz)
return result
elif typ in ("interval_index", "interval_array"):
return globals()[obj["klass"]].from_arrays(
obj["left"], obj["right"], obj["closed"], name=obj["name"]
)
elif typ == "category":
from_codes = globals()[obj["klass"]].from_codes
return from_codes(
codes=obj["codes"], categories=obj["categories"], ordered=obj["ordered"]
)
elif typ == "interval":
return Interval(obj["left"], obj["right"], obj["closed"])
elif typ == "series":
dtype = dtype_for(obj["dtype"])
index = obj["index"]
data = unconvert(obj["data"], dtype, obj["compress"])
return Series(data, index=index, dtype=dtype, name=obj["name"])
elif typ == "block_manager":
axes = obj["axes"]
def create_block(b):
values = _safe_reshape(
unconvert(b["values"], dtype_for(b["dtype"]), b["compress"]), b["shape"]
)
# locs handles duplicate column names, and should be used instead
# of items; see GH 9618
if "locs" in b:
placement = b["locs"]
else:
placement = axes[0].get_indexer(b["items"])
if is_datetime64tz_dtype(b["dtype"]):
assert isinstance(values, np.ndarray), type(values)
assert values.dtype == "M8[ns]", values.dtype
values = DatetimeArray(values, dtype=b["dtype"])
return make_block(
values=values,
klass=getattr(internals, b["klass"]),
placement=placement,
dtype=b["dtype"],
)
blocks = [create_block(b) for b in obj["blocks"]]
return globals()[obj["klass"]](BlockManager(blocks, axes))
elif typ == "datetime":
return parse(obj["data"])
elif typ == "datetime64":
return np.datetime64(parse(obj["data"]))
elif typ == "date":
return parse(obj["data"]).date()
elif typ == "timedelta":
return timedelta(*obj["data"])
elif typ == "timedelta64":
return np.timedelta64(int(obj["data"]))
# elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
# return SparseSeries(
# unconvert(obj['sp_values'], dtype, obj['compress']),
# sparse_index=obj['sp_index'], index=obj['index'],
# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
# elif typ == 'sparse_dataframe':
# return SparseDataFrame(
# obj['data'], columns=obj['columns'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind']
# )
elif typ == "block_index":
return globals()[obj["klass"]](obj["length"], obj["blocs"], obj["blengths"])
elif typ == "int_index":
return globals()[obj["klass"]](obj["length"], obj["indices"])
elif typ == "ndarray":
return unconvert(
obj["data"], np.typeDict[obj["dtype"]], obj.get("compress")
).reshape(obj["shape"])
elif typ == "np_scalar":
if obj.get("sub_typ") == "np_complex":
return c2f(obj["real"], obj["imag"], obj["dtype"])
else:
dtype = dtype_for(obj["dtype"])
try:
return dtype(obj["data"])
except (ValueError, TypeError):
return dtype.type(obj["data"])
elif typ == "np_complex":
return complex(obj["real"] + "+" + obj["imag"] + "j")
elif isinstance(obj, (dict, list, set)):
return obj
else:
return obj
def pack(
o,
default=encode,
encoding="utf-8",
unicode_errors="strict",
use_single_float=False,
autoreset=1,
use_bin_type=1,
):
"""
Pack an object and return the packed bytes.
"""
return Packer(
default=default,
encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type,
).pack(o)
def unpack(
packed,
object_hook=decode,
list_hook=None,
use_list=False,
encoding="utf-8",
unicode_errors="strict",
object_pairs_hook=None,
max_buffer_size=0,
ext_hook=ExtType,
):
"""
Unpack a packed object, return an iterator
Note: packed lists will be returned as tuples
"""
return Unpacker(
packed,
object_hook=object_hook,
list_hook=list_hook,
use_list=use_list,
encoding=encoding,
unicode_errors=unicode_errors,
object_pairs_hook=object_pairs_hook,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook,
)
class Packer(_Packer):
def __init__(
self,
default=encode,
encoding="utf-8",
unicode_errors="strict",
use_single_float=False,
autoreset=1,
use_bin_type=1,
):
super().__init__(
default=default,
encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type,
)
class Unpacker(_Unpacker):
def __init__(
self,
file_like=None,
read_size=0,
use_list=False,
object_hook=decode,
object_pairs_hook=None,
list_hook=None,
encoding="utf-8",
unicode_errors="strict",
max_buffer_size=0,
ext_hook=ExtType,
):
super().__init__(
file_like=file_like,
read_size=read_size,
use_list=use_list,
object_hook=object_hook,
object_pairs_hook=object_pairs_hook,
list_hook=list_hook,
encoding=encoding,
unicode_errors=unicode_errors,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook,
)
class Iterator:
""" manage the unpacking iteration,
close the file on completion """
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
def __iter__(self):
needs_closing = True
try:
# see if we have an actual file
if isinstance(self.path, str):
try:
path_exists = os.path.exists(self.path)
except TypeError:
path_exists = False
if path_exists:
fh = open(self.path, "rb")
else:
fh = BytesIO(self.path)
else:
if not hasattr(self.path, "read"):
fh = BytesIO(self.path)
else:
# a file-like
needs_closing = False
fh = self.path
unpacker = unpack(fh)
for o in unpacker:
yield o
finally:
if needs_closing:
fh.close()
|
the-stack_106_28339 | import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import numpy as np
kernels = []
data = []
with open("plot_data_orig.txt") as f:
for line in f:
kernel, vals = line.strip().split(":")
kernels.append(kernel.strip())
data.append(eval(vals))
data = np.asarray(data)
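# Each line of plot_data_orig.txt is expected to look like
#     kernel_name: [0.81, 0.83, ...]
# i.e. a kernel label, a colon, then a Python-style list of per-category accuracies
# (the example numbers are illustrative).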
jet = cm = plt.get_cmap('Dark2')
cNorm = colors.Normalize(vmin=0, vmax=len(kernels))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
x_axis = range(1,len(data[0])+1)
# handles = []
for idx, kernel in enumerate(kernels):
colorVal = scalarMap.to_rgba(idx)
handle = plt.scatter(x_axis,data[idx,:]*100,color=colorVal,label=kernel)
plt.plot(x_axis,data[idx,:]*100,color=colorVal)
# print(handle.get_label())
# print(handle)
# handles.append(handle)
# plt.legend(handles,loc='lower right')
plt.legend(loc='lower right')
plt.xlabel("13 functional categories")
plt.ylabel("Accuracy (in percent)")
plt.grid()
plt.show()
|
the-stack_106_28341 | """Activate coverage at python startup if appropriate.
The python site initialisation will ensure that anything we import
will be removed and not visible at the end of python startup. However
we minimise all work by putting these init actions in this separate
module and only importing what is needed when needed.
For normal python startup when coverage should not be activated the pth
file checks a single env var and does not import or call the init fn
here.
For python startup when an ancestor process has set the env indicating
that code coverage is being collected we activate coverage based on
info passed via env vars.
"""
import atexit
import os
import signal
_active_cov = None
def multiprocessing_start(_):
global _active_cov
cov = init()
if cov:
_active_cov = cov
multiprocessing.util.Finalize(None, cleanup, exitpriority=1000)
try:
import multiprocessing.util
except ImportError:
pass
else:
multiprocessing.util.register_after_fork(multiprocessing_start, multiprocessing_start)
def multiprocess_start(_):
global _active_cov
cov = init()
if cov:
_active_cov = cov
multiprocess.util.Finalize(None, cleanup, exitpriority=1000)
try:
import multiprocess.util
except ImportError:
pass
else:
multiprocess.util.register_after_fork(multiprocess_start, multiprocess_start)
def init():
# Only continue if ancestor process has set everything needed in
# the env.
global _active_cov
cov_source = os.environ.get('COV_CORE_SOURCE')
cov_config = os.environ.get('COV_CORE_CONFIG')
cov_datafile = os.environ.get('COV_CORE_DATAFILE')
cov_branch = True if os.environ.get('COV_CORE_BRANCH') == 'enabled' else None
cov_context = os.environ.get('COV_CORE_CONTEXT')
if cov_datafile:
if _active_cov:
cleanup()
# Import what we need to activate coverage.
import coverage
# Determine all source roots.
if cov_source in os.pathsep:
cov_source = None
else:
cov_source = cov_source.split(os.pathsep)
if cov_config == os.pathsep:
cov_config = True
# Activate coverage for this process.
cov = _active_cov = coverage.Coverage(
source=cov_source,
branch=cov_branch,
data_suffix=True,
config_file=cov_config,
auto_data=True,
data_file=cov_datafile
)
cov.load()
cov.start()
if cov_context:
cov.switch_context(cov_context)
cov._warn_no_data = False
cov._warn_unimported_source = False
return cov
def _cleanup(cov):
if cov is not None:
cov.stop()
cov.save()
cov._auto_save = False # prevent autosaving from cov._atexit in case the interpreter lacks atexit.unregister
try:
atexit.unregister(cov._atexit)
except Exception:
pass
def cleanup():
global _active_cov
global _cleanup_in_progress
global _pending_signal
_cleanup_in_progress = True
_cleanup(_active_cov)
_active_cov = None
_cleanup_in_progress = False
if _pending_signal:
        pending_signal = _pending_signal
        _pending_signal = None
        _signal_cleanup_handler(*pending_signal)
multiprocessing_finish = cleanup # in case someone dared to use this internal
_previous_handlers = {}
_pending_signal = None
_cleanup_in_progress = False
def _signal_cleanup_handler(signum, frame):
global _pending_signal
if _cleanup_in_progress:
_pending_signal = signum, frame
return
cleanup()
_previous_handler = _previous_handlers.get(signum)
if _previous_handler == signal.SIG_IGN:
return
elif _previous_handler and _previous_handler is not _signal_cleanup_handler:
_previous_handler(signum, frame)
elif signum == signal.SIGTERM:
os._exit(128 + signum)
elif signum == signal.SIGINT:
raise KeyboardInterrupt()
def cleanup_on_signal(signum):
previous = signal.getsignal(signum)
if previous is not _signal_cleanup_handler:
_previous_handlers[signum] = previous
signal.signal(signum, _signal_cleanup_handler)
def cleanup_on_sigterm():
cleanup_on_signal(signal.SIGTERM)
|
the-stack_106_28346 |
import copy
configs = dict()
config = dict(
agent=dict(
action_squash=1.,
pretrain_std=0.75, # 0.75 gets pretty uniform actions
load_conv=True,
load_all=False,
store_latent=False,
state_dict_filename=None,
),
conv=dict(
channels=[32, 32, 32, 32],
kernel_sizes=[3, 3, 3, 3],
strides=[2, 2, 2, 1],
paddings=None,
),
fc1=dict(
latent_size=50,
layer_norm=True,
),
pi_model=dict(
hidden_sizes=[1024, 1024],
min_log_std=-10,
max_log_std=2,
),
q_model=dict(hidden_sizes=[1024, 1024]),
algo=dict(
discount=0.99,
batch_size=512,
# replay_ratio=512, # data_consumption / data_generation
min_steps_learn=int(1e4),
replay_size=int(1e5),
target_update_tau=0.01, # tau=1 for hard update.
target_update_interval=2,
actor_update_interval=2,
# OptimCls=torch.optim.Adam,
initial_optim_state_dict=None, # for all of them.
action_prior="uniform", # or "gaussian"
reward_scale=1,
target_entropy="auto", # "auto", float, or None
reparameterize=True,
clip_grad_norm=1e6,
n_step_return=1,
# updates_per_sync=1, # For async mode only.
bootstrap_timelimit=True,
# crop_size=84, # Get from agent.
q_lr=2e-4,
pi_lr=2e-4,
alpha_lr=1e-4,
q_beta=0.9,
pi_beta=0.9,
alpha_beta=0.5,
alpha_init=0.1,
encoder_update_tau=0.05,
augmentation="random_shift", # [None, "random_shift", "subpixel_shift"]
random_shift_pad=4, # how much to pad on each direction (like DrQ style)
random_shift_prob=1.,
stop_conv_grad=False,
max_pixel_shift=1.,
),
env=dict(
domain_name="cheetah",
task_name="run",
from_pixels=True,
frame_stack=3,
frame_skip=4,
height=84,
width=84,
),
optim=dict(),
runner=dict(
n_steps=1e5,
log_interval_steps=1e3,
),
sampler=dict(
batch_T=1,
batch_B=1,
max_decorrelation_steps=0,
eval_n_envs=5,
eval_max_steps=int(10000),
eval_max_trajectories=10,
),
pretrain=dict( # Populate these for logging, to compare
name=None,
algo=None,
n_updates=None,
batch_size=None,
batch_T=None,
batch_B=None,
delta_T=None,
learning_rate=None,
target_update_tau=None,
target_update_interval=None,
replay=None,
model_dir=None,
clip_grad_norm=None,
activation_loss_coefficient=None,
learning_rate_anneal=None,
learning_rate_warmup=None,
data_aug=None,
random_shift_pad=None,
random_shift_prob=None,
latent_size=None,
anchor_hidden_sizes=None,
hidden_sizes=None,
kl_coeff=None,
weight_decay=None,
kiaming_init=None,
run_ID=0,
log_interval_updates=None,
)
)
configs["serial_radsac"] = config
config = copy.deepcopy(configs["serial_radsac"])
config["agent"]["load_conv"] = False
config["algo"]["min_steps_learn"] = 5e3
config["runner"]["n_steps"] = 50e3
config["algo"]["replay_size"] = 50e3
config["sampler"]["eval_max_steps"] = 30e3
config["sampler"]["eval_max_trajectories"] = 30
configs["replaysave"] = config
|
the-stack_106_28348 | import RPi.GPIO as GPIO
import time
from math import *
from random import *
off = True
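# Simple interactive pin control: entering 1 claims board pin 23 and drives it low
# (presumably an active-low load), entering 0 releases the pin via cleanup(), and any
# other integer exits the loop.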
while True:
level = int(input("Input: "))
if level > 1 or level < 0:
break
if level == 1 and off:
off = False
GPIO.setmode(GPIO.BOARD)
GPIO.setup(23, GPIO.OUT)
GPIO.output(23, 0)
elif level == 0 and not off:
off = True
GPIO.cleanup()
GPIO.cleanup()
|
the-stack_106_28352 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
import urllib.request
import urllib.parse
import os
import sys
BASE_DIR = os.path.dirname(os.getcwd())
# Set the working directory so that packages and modules can be imported correctly
sys.path.append(BASE_DIR)
from conf import settings
def update_test(data):
"""
创建测试用例
:return:
"""
    # Pack the data into a dictionary and convert it to JSON format
data = {"asset_data": json.dumps(data)}
    # Build the URL from the configuration in settings
url = "http://%s:%s%s" % (settings.Params['server'], settings.Params['port'], settings.Params['url'])
    print('Sending data to: [%s] ......' % url)
try:
        # Use Python's built-in urllib.request library to send a POST request.
        # The data must be urlencoded first and converted to bytes.
data_encode = urllib.parse.urlencode(data).encode()
response = urllib.request.urlopen(url=url, data=data_encode, timeout=settings.Params['request_timeout'])
print("\033[31;1m发送完毕!\033[0m ")
message = response.read().decode()
print("返回结果:%s" % message)
except Exception as e:
message = "发送失败"
print("\033[31;1m发送失败,%s\033[0m" % e)
if __name__ == '__main__':
windows_data = {
"os_type": "Windows",
"os_release": "7 64bit 6.1.7601 ",
"os_distribution": "Microsoft",
"asset_type": "server",
"cpu_count": 2,
"cpu_model": "Intel(R) Core(TM) i5-2300 CPU @ 2.80GHz",
"cpu_core_count": 8,
"ram": [
{
"slot": "A1",
"capacity": 8,
"model": "Physical Memory",
"manufacturer": "kingstone ",
"sn": "456"
},
],
"manufacturer": "Intel",
"model": "P67X-UD3R-B3",
"wake_up_type": 6,
"sn": "00426-OEM-8992662-3333",
"physical_disk_driver": [
{
"iface_type": "unknown",
"slot": 0,
"sn": "3830414130423230343234362020202020202020",
"model": "KINGSTON SV100S264G ATA Device",
"manufacturer": "(标准磁盘驱动器)",
"capacity": 128
},
{
"iface_type": "SATA",
"slot": 1,
"sn": "383041413042323023234362020102020202020",
"model": "KINGSTON SV100S264G ATA Device",
"manufacturer": "(标准磁盘驱动器)",
"capacity": 2048
},
],
"nic": [
{
"mac": "0A:01:27:00:00:00",
"model": "[00000013] VirtualBox Host-Only Ethernet Adapter",
"name": 13,
"ip_address": "192.168.56.1",
"net_mask": [
"255.255.255.0",
"64"
]
},
{
"mac": "24:CF:22:FF:48:34",
"model": "[00000017] Microsoft Virtual WiFi Miniport Adapter",
"name": 17,
"ip_address": "",
"net_mask": ""
},
{
"mac": "24:CF:22:FF:48:34",
"model": "Intel Adapter",
"name": 17,
"ip_address": "192.1.1.1",
"net_mask": ""
},
]
}
linux_data = {
"asset_type": "server",
"manufacturer": "innotek GmbH",
"sn": "00003",
"model": "VirtualBox",
"uuid": "E8DE611C-4279-495C-9B58-502B6FCED076",
"wake_up_type": "Power Switch",
"os_distribution": "Ubuntu",
"os_release": "Ubuntu 16.04.3 LTS",
"os_type": "Linux",
"cpu_count": "2",
"cpu_core_count": "4",
"cpu_model": "Intel(R) Core(TM) i5-2300 CPU @ 2.80GHz",
"ram": [
{
"slot": "A1",
"capacity": 8,
}
],
"ram_size": 3.858997344970703,
"nic": [],
"physical_disk_driver": [
{
"model": "VBOX HARDDISK",
"size": "50",
"sn": "VBeee1ba73-09085302"
}
]
}
update_test(linux_data)
update_test(windows_data) |
the-stack_106_28353 | """
End-to-End Multi-Lingual Optical Character Recognition (OCR) Solution
"""
from setuptools import setup
from io import open
with open('requirements.txt', encoding="utf-8-sig") as f:
requirements = f.readlines()
def readme():
with open('README.md', encoding="utf-8-sig") as f:
README = f.read()
return README
setup(
name='easyocr',
packages=['easyocr'],
include_package_data=True,
version='1.4.2',
install_requires=requirements,
entry_points={"console_scripts": ["easyocr= easyocr.cli:main"]},
license='Apache License 2.0',
description='End-to-End Multi-Lingual Optical Character Recognition (OCR) Solution',
long_description=readme(),
long_description_content_type="text/markdown",
author='Rakpong Kittinaradorn',
author_email='[email protected]',
url='https://github.com/jaidedai/easyocr',
download_url='https://github.com/jaidedai/easyocr.git',
keywords=['ocr optical character recognition deep learning neural network'],
classifiers=[
'Development Status :: 5 - Production/Stable'
],
)
|
the-stack_106_28354 | '''
Author: your name
Date: 2021-12-07 14:53:54
LastEditTime: 2021-12-07 15:26:52
LastEditors: Please set LastEditors
Description: 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
FilePath: /PG-engine/run/single_view.py
'''
import argparse
import os
from pickle import load
import sys
from numpy import PINF
cur_dir = os.path.dirname(os.path.abspath(__file__))
root_dir = os.path.join(cur_dir,'..')
sys.path.insert(0,os.path.join(root_dir,'src'))
sys.path.insert(0,os.path.join(root_dir,'run'))
from data.vibe_utils import *
from config import cfg
import scipy.io as sio
from toolkits import single_view_render
from pipeline import PipeLine
from tools.file_op import mkdir_safe
cfg.Engine.root_dir = root_dir
if not os.path.exists(cfg.Engine.output_dir):
cfg.Engine.output_dir =os.path.join(cfg.Engine.root_dir,cfg.Engine.output_dir)
mkdir_safe(cfg.Engine.output_dir)
def load_tmp_info(mat_file):
data=sio.loadmat(mat_file)
return data
def parser_upate():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--name', type=str, default='test',
                        help='the name shared by all views, used to create the output directory.')
parser.add_argument('--view_id', type=int, default=0,
help='the view id.')
parser.add_argument('--cam_height', type=float, default=1.0,
help='the cam height.')
parser.add_argument('--cam_dist', type=float, default=8.0,
help='the cam dist.')
parser.add_argument('--zrot', type=float, default=0,
                        help='the camera z rotation.')
parser.add_argument('--label', type=str, default='../label_info/data.mat',
                        help='the .mat file containing the data needed for rendering.')
parser.add_argument('--fskip', type=int, default=1,
help='the number of skip frames')
tmp=sys.argv[sys.argv.index("--") + 1 :]
return parser.parse_args(tmp[1:])
if __name__=="__main__":
args=parser_upate()
print(args.view_id)
data=load_tmp_info(args.label)
cam=[args.cam_height,args.cam_dist,args.zrot]
view_name = 'camera_{:04d}'.format(args.view_id)
# print(data['light'])
# renderer=PipeLine(cfg,args.name,
# view_name,
# data['num_model'][0][0],
# genders=data['genders'],
# bg_img=data['bg_img'][0],
# textures=data['textures'],
# shape=data['shape'],
# sh_coeffs=data['light'][0])
renderer=PipeLine(cfg,args.name,view_name,data['num_model'][0][0])
print(data['pose'].shape)
single_view_render(renderer,data['pose'],data['trans'],cfg,cam_obs=cam,fskip=args.fskip) |
the-stack_106_28355 | """Binary Sensor platform for Sensibo integration."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import TYPE_CHECKING
from pysensibo.model import MotionSensor, SensiboDevice
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .coordinator import SensiboDataUpdateCoordinator
from .entity import SensiboDeviceBaseEntity, SensiboMotionBaseEntity
PARALLEL_UPDATES = 0
@dataclass
class MotionBaseEntityDescriptionMixin:
"""Mixin for required Sensibo base description keys."""
value_fn: Callable[[MotionSensor], bool | None]
@dataclass
class DeviceBaseEntityDescriptionMixin:
"""Mixin for required Sensibo base description keys."""
value_fn: Callable[[SensiboDevice], bool | None]
@dataclass
class SensiboMotionBinarySensorEntityDescription(
BinarySensorEntityDescription, MotionBaseEntityDescriptionMixin
):
"""Describes Sensibo Motion sensor entity."""
@dataclass
class SensiboDeviceBinarySensorEntityDescription(
BinarySensorEntityDescription, DeviceBaseEntityDescriptionMixin
):
"""Describes Sensibo Motion sensor entity."""
FILTER_CLEAN_REQUIRED_DESCRIPTION = SensiboDeviceBinarySensorEntityDescription(
key="filter_clean",
device_class=BinarySensorDeviceClass.PROBLEM,
name="Filter Clean Required",
value_fn=lambda data: data.filter_clean,
)
MOTION_SENSOR_TYPES: tuple[SensiboMotionBinarySensorEntityDescription, ...] = (
SensiboMotionBinarySensorEntityDescription(
key="alive",
device_class=BinarySensorDeviceClass.CONNECTIVITY,
entity_category=EntityCategory.DIAGNOSTIC,
name="Alive",
icon="mdi:wifi",
value_fn=lambda data: data.alive,
),
SensiboMotionBinarySensorEntityDescription(
key="is_main_sensor",
entity_category=EntityCategory.DIAGNOSTIC,
name="Main Sensor",
icon="mdi:connection",
value_fn=lambda data: data.is_main_sensor,
),
SensiboMotionBinarySensorEntityDescription(
key="motion",
device_class=BinarySensorDeviceClass.MOTION,
name="Motion",
icon="mdi:motion-sensor",
value_fn=lambda data: data.motion,
),
)
MOTION_DEVICE_SENSOR_TYPES: tuple[SensiboDeviceBinarySensorEntityDescription, ...] = (
SensiboDeviceBinarySensorEntityDescription(
key="room_occupied",
device_class=BinarySensorDeviceClass.MOTION,
name="Room Occupied",
icon="mdi:motion-sensor",
value_fn=lambda data: data.room_occupied,
),
FILTER_CLEAN_REQUIRED_DESCRIPTION,
)
PURE_SENSOR_TYPES: tuple[SensiboDeviceBinarySensorEntityDescription, ...] = (
SensiboDeviceBinarySensorEntityDescription(
key="pure_boost_enabled",
device_class=BinarySensorDeviceClass.RUNNING,
name="Pure Boost Enabled",
icon="mdi:wind-power-outline",
value_fn=lambda data: data.pure_boost_enabled,
),
SensiboDeviceBinarySensorEntityDescription(
key="pure_ac_integration",
entity_category=EntityCategory.DIAGNOSTIC,
device_class=BinarySensorDeviceClass.CONNECTIVITY,
name="Pure Boost linked with AC",
icon="mdi:connection",
value_fn=lambda data: data.pure_ac_integration,
),
SensiboDeviceBinarySensorEntityDescription(
key="pure_geo_integration",
entity_category=EntityCategory.DIAGNOSTIC,
device_class=BinarySensorDeviceClass.CONNECTIVITY,
name="Pure Boost linked with Presence",
icon="mdi:connection",
value_fn=lambda data: data.pure_geo_integration,
),
SensiboDeviceBinarySensorEntityDescription(
key="pure_measure_integration",
entity_category=EntityCategory.DIAGNOSTIC,
device_class=BinarySensorDeviceClass.CONNECTIVITY,
name="Pure Boost linked with Indoor Air Quality",
icon="mdi:connection",
value_fn=lambda data: data.pure_measure_integration,
),
SensiboDeviceBinarySensorEntityDescription(
key="pure_prime_integration",
entity_category=EntityCategory.DIAGNOSTIC,
device_class=BinarySensorDeviceClass.CONNECTIVITY,
name="Pure Boost linked with Outdoor Air Quality",
icon="mdi:connection",
value_fn=lambda data: data.pure_prime_integration,
),
FILTER_CLEAN_REQUIRED_DESCRIPTION,
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Sensibo binary sensor platform."""
coordinator: SensiboDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
entities: list[SensiboMotionSensor | SensiboDeviceSensor] = []
for device_id, device_data in coordinator.data.parsed.items():
if device_data.motion_sensors:
entities.extend(
SensiboMotionSensor(
coordinator, device_id, sensor_id, sensor_data, description
)
for sensor_id, sensor_data in device_data.motion_sensors.items()
for description in MOTION_SENSOR_TYPES
)
entities.extend(
SensiboDeviceSensor(coordinator, device_id, description)
for description in MOTION_DEVICE_SENSOR_TYPES
for device_id, device_data in coordinator.data.parsed.items()
if device_data.motion_sensors is not None
)
entities.extend(
SensiboDeviceSensor(coordinator, device_id, description)
for description in PURE_SENSOR_TYPES
for device_id, device_data in coordinator.data.parsed.items()
if device_data.model == "pure"
)
async_add_entities(entities)
class SensiboMotionSensor(SensiboMotionBaseEntity, BinarySensorEntity):
"""Representation of a Sensibo Motion Binary Sensor."""
entity_description: SensiboMotionBinarySensorEntityDescription
def __init__(
self,
coordinator: SensiboDataUpdateCoordinator,
device_id: str,
sensor_id: str,
sensor_data: MotionSensor,
entity_description: SensiboMotionBinarySensorEntityDescription,
) -> None:
"""Initiate Sensibo Motion Binary Sensor."""
super().__init__(
coordinator,
device_id,
sensor_id,
sensor_data,
entity_description.name,
)
self.entity_description = entity_description
self._attr_unique_id = f"{sensor_id}-{entity_description.key}"
self._attr_name = (
f"{self.device_data.name} Motion Sensor {entity_description.name}"
)
@property
def is_on(self) -> bool | None:
"""Return true if the binary sensor is on."""
if TYPE_CHECKING:
assert self.sensor_data
return self.entity_description.value_fn(self.sensor_data)
class SensiboDeviceSensor(SensiboDeviceBaseEntity, BinarySensorEntity):
"""Representation of a Sensibo Device Binary Sensor."""
entity_description: SensiboDeviceBinarySensorEntityDescription
def __init__(
self,
coordinator: SensiboDataUpdateCoordinator,
device_id: str,
entity_description: SensiboDeviceBinarySensorEntityDescription,
) -> None:
"""Initiate Sensibo Device Binary Sensor."""
super().__init__(
coordinator,
device_id,
)
self.entity_description = entity_description
self._attr_unique_id = f"{device_id}-{entity_description.key}"
self._attr_name = f"{self.device_data.name} {entity_description.name}"
@property
def is_on(self) -> bool | None:
"""Return true if the binary sensor is on."""
return self.entity_description.value_fn(self.device_data)
|
the-stack_106_28359 | from setuptools import find_packages, setup
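# Build the PyPI long description from README.md, keeping everything up to
# (but not including) the second "##" heading.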
with open("README.md") as fh:
long_description = ""
header_count = 0
for line in fh:
if line.startswith("##"):
header_count += 1
if header_count < 2:
long_description += line
else:
break
def get_version():
path = "pettingzoo/__init__.py"
with open(path) as file:
lines = file.readlines()
for line in lines:
if line.startswith("__version__"):
return line.strip().split()[-1].strip().strip('"')
raise RuntimeError("bad version data in __init__.py")
extras = {
"atari": ["multi_agent_ale_py==0.1.11", "pygame==2.0.0"],
"classic": ["chess==1.7.0", "rlcard==1.0.4", "pygame==2.0.0", "hanabi_learning_environment==0.0.1"],
"butterfly": ["pygame==2.0.0", "pymunk==6.2.0"],
"magent": ["magent==0.1.14"],
"mpe": ["pyglet>=1.4.0"],
"sisl": ["pygame==2.0.0", "box2d-py==2.3.5", "pyglet>=1.4.0", "scipy>=1.4.1"],
"other": ["pillow>=8.0.1"],
"tests": ["pynput"],
}
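# Convenience extra that bundles every environment family; the "tests" extra is not included.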
extras["all"] = extras["atari"] + extras["classic"] + extras["butterfly"] + extras["magent"] + extras["mpe"] + extras["sisl"] + extras["other"]
setup(
name='PettingZoo',
version=get_version(),
author='PettingZoo Community',
author_email="[email protected]",
description="Gym for multi-agent reinforcement learning",
url='https://github.com/Farama-Foundation/PettingZoo',
long_description=long_description,
long_description_content_type="text/markdown",
keywords=["Reinforcement Learning", "game", "RL", "AI", "gym"],
python_requires=">=3.7, <3.10",
packages=["pettingzoo"] + ["pettingzoo." + pkg for pkg in find_packages("pettingzoo")],
include_package_data=True,
install_requires=[
"numpy>=1.18.0",
"gym>=0.21.0"
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
extras_require=extras,
)
|
the-stack_106_28360 | import json
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from django.http import (
HttpResponse,
HttpResponseNotFound,
HttpResponseBadRequest,
HttpResponseForbidden,
)
from django.utils.translation import ugettext as _
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.views.decorators.http import require_POST
from kitsune.access.decorators import login_required
from kitsune.upload.models import ImageAttachment
from kitsune.upload.utils import upload_imageattachment, FileTooLargeError
ALLOWED_MODELS = ["questions.Question", "questions.Answer", "auth.User"]
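# Only these models, addressed as "app_label.ModelName" strings resolvable with
# apps.get_model(), may have images attached through these views.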
@login_required
@require_POST
@xframe_options_sameorigin
def up_image_async(request, model_name, object_pk):
"""Upload all images in request.FILES."""
    # Verify the model against our white-list
if model_name not in ALLOWED_MODELS:
message = _("Model not allowed.")
return HttpResponseBadRequest(json.dumps({"status": "error", "message": message}))
# Get the model
m = apps.get_model(*model_name.split("."))
# Then look up the object by pk
try:
obj = m.objects.get(pk=object_pk)
except ObjectDoesNotExist:
message = _("Object does not exist.")
return HttpResponseNotFound(json.dumps({"status": "error", "message": message}))
try:
file_info = upload_imageattachment(request, obj)
except FileTooLargeError as e:
return HttpResponseBadRequest(json.dumps({"status": "error", "message": e.args[0]}))
if hasattr(obj, "clear_cached_images"):
# if the object the image is attached to has a `clear_cached_images` method,
# like questions and answers do, call it
obj.clear_cached_images()
if isinstance(file_info, dict) and "thumbnail_url" in file_info:
return HttpResponse(json.dumps({"status": "success", "file": file_info}))
message = _("Invalid or no image received.")
return HttpResponseBadRequest(
json.dumps({"status": "error", "message": message, "errors": file_info})
)
@require_POST
@xframe_options_sameorigin
def del_image_async(request, image_id):
"""Delete an image given its object id."""
user = request.user
if not user.is_authenticated:
message = _("You are not logged in.")
return HttpResponseForbidden(json.dumps({"status": "error", "message": message}))
try:
image = ImageAttachment.objects.get(pk=image_id)
except ImageAttachment.DoesNotExist:
message = _("The requested image could not be found.")
return HttpResponseNotFound(json.dumps({"status": "error", "message": message}))
if not ((user == image.creator) or (user.has_perm("upload.delete_imageattachment"))):
message = _("You do not have permission to do that.")
return HttpResponseForbidden(json.dumps({"status": "error", "message": message}))
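    # Keep a reference to the object the image is attached to before deleting,
    # so its cached images can be cleared afterwards.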
content_object = image.content_object
image.file.delete()
if image.thumbnail:
image.thumbnail.delete()
image.delete()
if hasattr(content_object, "clear_cached_images"):
# if the object the image was attached to has a `clear_cached_images` method,
# like questions and answers do, call it
content_object.clear_cached_images()
return HttpResponse(json.dumps({"status": "success"}))
|
the-stack_106_28361 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
from xmlrpc.client import ServerProxy
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import QPushButton
from electrum_audax import util, keystore, ecc, crypto
from electrum_audax import transaction
from electrum_audax.bip32 import BIP32Node
from electrum_audax.plugin import BasePlugin, hook
from electrum_audax.i18n import _
from electrum_audax.wallet import Multisig_Wallet
from electrum_audax.util import bh2u, bfh
from electrum_audax.gui.qt.transaction_dialog import show_transaction
from electrum_audax.gui.qt.util import WaitingDialog
import sys
import traceback
server = ServerProxy('https://cosigner.electrum.org/', allow_none=True)
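# Shared XML-RPC endpoint acting as a simple message board: encrypted, partially
# signed transactions are stored and retrieved under per-cosigner key hashes.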
class Listener(util.DaemonThread):
def __init__(self, parent):
util.DaemonThread.__init__(self)
self.daemon = True
self.parent = parent
self.received = set()
self.keyhashes = []
def set_keyhashes(self, keyhashes):
self.keyhashes = keyhashes
def clear(self, keyhash):
server.delete(keyhash)
self.received.remove(keyhash)
def run(self):
while self.running:
if not self.keyhashes:
time.sleep(2)
continue
for keyhash in self.keyhashes:
if keyhash in self.received:
continue
try:
message = server.get(keyhash)
except Exception as e:
self.logger.info("cannot contact cosigner pool")
time.sleep(30)
continue
if message:
self.received.add(keyhash)
self.logger.info(f"received message for {keyhash}")
self.parent.obj.cosigner_receive_signal.emit(
keyhash, message)
# poll every 30 seconds
time.sleep(30)
class QReceiveSignalObject(QObject):
cosigner_receive_signal = pyqtSignal(object, object)
class Plugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.listener = None
self.obj = QReceiveSignalObject()
self.obj.cosigner_receive_signal.connect(self.on_receive)
self.keys = []
self.cosigner_list = []
@hook
def init_qt(self, gui):
for window in gui.windows:
self.on_new_window(window)
@hook
def on_new_window(self, window):
self.update(window)
@hook
def on_close_window(self, window):
self.update(window)
def is_available(self):
return True
def update(self, window):
wallet = window.wallet
if type(wallet) != Multisig_Wallet:
return
if self.listener is None:
self.logger.info("starting listener")
self.listener = Listener(self)
self.listener.start()
elif self.listener:
self.logger.info("shutting down listener")
self.listener.stop()
self.listener = None
self.keys = []
self.cosigner_list = []
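        # For every keystore, derive a "keyhash" (double SHA-256 of the compressed
        # master public key) that identifies its mailbox on the cosigner pool server.
        # Keystores we can sign with are polled for incoming transactions; watching-only
        # ones are recorded as cosigners to send transactions to.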
for key, keystore in wallet.keystores.items():
xpub = keystore.get_master_public_key()
pubkey = BIP32Node.from_xkey(xpub).eckey.get_public_key_bytes(compressed=True)
_hash = bh2u(crypto.sha256d(pubkey))
if not keystore.is_watching_only():
self.keys.append((key, _hash, window))
else:
self.cosigner_list.append((window, xpub, pubkey, _hash))
if self.listener:
self.listener.set_keyhashes([t[1] for t in self.keys])
@hook
def transaction_dialog(self, d):
d.cosigner_send_button = b = QPushButton(_("Send to cosigner"))
b.clicked.connect(lambda: self.do_send(d.tx))
d.buttons.insert(0, b)
self.transaction_dialog_update(d)
@hook
def transaction_dialog_update(self, d):
if d.tx.is_complete() or d.wallet.can_sign(d.tx):
d.cosigner_send_button.hide()
return
for window, xpub, K, _hash in self.cosigner_list:
if window.wallet == d.wallet and self.cosigner_can_sign(d.tx, xpub):
d.cosigner_send_button.show()
break
else:
d.cosigner_send_button.hide()
def cosigner_can_sign(self, tx, cosigner_xpub):
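        # Collect the xpubs referenced by the transaction inputs; the cosigner can
        # sign if its xpub is among them.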
from electrum_audax.keystore import is_xpubkey, parse_xpubkey
xpub_set = set([])
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
xpub_set.add(xpub)
return cosigner_xpub in xpub_set
def do_send(self, tx):
def on_success(result):
window.show_message(_("Your transaction was sent to the cosigning pool.") + '\n' +
_("Open your cosigner wallet to retrieve it."))
def on_failure(exc_info):
e = exc_info[1]
try: self.logger.error("on_failure", exc_info=exc_info)
except OSError: pass
window.show_error(_("Failed to send transaction to cosigning pool") + ':\n' + str(e))
for window, xpub, K, _hash in self.cosigner_list:
if not self.cosigner_can_sign(tx, xpub):
continue
# construct message
raw_tx_bytes = bfh(str(tx))
public_key = ecc.ECPubkey(K)
message = public_key.encrypt_message(raw_tx_bytes).decode('ascii')
# send message
task = lambda: server.put(_hash, message)
msg = _('Sending transaction to cosigning pool...')
WaitingDialog(window, msg, task, on_success, on_failure)
def on_receive(self, keyhash, message):
self.logger.info(f"signal arrived for {keyhash}")
for key, _hash, window in self.keys:
if _hash == keyhash:
break
else:
self.logger.info("keyhash not found")
return
wallet = window.wallet
if isinstance(wallet.keystore, keystore.Hardware_KeyStore):
window.show_warning(_('An encrypted transaction was retrieved from cosigning pool.') + '\n' +
_('However, hardware wallets do not support message decryption, '
'which makes them not compatible with the current design of cosigner pool.'))
return
elif wallet.has_keystore_encryption():
password = window.password_dialog(_('An encrypted transaction was retrieved from cosigning pool.') + '\n' +
_('Please enter your password to decrypt it.'))
if not password:
return
else:
password = None
if not window.question(_("An encrypted transaction was retrieved from cosigning pool.") + '\n' +
_("Do you want to open it now?")):
return
xprv = wallet.keystore.get_master_private_key(password)
if not xprv:
return
try:
privkey = BIP32Node.from_xkey(xprv).eckey
message = bh2u(privkey.decrypt_message(message))
except Exception as e:
self.logger.exception('')
window.show_error(_('Error decrypting message') + ':\n' + str(e))
return
self.listener.clear(keyhash)
tx = transaction.Transaction(message)
show_transaction(tx, window, prompt_if_unsaved=True)
|
the-stack_106_28362 | #! /usr/bin/env python
"""
Create a final simulated exposure.
This module contains code that will combine a seed image and a
dark current exposure into a final simulated exposure. Cosmic rays,
Poisson noise, and other detector effects are added. This is the
final step when creating simulated data with Mirage. It can be run
after catalog_Seed_image.py and dark_prep.py
Authors:
--------
- Bryan Hilbert, Kevin Volk
Use:
----
This module can be imported as such:
::
from mirage.ramp_generator.obs_generator import Observation
ob = Observation()
ob.paramfile = 'my_parameters.yaml'
ob.create()
"""
import sys
import os
import random
import copy
from math import radians
import datetime
import logging
import warnings
import argparse
import shutil
import yaml
import pkg_resources
import numpy as np
from astropy.io import fits, ascii
from astropy.table import Table
from astropy.time import Time, TimeDelta
import astropy.units as u
import pysiaf
import mirage
from mirage.logging import logging_functions
from mirage.ramp_generator import unlinearize
from mirage.reference_files import crds_tools
from mirage.utils import read_fits, utils, siaf_interface
from mirage.utils import set_telescope_pointing_separated as stp
from mirage.utils.constants import EXPTYPES, MEAN_GAIN_VALUES, LOG_CONFIG_FILENAME, \
STANDARD_LOGFILE_NAME, NUM_RESETS_BEFORE_EXP, NUM_RESETS_BEFORE_INT
from mirage.utils.timer import Timer
INST_LIST = ['nircam', 'niriss', 'fgs']
MODES = {"nircam": ["imaging", "ts_imaging", "wfss", "ts_grism"],
"niriss": ["imaging", "ami", "pom", "wfss", "soss"],
"fgs": ["imaging"]}
classdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
log_config_file = os.path.join(classdir, 'logging', LOG_CONFIG_FILENAME)
logging_functions.create_logger(log_config_file, STANDARD_LOGFILE_NAME)
class Observation():
def __init__(self, offline=False):
"""Instantiate the Observation class
Parameters
----------
offline : bool
If True, the check for the existence of the MIRAGE_DATA
directory is skipped. This is primarily for Travis testing
"""
# Initialize the log using dictionary from the yaml file
self.logger = logging.getLogger(__name__)
self.linDark = None
self.seed = None
self.segmap = None
self.seedheader = None
self.seedunits = 'ADU/sec'
self.offline = offline
self.paramfile = 'None'
self.params = None
# self.coord_adjust contains the factor by which the
# nominal output array size needs to be increased
# (used for WFSS mode), as well as the coordinate
# offset between the nominal output array coordinates.
self.coord_adjust = {'x': 1., 'xoffset': 0., 'y': 1., 'yoffset': 0.}
# Locate the module files, so that we know where to look
# for config subdirectory
self.modpath = pkg_resources.resource_filename('mirage', '')
# Get the location of the MIRAGE_DATA environment
# variable, so we know where to look for darks, CR,
# PSF files, etc later
self.env_var = 'MIRAGE_DATA'
datadir = utils.expand_environment_variable(self.env_var, offline=offline)
# Check that CRDS-related environment variables are set correctly
self.crds_datadir = crds_tools.env_variables()
# Initialize timer
self.timer = Timer()
def add_crosstalk(self, exposure):
"""Add crosstalk effects to the input exposure
Parameters
----------
exposure : numpy.ndarray
Always 4D
Returns
-------
exposure : numpy.ndarray
Exposure with crosstalk effects added
"""
ints, groups, yd, xd = exposure.shape
if self.params['Readout']['namp'] == 4:
if self.instrument.upper() == 'NIRCAM':
xdet = self.detector[3:5].upper()
if xdet[1] == 'L':
xdet = xdet[0] + '5'
else:
xdet = self.detector
xtcoeffs = self.read_crosstalk_file(self.params['Reffiles']['crosstalk'], xdet)
# Only sources on the detector will create crosstalk.
# If signalimage is larger than full frame
# because we are creating a grism image, then extract the
# pixels corresponding to the actual
# detector, and only create crosstalk values for those.
xs = 0
xe = xd
ys = 0
ye = yd
for integ in range(ints):
for group in range(groups):
xtinput = exposure[integ, group, ys:ye, xs:xe]
xtimage = self.crosstalk_image(xtinput, xtcoeffs)
# Now add the crosstalk image to the signalimage
exposure[integ, group, ys:ye, xs:xe] += xtimage
else:
self.logger.info(("Crosstalk calculation requested, but the chosen subarray "
"is read out using only 1 amplifier. "
"Therefore there will be no crosstalk. Skipping this step."))
return exposure
def add_crs_and_noise(self, seed, num_integrations=None):
"""Given a noiseless seed ramp, add cosmic
rays and poisson noise
        Parameters
        ----------
        seed : numpy.ndarray
            Exposure to add CRs and noise to
        num_integrations : int
            Number of integrations to create from the seed image. If None, it is
            determined from the input parameters or the shape of the seed.
        Returns
        -------
        sim_exposure : numpy.ndarray
            Exposure with CRs and noise added
        sim_zero : numpy.ndarray
            Zeroth read(s) of exposure
        """
yd, xd = seed.shape[-2:]
seeddim = len(seed.shape)
# Find the number of integrations to make from the seed image.
if num_integrations is None:
# Imaging mode where there is no file splitting
if seeddim == 2:
nint = self.params['Readout']['nint']
ngroups = self.params['Readout']['ngroup']
# TSO and moving targets
elif seeddim == 4:
nint = seed.shape[0]
ngroups = int(seed.shape[1] / (self.params['Readout']['nframe'] + self.params['Readout']['nskip']))
else:
nint = num_integrations
# Imaging mode where there may be file splitting
if seeddim == 2:
ngroups = self.params['Readout']['ngroup']
# TSO and moving targets
elif seeddim == 4:
if num_integrations != seed.shape[0]:
raise ValueError(('The number of integrations reported for the dark ({}) does not match that '
'implied by the size of the 1st dimension of the seed image ({}). Not sure '
'which to use.'.format(num_integrations, seed.shape[0])))
ngroups = int(seed.shape[1] / (self.params['Readout']['nframe'] + self.params['Readout']['nskip']))
sim_exposure = np.zeros((nint, ngroups, yd, xd))
sim_zero = np.zeros((nint, yd, xd))
# Run one integration at a time
# because each needs its own collection
# of cosmic rays and poisson noise realization
for integ in range(nint):
self.logger.info("Integration {}:".format(integ))
if seeddim == 2:
inseed = seed
elif seeddim == 4:
inseed = seed[integ, :, :, :]
if self.runStep['cosmicray']:
ramp, rampzero = self.frame_to_ramp(inseed)
else:
ramp, rampzero = self.frame_to_ramp_no_cr(inseed)
sim_exposure[integ, :, :, :] = ramp
sim_zero[integ, :, :] = rampzero
return sim_exposure, sim_zero
def add_detector_effects(self, ramp):
"""Add detector-based effects to input data.
Currently only crosstalk effects are added.
Parameters
----------
ramp : numpy.ndarray
Array containing the exposure
Returns
-------
ramp : numpy.ndarray
Exposure with effects added
"""
if self.runStep['crosstalk']:
ramp = self.add_crosstalk(ramp)
return ramp
def add_flatfield_effects(self, ramp):
"""Add flat field effects to the exposure
        Parameters
----------
ramp : numpy.ndarray
Array containing exposure
Returns
--------
ramp : numpy.ndarray
Exposure with flat field applied
"""
# ILLUMINATION FLAT
if self.runStep['illuminationflat']:
illuminationflat, illuminationflatheader = self.read_cal_file(self.params['Reffiles']['illumflat'])
ramp *= illuminationflat
# PIXEL FLAT
if self.runStep['pixelflat']:
pixelflat, pixelflatheader = self.read_cal_file(self.params['Reffiles']['pixelflat'])
ramp *= pixelflat
return ramp
def add_ipc(self, data):
"""
Add interpixel capacitance effects to the data. This is done by
convolving the data with a kernel. The kernel is read in from the
file specified by self.params['Reffiles']['ipc']. The core of this
function was copied from the IPC correction step in the JWST
calibration pipeline.
Parameters
----------
data : obj
4d numpy ndarray containing the data to which the
IPC effects will be added
Returns
-------
returns : obj
4d numpy ndarray of the modified data
"""
output_data = np.copy(data)
# Shape of the data, which may include reference pix
shape = output_data.shape
# Find the number of reference pixel rows and columns
# in output_data
if self.subarray_bounds[0] < 4:
left_columns = 4 - self.subarray_bounds[0]
else:
left_columns = 0
if self.subarray_bounds[2] > 2043:
right_columns = 4 - (2047 - self.subarray_bounds[2])
else:
right_columns = 0
if self.subarray_bounds[1] < 4:
bottom_rows = 4 - self.subarray_bounds[1]
else:
bottom_rows = 0
if self.subarray_bounds[3] > 2043:
top_rows = 4 - (2047 - self.subarray_bounds[3])
else:
top_rows = 0
# Get IPC kernel data
try:
# If add_ipc has already been called, then the correct
# IPC kernel already exists, in self.kernel
kernel = np.copy(self.kernel)
except:
# If add_ipc has not been called yet, then read in the
# kernel from the specified file.
kernel = fits.getdata(self.params['Reffiles']['ipc'])
# Invert the kernel if requested, to go from a kernel
# designed to remove IPC effects to one designed to
# add IPC effects
if self.params['Reffiles']['invertIPC']:
self.logger.info("Inverting IPC kernel prior to convolving with image")
kernel = self.invert_ipc_kernel(kernel)
self.kernel = np.copy(kernel)
kshape = kernel.shape
# These axes lengths exclude reference pixels, if there are any.
ny = shape[-2] - (bottom_rows + top_rows)
nx = shape[-1] - (left_columns + right_columns)
# The temporary array temp is larger than the science part of
# output_data by a border (set to zero) that's about half of the
# kernel size, so the convolution can be done without checking for
# out of bounds.
# b_b, t_b, l_b, and r_b are the widths of the borders on the
# bottom, top, left, and right, respectively.
b_b = kshape[0] // 2
t_b = kshape[0] - b_b - 1
l_b = kshape[1] // 2
r_b = kshape[1] - l_b - 1
tny = ny + b_b + t_b
yoff = bottom_rows # offset in output_data
tnx = nx + l_b + r_b
xoff = left_columns # offset in output_data
# Loop over integrations and groups
for integration in range(shape[0]):
for group in range(shape[1]):
# Copy the science portion (not the reference pixels) of
# output_data to this temporary array, then make
# subsequent changes in-place to output_data.
temp = np.zeros((tny, tnx), dtype=output_data.dtype)
temp[b_b:b_b + ny, l_b:l_b + nx] = \
output_data[integration, group, yoff:yoff + ny, xoff:xoff + nx].copy()
# After setting this slice to zero, we'll incrementally add
# to it.
output_data[integration, group, yoff:yoff + ny, xoff:xoff + nx] = 0.
if len(kshape) == 2:
# 2-D IPC kernel. Loop over pixels of the deconvolution
# kernel. In this section, `part` has the same shape
# as `temp`.
middle_j = kshape[0] // 2
middle_i = kshape[1] // 2
for j in range(kshape[0]):
jstart = kshape[0] - j - 1
for i in range(kshape[1]):
if i == middle_i and j == middle_j:
continue # the middle pixel is done last
part = kernel[j, i] * temp
istart = kshape[1] - i - 1
output_data[integration, group, yoff:yoff + ny, xoff:xoff + nx] += \
part[jstart:jstart + ny, istart:istart + nx]
# The middle pixel of the IPC kernel is expected to be
# the largest, so add that last.
part = kernel[middle_j, middle_i] * temp
output_data[integration, group, yoff:yoff + ny, xoff:xoff + nx] += \
part[middle_j:middle_j + ny, middle_i:middle_i + nx]
else:
# 4-D IPC kernel. Extract a subset of the kernel:
# all of the first two axes, but only the portion
# of the last two axes corresponding to the science
# data (i.e. possibly a subarray,
# and certainly excluding reference pixels).
k_temp = np.zeros((kshape[0], kshape[1], tny, tnx),
dtype=kernel.dtype)
k_temp[:, :, b_b:b_b + ny, l_b:l_b + nx] = \
kernel[:, :, yoff:yoff + ny, xoff:xoff + nx]
# In this section, `part` has shape (ny, nx), which is
# smaller than `temp`.
middle_j = kshape[0] // 2
middle_i = kshape[1] // 2
for j in range(kshape[0]):
jstart = kshape[0] - j - 1
for i in range(kshape[1]):
if i == middle_i and j == middle_j:
continue # the middle pixel is done last
istart = kshape[1] - i - 1
# The slice of k_temp includes different pixels
# for the first or second axes within each loop,
# but the same slice for the last two axes.
# The slice of temp (a copy of the science data)
# includes a different offset for each loop.
part = k_temp[j, i, b_b:b_b + ny, l_b:l_b + nx] * \
temp[jstart:jstart + ny, istart:istart + nx]
output_data[integration, group, yoff:yoff + ny, xoff:xoff + nx] += part
# Add the product for the middle pixel last.
part = k_temp[middle_j, middle_i, b_b:b_b + ny, l_b:l_b + nx] * \
temp[middle_j:middle_j + ny, middle_i:middle_i + nx]
output_data[integration, group, yoff:yoff + ny, xoff:xoff + nx] += part
return output_data
def add_mirage_info(self):
"""Place Mirage-related information in a FITS hdulist so that it can
be saved with the output data
Returns
-------
hdulist : astroy.io.fits.HDUList
HDU List containing Mirage-related info in the primary header
"""
hdulist = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU()])
hdulist[0].header['MRGEVRSN'] = (mirage.__version__, 'Mirage version used')
hdulist[0].header['YAMLFILE'] = (self.paramfile, 'Mirage input yaml file')
#hdulist[0].header['GAINFILE'] = (self.params['Reffiles']['gain'], 'Gain file used by Mirage')
hdulist[0].header['GAIN'] = (self.gain, 'Gain value used by Mirage')
hdulist[0].header['DISTORTN'] = (self.params['Reffiles']['astrometric'],
'Distortion reffile used by Mirage')
hdulist[0].header['IPC'] = (self.params['Reffiles']['ipc'], 'IPC kernel used by Mirage')
hdulist[0].header['PIXARMAP'] = (self.params['Reffiles']['pixelAreaMap'],
'Pixel area map used by Mirage')
hdulist[0].header['CROSSTLK'] = (self.params['Reffiles']['crosstalk'],
'Crosstalk file used by Mirage')
hdulist[0].header['FLUX_CAL'] = (self.params['Reffiles']['flux_cal'],
'Flux calibration file used by Mirage')
hdulist[0].header['FTHRUPUT'] = (self.params['Reffiles']['filter_throughput'],
'Filter throughput file used by Mirage')
hdulist[0].header['PTSRCCAT'] = (self.params['simSignals']['pointsource'],
'Point source catalog used by Mirage')
hdulist[0].header['GALAXCAT'] = (self.params['simSignals']['galaxyListFile'],
'Galaxy source catalog used by Mirage')
hdulist[0].header['EXTNDCAT'] = (self.params['simSignals']['extended'],
'Extended source catalog used by Mirage')
hdulist[0].header['MTPTSCAT'] = (self.params['simSignals']['movingTargetList'],
'Moving point source catalog used by Mirage')
hdulist[0].header['MTSERSIC'] = (self.params['simSignals']['movingTargetSersic'],
'Moving Sersic catalog used by Mirage')
hdulist[0].header['MTEXTEND'] = (self.params['simSignals']['movingTargetExtended'],
'Moving extended target catalog used by Mirage')
hdulist[0].header['NONSDRAL'] = (self.params['simSignals']['movingTargetToTrack'],
'Non-Sidereal catalog used by Mirage')
hdulist[0].header['BKGDRATE'] = (self.params['simSignals']['bkgdrate'],
'Background rate used by Mirage')
hdulist[0].header['TRACKING'] = (self.params['Telescope']['tracking'],
'Telescope tracking type for Mirage')
hdulist[0].header['POISSON'] = (self.params['simSignals']['poissonseed'],
'Random num generator seed for Poisson noise in Mirage')
hdulist[0].header['PSFWFE'] = (self.params['simSignals']['psfwfe'],
'WebbPSF Wavefront error used by Mirage')
hdulist[0].header['PSFWFGRP'] = (self.params['simSignals']['psfwfegroup'],
'WebbPSF wavefront error group used by Mirage')
hdulist[0].header['CRLIB'] = (self.params['cosmicRay']['library'],
'Cosmic ray library used by Mirage')
hdulist[0].header['CRSCALE'] = (self.params['cosmicRay']['scale'],
'Cosmic ray rate scaling factor used by Mirage')
hdulist[0].header['CRSEED'] = (self.params['cosmicRay']['seed'],
'Random number generator seed for cosmic rays in Mirage')
return hdulist
def add_pam(self, signalramp):
""" Apply Pixel Area Map to exposure
        Parameters
----------
signalramp : numpy.ndarray
Array containing exposure
Returns
--------
        signalramp : numpy.ndarray
Array after multiplying by the pixel area map
"""
pixAreaMap = self.simple_get_image(self.params['Reffiles']['pixelAreaMap'])
# If we are making a grism direct image, we need to embed the true pixel area
# map in an array of the appropriate dimension, where any pixels outside the
# actual aperture are set to 1.0
if self.params['Output']['grism_source_image']:
mapshape = pixAreaMap.shape
g, yd, xd = signalramp.shape
pam = np.ones((yd, xd))
ys = self.coord_adjust['yoffset']
xs = self.coord_adjust['xoffset']
pam[ys:ys+mapshape[0], xs:xs+mapshape[1]] = np.copy(pixAreaMap)
pixAreaMap = pam
signalramp *= pixAreaMap
return signalramp
def add_superbias_and_refpix(self, ramp, sbref):
"""Add superbias and reference pixel-associated
signal to the input ramp
Parameters
----------
ramp : numpy.ndarray
Array containing exposure data
sbref : numpy.ndarray
Array containing superbias and reference pixel associated signal
Returns
-------
newramp : numpy.ndarray
Ramp with superbias and refpix signal added
"""
rampdim = ramp.shape
sbrefdim = sbref.shape
if len(rampdim) != len(sbrefdim):
if len(rampdim) == (len(sbrefdim) + 1):
newramp = ramp + sbref
else:
raise ValueError(("WARNING: input ramp and superbias+refpix "
"arrays have different dimensions. Cannot combine. "
"Ramp dim: {}, SBRef dim: {}"
.format(len(rampdim), len(sbrefdim))))
else:
# Inputs arrays have the same number of dimensions
newramp = ramp + sbref
return newramp
def add_synthetic_to_dark(self, synthetic, dark, syn_zeroframe=None):
"""Add the synthetic data (now an exposure) to the dark current
exposure.
If zeroframe is provided, the function uses that to create the
dark+synthetic zeroframe that is returned. If not provided, the
function attempts to use the 0th frame of the input synthetic ramp
        Combine the cube of synthetic signals with the real dark current ramp.
Be sure to adjust the dark current ramp if nframe/nskip is different
than the nframe/nskip values that the dark was taken with.
Only RAPID, NISRAPID, FGSRAPID darks will be re-averaged into different
readout patterns. But a BRIGHT2 dark can be used to create a
BRIGHT2 simulated ramp
Arguments:
----------
synthetic : numpy.ndarray
simulated signals, 4D array
dark : numpy.ndarray
dark current exposure, 4D array
syn_zeroframe : numpy.ndarray
zeroframe data associated with simulated data
Returns
--------
synthetic : numpy.ndarray
4D exposure containing combined simulated + dark data
zeroframe : numpy.ndarray
Zeroth read(s) of simulated + dark data
reorder_sbandref : numpy.ndarray
superbias and refpix signal from the dark
"""
# Get the info for the dark integration
darkpatt = dark.header['READPATT']
dark_nframe = dark.header['NFRAMES']
mtch = self.readpatterns['name'].data == darkpatt
dark_nskip = self.readpatterns['nskip'].data[mtch][0]
# If the zeroframes for the dark and the synthetic data
# are present, combine. Otherwise the zeroframe will be
# None.
zeroframe = None
if ((syn_zeroframe is not None) & (dark.zeroframe is not None)):
zeroframe = dark.zeroframe + syn_zeroframe
# To hold reordered superbias + refpix signals from the dark
reorder_sbandref = np.zeros_like(synthetic)
# We have already guaranteed that either the readpatterns match
# or the dark is RAPID, so no need to worry about checking for
# other cases here.
rapids = ["RAPID", "NISRAPID", "FGSRAPID"]
if ((darkpatt in rapids) and (self.params['Readout']['readpatt'] not in rapids)):
deltaframe = self.params['Readout']['nskip'] + \
self.params['Readout']['nframe']
frames = np.arange(0, self.params['Readout']['nframe'])
accumimage = np.zeros_like(synthetic[0, :, :], dtype=np.int32)
sbaccumimage = np.zeros_like(synthetic[0, :, :], dtype=np.int32)
# Loop over integrations
for integ in range(self.params['Readout']['nint']):
# Loop over groups
for i in range(self.params['Readout']['ngroup']):
# average together the appropriate frames,
# skip the appropriate frames
                    self.logger.info(('Averaging dark current ramp in add_synthetic_to_dark. '
                                      'Frames {}, to become group {}'.format(frames, i)))
# If averaging needs to be done
if self.params['Readout']['nframe'] > 1:
accumimage = np.mean(dark.data[integ, frames, :, :], axis=0)
sbaccumimage = np.mean(dark.sbAndRefpix[integ, frames, :, :],
axis=0)
# If no averaging needs to be done
else:
accumimage = dark.data[integ, frames[0], :, :]
sbaccumimage = dark.sbAndRefpix[integ, frames[0], :, :]
# Now add the averaged dark frame to the synthetic data,
# which has already been placed into the correct readout pattern
synthetic[integ, i, :, :] += accumimage
reorder_sbandref[integ, i, :, :] = sbaccumimage
# Increment the frame indexes
frames = frames + deltaframe
elif (darkpatt == self.params['Readout']['readpatt']):
# If the input dark is not RAPID, or if the readout pattern
# of the input dark and the output ramp match, then no averaging
# needs to be done and we can simply add the synthetic groups to
# the dark current groups.
synthetic = synthetic + dark.data[:, 0:self.params['Readout']['ngroup'], :, :]
reorder_sbandref = dark.sbAndRefpix
return synthetic, zeroframe, reorder_sbandref
def apply_lincoeff(self, data, cof):
"""Linearize the input data
        by evaluating the polynomial cof[0] + cof[1]*data + cof[2]*data**2 + cof[3]*data**3 + ...
Parameters
----------
data : numpy.ndarray
data will be 2d
cof : numpy.ndarray
Non-linearity coefficients. cof will be 3d, or 1d
Returns
-------
apply : numpy.ndarray
Linearized data
"""
apply = 0.
if len(cof.shape) == 1:
for i in range(len(cof)):
apply += (cof[i] * data**i)
elif len(cof.shape) == 3:
for i in range(len(cof[:, 0, 0])):
apply += (cof[i, :, :] * data**i)
return apply
def check_param_val(self, value, typ, vmin, vmax, default):
"""Make sure the input value is a float and between given min and max
Parameters
----------
value : float
Value to be checked
typ : str
Description of variable contents
        vmin : float
            Minimum acceptable value
        vmax : float
            Maximum acceptable value
        default : float
            Value to fall back to if the input is outside the acceptable bounds
        Returns
        -------
        value : float
            The input value if acceptable, otherwise the default
"""
try:
value = float(value)
except ValueError:
self.logger.error("{} for {} is not a float.".format(value, typ))
if ((value >= vmin) & (value <= vmax)):
return value
else:
self.logger.error(("ERROR: {} for {} is not within reasonable bounds. "
"Setting to {}".format(value, typ, default)))
return default
def check_params(self):
"""Check that the values of various input parameters are acceptible"""
# check instrument name
if self.params['Inst']['instrument'].lower() not in INST_LIST:
raise ValueError(("WARNING: instrument {} not in the list of "
"available instruments: {}"
.format(self.params['Inst']['instrument'].lower(), INST_LIST)))
# check output filename - make sure it's fits
if self.params['Output']['file'][-5:].lower() != '.fits':
self.params['Output']['file'] += '.fits'
# check mode:
possibleModes = MODES[self.params['Inst']['instrument'].lower()]
self.params['Inst']['mode'] = self.params['Inst']['mode'].lower()
if self.params['Inst']['mode'] in possibleModes:
pass
else:
raise ValueError(("WARNING: unrecognized mode {}. Must be one of: {}"
.format(self.params['Inst']['mode'], possibleModes)))
# Make sure input readout pattern, nframe/nkip combination
# is valid
self.readpattern_check()
# Check that readout patterns of input dark and requested output
# are compatible
self.readpattern_compatible()
# Make sure ngroup and nint are integers
try:
self.params['Readout']['ngroup'] = int(self.params['Readout']['ngroup'])
except:
raise ValueError("WARNING: Input value of ngroup is not an integer.")
try:
self.params['Readout']['nint'] = int(self.params['Readout']['nint'])
except:
raise ValueError("WARNING: Input value of nint is not an integer.")
# If instrument is FGS, then force filter to be 'N/A'
if self.params['Inst']['instrument'].lower() == 'fgs':
self.params['Readout']['filter'] = 'NA'
self.params['Readout']['pupil'] = 'NA'
# Check for entries in the parameter file that are None or blank,
# indicating the step should be skipped. Create a dictionary of steps
# and populate with True or False
self.runStep = {}
self.runStep['superbias'] = self.check_run_step(self.params['Reffiles']['superbias'])
self.runStep['nonlin'] = self.check_run_step(self.params['Reffiles']['linearity'])
self.runStep['gain'] = self.check_run_step(self.params['Reffiles']['gain'])
# self.runStep['phot'] = self.check_run_step(self.params['Reffiles']['phot'])
self.runStep['pixelflat'] = self.check_run_step(self.params['Reffiles']['pixelflat'])
self.runStep['illuminationflat'] = self.check_run_step(self.params['Reffiles']['illumflat'])
self.runStep['astrometric'] = self.check_run_step(self.params['Reffiles']['astrometric'])
self.runStep['ipc'] = self.check_run_step(self.params['Reffiles']['ipc'])
self.runStep['crosstalk'] = self.check_run_step(self.params['Reffiles']['crosstalk'])
self.runStep['occult'] = self.check_run_step(self.params['Reffiles']['occult'])
self.runStep['pointsource'] = self.check_run_step(self.params['simSignals']['pointsource'])
self.runStep['galaxies'] = self.check_run_step(self.params['simSignals']['galaxyListFile'])
self.runStep['extendedsource'] = self.check_run_step(self.params['simSignals']['extended'])
self.runStep['movingTargets'] = self.check_run_step(self.params['simSignals']['movingTargetList'])
self.runStep['movingTargetsSersic'] = self.check_run_step(self.params['simSignals']['movingTargetSersic'])
self.runStep['movingTargetsExtended'] = self.check_run_step(self.params['simSignals']['movingTargetExtended'])
self.runStep['MT_tracking'] = self.check_run_step(self.params['simSignals']['movingTargetToTrack'])
self.runStep['zodiacal'] = self.check_run_step(self.params['simSignals']['zodiacal'])
self.runStep['scattered'] = self.check_run_step(self.params['simSignals']['scattered'])
self.runStep['linearity'] = self.check_run_step(self.params['Reffiles']['linearity'])
self.runStep['cosmicray'] = self.check_run_step(self.params['cosmicRay']['path'])
self.runStep['saturation_lin_limit'] = self.check_run_step(self.params['Reffiles']['saturation'])
self.runStep['fwpw'] = self.check_run_step(self.params['Reffiles']['filtpupilcombo'])
self.runStep['linearized_darkfile'] = self.check_run_step(self.params['Reffiles']['linearized_darkfile'])
self.runStep['badpixfile'] = self.check_run_step(self.params['Reffiles']['badpixmask'])
self.runStep['pixelAreaMap'] = self.check_run_step(self.params['Reffiles']['pixelAreaMap'])
# NON-LINEARITY
# Make sure the input accuracy is a float with reasonable bounds
self.params['nonlin']['accuracy'] = self.check_param_val(self.params['nonlin']['accuracy'],
'nlin accuracy', 1e-12, 1e-6, 1e-6)
self.params['nonlin']['maxiter'] = self.check_param_val(self.params['nonlin']['maxiter'],
'nonlin max iterations', 5, 40, 10)
self.params['nonlin']['limit'] = self.check_param_val(self.params['nonlin']['limit'],
'nonlin max value', 30000., 1.e6, 66000.)
# Make sure the CR random number seed is an integer
try:
self.params['cosmicRay']['seed'] = int(self.params['cosmicRay']['seed'])
except:
self.params['cosmicRay']['seed'] = 66231289
self.logger.warning(("ERROR: cosmic ray random number generator seed is bad. "
"Using the default value of {}."
.format(self.params['cosmicRay']['seed'])))
# Also make sure the poisson random number seed is an integer
try:
self.params['simSignals']['poissonseed'] = int(self.params['simSignals']['poissonseed'])
except:
self.params['simSignals']['poissonseed'] = 815813492
self.logger.warning(("ERROR: cosmic ray random number generator seed is bad. "
"Using the default value of {}."
.format(self.params['simSignals']['poissonseed'])))
# COSMIC RAYS:
# Generate the name of the actual CR file to use
if self.params['cosmicRay']['path'] is None:
self.crfile = None
else:
if self.params['cosmicRay']['path'][-1] != '/':
self.params['cosmicRay']['path'] += '/'
if self.params['cosmicRay']["library"].upper() in ["SUNMAX", "SUNMIN", "FLARES"]:
self.crfile = os.path.join(self.params['cosmicRay']['path'],
"CRs_MCD1.7_" + self.params['cosmicRay']["library"].upper())
else:
self.crfile = None
raise FileNotFoundError(("Warning: unrecognised cosmic ray library {}"
.format(self.params['cosmicRay']["library"])))
# Read in distortion and WCS-related data. These will be placed
# in the header of the output file.
ap_name = self.params['Readout']['array_name']
# Convert the input RA and Dec of the pointing position into floats
# check to see if the inputs are in decimal units or hh:mm:ss strings
try:
self.ra = float(self.params['Telescope']['ra'])
self.dec = float(self.params['Telescope']['dec'])
except:
self.ra, self.dec = utils.parse_RA_Dec(self.params['Telescope']['ra'],
self.params['Telescope']['dec'])
#if abs(self.dec) > 90. or self.ra < 0. or self.ra > 360. or self.ra is None or self.dec is None:
if abs(self.dec) > 90. or self.ra is None or self.dec is None:
raise ValueError("WARNING: bad requested RA and Dec {} {}".format(self.ra, self.dec))
# Make sure the rotation angle is a float
try:
self.params['Telescope']["rotation"] = float(self.params['Telescope']["rotation"])
except:
self.logger.warning(("ERROR: bad rotation value {}, setting to zero."
.format(self.params['Telescope']["rotation"])))
self.params['Telescope']["rotation"] = 0.
# Get SIAF-related information and subarray bounds
siaf_inst = self.params['Inst']['instrument']
if siaf_inst.lower() == 'nircam':
siaf_inst = 'NIRCam'
instrument_siaf = siaf_interface.get_instance(siaf_inst)
self.siaf = instrument_siaf[self.params['Readout']['array_name']]
self.local_roll, self.attitude_matrix, self.ffsize, \
self.subarray_bounds = siaf_interface.get_siaf_information(instrument_siaf,
self.params['Readout']['array_name'],
self.ra, self.dec,
self.params['Telescope']['rotation'])
# Check that the various scaling factors are floats and
# within a reasonable range
self.params['cosmicRay']['scale'] = self.check_param_val(self.params['cosmicRay']['scale'],
'cosmicRay', 0, 100, 1)
self.params['simSignals']['extendedscale'] = self.check_param_val(self.params['simSignals']
['extendedscale'],
'extendedEmission', 0, 10000, 1)
self.params['simSignals']['zodiscale'] = self.check_param_val(self.params['simSignals']['zodiscale'],
'zodi', 0, 10000, 1)
self.params['simSignals']['scatteredscale'] = self.check_param_val(self.params['simSignals']
['scatteredscale'],
'scatteredLight', 0, 10000, 1)
# Make sure the requested output format is an allowed value
if self.params['Output']['format'] not in ['DMS']:
raise NotImplementedError(("WARNING: unsupported output format {} requested. "
"Possible options are {}."
.format(self.params['Output']['format'], ['DMS'])))
# Check the output metadata, including visit and observation
# numbers, obs_id, etc
kwchecks = ['program_number', 'visit_number', 'visit_group',
'sequence_id', 'activity_id', 'exposure_number',
'observation_number', 'obs_id', 'visit_id']
for quality in kwchecks:
try:
self.params['Output'][quality] = str(self.params['Output'][quality])
except ValueError:
self.logger.error(("Unable to convert {} to string. "
"This is required.".format(self.params['Output'][quality])))
# Get the filter wheel and pupil wheel resolver positions for the
# filter and pupil to use. This information will be placed in the
# header of the output file
if self.instrument.upper() in ['NIRCAM', 'NIRISS']:
fw_positions = ascii.read(self.params['Reffiles']['filter_wheel_positions'])
if self.instrument.upper() == 'NIRISS':
f_match = self.params['Readout']['filter'] == fw_positions['Name']
p_match = self.params['Readout']['pupil'] == fw_positions['Name']
elif self.instrument.upper() == 'NIRCAM':
if '5' in self.detector or 'LONG' in self.detector.upper():
channel = 'LW'
else:
channel = 'SW'
f_match = ((self.params['Readout']['filter'] == fw_positions['Name']) & (channel == fw_positions['Channel']))
p_match = ((self.params['Readout']['pupil'] == fw_positions['Name']) & (channel == fw_positions['Channel']))
self.filter_wheel_position = fw_positions['Filter_Resolver_Reading_Wheel_Degrees'][f_match].data[0]
self.pupil_wheel_position = fw_positions['Pupil_Resolver_Reading_Wheel_Degrees'][p_match].data[0]
elif self.instrument.upper() == 'FGS':
self.filter_wheel_position = 999.
self.pupil_wheel_position = 999.
def check_run_step(self, filename):
"""Check to see if a filename exists in the parameter file.
Parameters
----------
filename : str
Name of file to be checked
Returns
-------
state : bool
Indicates whether or not filename is 'none'
"""
if ((len(filename) == 0) or (filename.lower() == 'none')):
return False
else:
return True
def combine_seeds(self, filenames):
"""If the seed image is broken amongst a list of files, read
in those files and combine the data into a single seed image.
Parameters
----------
filenames : list
List of files to be read in
Returns
-------
seed : numpy.ndarray
segmap : numpy.ndarray
header : dict
"""
self.logger.info("Reconstructing seed image from multiple files")
for i, filename in enumerate(filenames):
self.logger.info('{}'.format(filename))
# Read in the data from one file
seed_data, seg_data, header_data = self.read_seed(filename)
ints, groups, ydim, xdim = seed_data.shape
if i == 0:
nints = header_data['SEGINT']
ngroups = header_data['SEGGROUP']
self.logger.info(('Final seed image shape before averaging/skipping frames to create groups: '
'({}, {}, {}, {})'.format(nints, ngroups, ydim, xdim)))
seed = np.zeros((nints, ngroups, ydim, xdim))
segmap = seg_data
# Place the data in the reconstructed seed image array based
# on the integration and group counters
int_start = header_data['PTINTSRT']
grp_start = header_data['PTFRMSRT']
seed[int_start: int_start+ints, grp_start: grp_start+groups, :, :] = seed_data
#print('data placed into integration: ', int_start, int_start+ints)
#print(' groups: ', grp_start, grp_start+groups)
#print(seed_data.shape)
#print(seed.shape)
return seed, segmap, header_data
def convert_mask(self, inmask, dq_table):
"""Convert a bad pixel mask to contain values used
by the JWST pipeline
Parameters
----------
inmask : numpy.ndarray
Input bad pixel mask
dq_table : astropy.fits.record
Table containing data quality definitions
Returns
-------
dqmask : numpy.ndarray
Data quality array modified to use values in dq_table
"""
from jwst.datamodels import dqflags
# Get the DQ array and the flag definitions
if (dq_table is not None and
not np.isscalar(dq_table) and
len(dq_table.shape) and
len(dq_table)):
#
# Make an empty mask
dqmask = np.zeros(inmask.shape, dtype=np.uint32)
for record in dq_table:
bitplane = record['VALUE']
dqname = record['NAME'].strip()
try:
standard_bitvalue = dqflags.pixel[dqname]
except KeyError:
self.logger.info(('Keyword {} does not correspond to an existing DQ '
'mnemonic, so will be ignored'.format(dqname)))
continue
just_this_bit = np.bitwise_and(inmask, bitplane)
pixels = np.where(just_this_bit != 0)
dqmask[pixels] = np.bitwise_or(dqmask[pixels], standard_bitvalue)
else:
dqmask = inmask
return dqmask
def cr_funcs(self, npix, seed=4242):
"""Set up functions that will be used to generate
cosmic ray hits
Parameters
----------
npix : int
Number of pixels across which we are generating a collection of CR hits
seed : int
Seed value for random number generator
Returns
-------
crhits : int
Number of cosmic ray hits in the given number of pixels and time
        crs_perframe : numpy.ndarray
Array of random values from Poisson distribution for the cosmic ray hits
per readout frame
"""
crhits = npix * self.crrate * self.params['cosmicRay']['scale'] * self.frametime
np.random.seed(seed)
# Need a set of CRs for all frames, including those
# that are skipped, in order for the rate of CRs to
# be consistent.
crs_perframe = np.random.poisson(crhits, self.params['Readout']['nint'] *
self.params['Readout']['ngroup'] *
(self.params['Readout']['nframe']+self.params['Readout']['nskip']))
return crhits, crs_perframe
@logging_functions.log_fail
def create(self, params=None):
"""MAIN FUNCTION"""
# Read in the parameter file
if params is not None:
self.params = params
if self.params is None:
self.read_parameter_file()
# Get the log caught up on what's already happened
self.logger.info('\n\nRunning observation generator....\n')
self.logger.info('Reading parameter file: {}\n'.format(self.paramfile))
self.logger.info('Original log file name: ./{}'.format(STANDARD_LOGFILE_NAME))
# Make filter/pupil values respect the filter/pupil wheel they are in
self.params['Readout']['filter'], self.params['Readout']['pupil'] = \
utils.normalize_filters(self.params['Inst']['instrument'], self.params['Readout']['filter'], self.params['Readout']['pupil'])
# Create dictionary to use when looking in CRDS for reference files
self.crds_dict = crds_tools.dict_from_yaml(self.params)
# Expand param entries to full paths where appropriate
self.params = utils.full_paths(self.params, self.modpath, self.crds_dict, offline=self.offline)
self.file_check()
#print('self.linDark:', self.linDark)
#print('self.seed:', self.seed)
# Get the input dark if a filename is supplied
self.dark_setup()
# Create a mapping of the seed image and the dark data
seed_dict = self.seed_mapping()
# Finally, collect information about the detector,
# which will be needed for astrometry later
self.detector = self.linear_dark.header['DETECTOR']
self.instrument = self.linear_dark.header['INSTRUME']
self.fastaxis = self.linear_dark.header['FASTAXIS']
self.slowaxis = self.linear_dark.header['SLOWAXIS']
# Some basic checks on the inputs to make sure
# the script won't have to abort due to bad inputs
# self.check_params()
self.subdict = utils.read_subarray_definition_file(self.params['Reffiles']['subarray_defs'])
self.params = utils.get_subarray_info(self.params, self.subdict)
self.check_params()
# Read in cosmic ray library files if
# CRs are to be added to the data later
if self.runStep['cosmicray']:
self.read_cr_files()
# Read in gain map to be used for adding Poisson noise
# and to scale CRs to be in ADU
#self.read_gain_map()
# For the time being, use the mean gain value in constants.py in
# order to avoid discontinuities that can arise when gain reference
# files are made using binned areas of pixels, as they are now.
if self.instrument.lower() == 'niriss':
self.gain = MEAN_GAIN_VALUES['niriss']
elif self.instrument.lower() == 'nircam':
det = copy.deepcopy(self.detector.lower())
if 'long' in det:
det = det.replace('long', '5')
self.gain = MEAN_GAIN_VALUES['nircam'][det]
elif self.instrument.lower() == 'fgs':
self.gain = MEAN_GAIN_VALUES['fgs'][self.detector.lower()]
# Calculate the exposure time of a single frame, based on
# the size of the subarray
#tmpy, tmpx = self.seed.shape[-2:]
tmpy, tmpx = self.linear_dark.data.shape[-2:]
self.frametime = utils.calc_frame_time(self.instrument, self.params['Readout']['array_name'],
tmpx, tmpy, self.params['Readout']['namp'])
self.logger.info("Frametime is {}".format(self.frametime))
# ramptime is the exposure time for a single integration, including the
# time for one reset prior to the integration
self.ramptime = self.frametime * (1 + self.params['Readout']['ngroup'] *
(self.params['Readout']['nframe'] + self.params['Readout']['nskip']))
# rampexptime is the exposure time for the ramp excluding any resets
self.rampexptime = self.frametime * (self.params['Readout']['ngroup'] *
(self.params['Readout']['nframe']+self.params['Readout']['nskip']))
# Find the number of resets that occur at the start of the exposure
self.resets_before_exp()
# Calculate the rate of cosmic ray hits expected per frame
self.get_cr_rate()
# Read in saturation file
if self.params['Reffiles']['saturation'] is not None:
self.read_saturation_file()
else:
self.logger.warning(('No saturation map provided. Using '
'{} for all pixels.'.format(self.params['nonlin']['limit'])))
dy, dx = self.linear_dark.data.shape[2:]
self.satmap = np.zeros((dy, dx)) + self.params['nonlin']['limit']
# Read in non-linearity correction coefficients. We need these
# regardless of whether we are saving the linearized data or going
# on to make raw data
nonlincoeffs = self.get_nonlinearity_coeffs()
# Read in superbias file if present
self.read_superbias_file()
if len(self.linDark) > 1:
self.logger.info(('An estimate of the remaining processing time will be provided after the first '
'segment file has been created.\n\n'))
for i, linDark in enumerate(self.linDark):
# Run the timer over each segment in order to come up with
# a rough estimate of computation time
self.timer.start()
temp_outdir, basename = os.path.split(self.params['Output']['file'])
# Get the segment number of the file if present
linDarkfile = os.path.basename(linDark)
seg_location = linDarkfile.find('_seg')
if seg_location != -1:
seg_str = linDarkfile[seg_location+4:seg_location+7]
else:
try:
seg_location = seed_dict[linDark][0].find('_seg')
except AttributeError:
seg_location = -1
if seg_location != -1:
seg_str = seed_dict[linDark][0][seg_location+4:seg_location+7]
else:
seg_str = ''
if seg_str != '':
# Assume standard JWST filename format
try:
self.logger.info("Creating output file name with segment number.")
parts = basename.split('_')
if len(parts) == 5:
basename = '{}_{}_{}-seg{}_{}_{}'.format(parts[0], parts[1], parts[2], seg_str,
parts[3], parts[4])
else:
basename = basename.replace('.fits', '-seg{}.fits'.format(seg_str))
except IndexError:
# Non-standard filename format
basename = basename.replace('.fits', '-seg{}.fits'.format(seg_str))
#basename = os.path.join(temp_outdir, basename)
if i > 0:
self.linear_dark = self.read_dark_file(self.linDark[i])
# Find how many integrations are held in the dark. The same
# number of integrations should be produced from the seed image.
# In the case where the same seed image is matched to multiple
# dark files (e.g. imaging mode obs that needs to have file-
# splitting) we can only get this info from the dark.
try:
num_integrations = self.linear_dark.header['NINTS']
except KeyError:
num_integrations = None
seed_files = seed_dict[linDark]
if isinstance(seed_files[0], str):
self.logger.info('\nSeed files:')
for e in seed_files:
self.logger.info(e)
# Get the corresponding input seed image(s)
if isinstance(seed_files, str):
# If a single filename is supplied
self.seed_image, self.segmap, self.seedheader = self.read_seed(seed_files)
elif isinstance(seed_files, list):
# If the seed image is a list of files (due to high data
# volume)
self.seed_image, self.segmap, self.seedheader = self.combine_seeds(seed_files)
else:
# self.seed is a catalogSeed object.
# In this case we assume that self.segmap and
# self.seedheader have also been provided as the
# appropriate objects, since they are saved in
# the same file as the seed image
self.seed_image = copy.deepcopy(seed_files)
# If seed image is in units of electrons/sec then divide
# by the gain to put in ADU/sec
if 'UNITS' in self.seedheader.keys():
if self.seedheader['UNITS'] in ["e-/sec", "e-"]:
self.logger.info(("Seed image is in units of {}. Dividing by gain."
.format(self.seedheader['units'])))
self.seed_image /= self.gain
else:
raise ValueError(("'UNITS' keyword not present in header of "
"seed image. Unable to determine whether the "
"seed image is in units of ADU or electrons."))
# Translate to ramp if necessary,
# Add poisson noise and cosmic rays
# Rearrange into requested read pattern
# All done in one function to save memory
simexp, simzero = self.add_crs_and_noise(self.seed_image, num_integrations=num_integrations)
# Multiply flat fields
simexp = self.add_flatfield_effects(simexp)
simzero = self.add_flatfield_effects(np.expand_dims(simzero, axis=1))[:, 0, :, :]
# Mask any reference pixels
if self.params['Output']['grism_source_image'] is False:
simexp, simzero = self.mask_refpix(simexp, simzero)
# Add IPC effects
# (Dark current ramp already has IPC in it)
if self.runStep['ipc']:
simexp = self.add_ipc(simexp)
simzero = self.add_ipc(np.expand_dims(simzero, axis=1))[:, 0, :, :]
# Add the simulated source ramp to the dark ramp
lin_outramp, lin_zeroframe, lin_sbAndRefpix = self.add_synthetic_to_dark(simexp,
self.linear_dark,
syn_zeroframe=simzero)
# Add other detector effects (Crosstalk/PAM)
self.logger.info('Adding crosstalk')
lin_outramp = self.add_detector_effects(lin_outramp)
lin_zeroframe = self.add_detector_effects(np.expand_dims(lin_zeroframe, axis=1))[:, 0, :, :]
# We need to first subtract superbias and refpix signals from the
# original saturation limits, and then linearize them
# Refpix signals will vary from group to group, but only by a few
# ADU. So let's cheat and just use the refpix signals from group 0
# Create a linearized saturation map
limits = np.zeros_like(self.satmap) + 1.e6
if self.linear_dark.sbAndRefpix is not None:
lin_satmap = unlinearize.nonLinFunc(self.satmap - self.linear_dark.sbAndRefpix[0, 0, :, :],
nonlincoeffs, limits)
elif ((self.linear_dark.sbAndRefpix is None) & (self.runStep['superbias'])):
# If the superbias and reference pixel signal is not available
# but the superbias reference file is, then just use that.
lin_satmap = unlinearize.nonLinFunc(self.satmap - self.superbias,
nonlincoeffs, limits)
elif ((self.linear_dark.sbAndRefpix is None) & (self.runStep['superbias'] is False)):
# If superbias and refpix signal is not available and
# the superbias reffile is also not available, fall back to
# a superbias value that is roughly correct. Error in this value
# will cause errors in saturation flagging for the highest signal
# pixels.
manual_sb = np.zeros_like(self.satmap) + 12000.
lin_satmap = unlinearize.nonLinFunc(self.satmap - manual_sb,
nonlincoeffs, limits)
# Save the ramp if requested. This is the linear ramp,
# ready to go into the Jump step of the pipeline
self.linear_output = None
if 'linear' in self.params['Output']['datatype'].lower():
# Output filename: append 'linear'
if 'uncal' in basename:
linearrampfile = basename.replace('uncal', 'linear')
else:
linearrampfile = basename.replace('.fits', '_linear.fits')
# Full path of output file
#linearrampfile = linearrampfile.split('/')[-1]
linearrampfile = os.path.join(self.params['Output']['directory'], linearrampfile)
# Saturation flagging - to create the pixeldq extension
# and make data ready for ramp fitting
# Since we subtracted the superbias and refpix signal from the
# saturation map prior to linearizing, we can now compare that map
# to lin_outramp, which also does not include superbias nor refpix
# signal, and is linear.
groupdq = self.flag_saturation(lin_outramp, lin_satmap)
# Create the error and groupdq extensions
err, pixeldq = self.create_other_extensions(copy.deepcopy(lin_outramp))
if self.params['Inst']['use_JWST_pipeline']:
self.save_DMS(lin_outramp, lin_zeroframe, linearrampfile, mod='ramp',
err_ext=err, group_dq=groupdq, pixel_dq=pixeldq)
else:
self.save_fits(lin_outramp, lin_zeroframe, linearrampfile, mod='ramp',
err_ext=err, group_dq=groupdq, pixel_dq=pixeldq)
stp.add_wcs(linearrampfile, roll=self.params['Telescope']['rotation'])
self.logger.info("Final linearized exposure saved to:")
self.logger.info("{}".format(linearrampfile))
self.linear_output = linearrampfile
# If the raw version is requested, we need to unlinearize
# the ramp
self.raw_output = None
if 'raw' in self.params['Output']['datatype'].lower():
if self.linear_dark.sbAndRefpix is not None:
if self.params['Output']['save_intermediates']:
#base_name = self.params['Output']['file'].split('/')[-1]
ofile = os.path.join(self.params['Output']['directory'],
basename[0:-5] + '_doNonLin_accuracy.fits')
savefile = True
else:
ofile = None
savefile = False
self.logger.info('Unlinearizing exposure.')
raw_outramp = unlinearize.unlinearize(lin_outramp, nonlincoeffs, self.satmap,
lin_satmap,
maxiter=self.params['nonlin']['maxiter'],
accuracy=self.params['nonlin']['accuracy'],
save_accuracy_map=savefile,
accuracy_file=ofile)
raw_zeroframe = unlinearize.unlinearize(lin_zeroframe, nonlincoeffs, self.satmap,
lin_satmap,
maxiter=self.params['nonlin']['maxiter'],
accuracy=self.params['nonlin']['accuracy'],
save_accuracy_map=False)
# Add the superbias and reference pixel signal back in
self.logger.info('Adding superbias and reference pixel signals.')
raw_outramp = self.add_superbias_and_refpix(raw_outramp, lin_sbAndRefpix)
raw_zeroframe = self.add_superbias_and_refpix(raw_zeroframe, self.linear_dark.zero_sbAndRefpix)
# Make sure all signals are < 65535
raw_outramp[raw_outramp > 65535] = 65535
raw_zeroframe[raw_zeroframe > 65535] = 65535
# Save the raw ramp
#base_name = self.params['Output']['file'].split('/')[-1]
rawrampfile = os.path.join(self.params['Output']['directory'], basename)
if self.params['Inst']['use_JWST_pipeline']:
self.save_DMS(raw_outramp, raw_zeroframe, rawrampfile, mod='1b')
else:
self.save_fits(raw_outramp, raw_zeroframe, rawrampfile, mod='1b')
stp.add_wcs(rawrampfile, roll=self.params['Telescope']['rotation'])
self.logger.info("Final raw exposure saved to: ")
self.logger.info("{}".format(rawrampfile))
self.raw_output = rawrampfile
# Adding this as an attribute so it can be accessed by soss_simulator.py
self.raw_outramp = raw_outramp
else:
raise ValueError(("WARNING: raw output ramp requested, but the signal associated "
"with the superbias and reference pixels is not present in "
"the dark current data object. Quitting."))
# Stop the timer and record the elapsed time
self.timer.stop(name='seg_{}'.format(str(i+1).zfill(4)))
# If there is more than one segment, provide an estimate of processing time
self.logger.info('\n\nSegment {} out of {} complete.'.format(i+1, len(self.linDark)))
if len(self.linDark) > 1:
time_per_segment = self.timer.sum(key_str='seg_') / (i+1)
estimated_remaining_time = time_per_segment * (len(self.linDark) - (i+1)) * u.second
time_remaining = np.around(estimated_remaining_time.to(u.minute).value, decimals=2)
finish_time = datetime.datetime.now() + datetime.timedelta(minutes=time_remaining)
self.logger.info(('Estimated time remaining in obs_generator: {} minutes. '
'Projected finish time: {}'.format(time_remaining, finish_time)))
self.logger.info("Observation generation complete.")
logging_functions.move_logfile_to_standard_location(self.paramfile, STANDARD_LOGFILE_NAME,
yaml_outdir=self.params['Output']['directory'])
def create_group_entry(self, integration, groupnum, endday, endmilli, endsubmilli, endgroup,
xd, yd, gap, comp_code, comp_text, barycentric, heliocentric):
"""Add the GROUP extension to the output file
From an example Mark Kyprianou sent:
Parameters
----------
integration : int
Integration number
groupnum : int
Group number
endday : int
Days since Jan 1 2000
endmilli : int
Milliseconds of the day for given time
endsubmilli : int
Time since last millisecond?
endgroup : str
End group time, e.g. '2016-01-18T02:43:26.061'
xd : int
Number_of_columns e.g. 2048
yd : int
Number_of_rows e.g. 2048
gap : int
Number of gaps in telemetry
comp_code : int
Completion code number e.g. 0 (nominal?)
comp_text : str
Completion code text e.g. 'COMPLETE'-from howard
'Normal Completion' - from mark
barycentric : float
Barycentric end time (mjd) 57405.11165225
heliocentric : float
Heliocentric end time (mjd) 57405.1163058
Returns
-------
group : numpy.ndarray
Input values organized into format needed for group entry in
JWST formatted file
"""
group = np.ndarray(
(1, ),
dtype=[
('integration_number', '<i2'),
('group_number', '<i2'),
('end_day', '<i2'),
('end_milliseconds', '<i4'),
('end_submilliseconds', '<i2'),
('group_end_time', 'S26'),
('number_of_columns', '<i2'),
('number_of_rows', '<i2'),
('number_of_gaps', '<i2'),
('completion_code_number', '<i2'),
('completion_code_text', 'S36'),
('bary_end_time', '<f8'),
('helio_end_time', '<f8')
]
)
group[0]['integration_number'] = integration
group[0]['group_number'] = groupnum
group[0]['end_day'] = endday
group[0]['end_milliseconds'] = endmilli
group[0]['end_submilliseconds'] = endsubmilli
group[0]['group_end_time'] = endgroup
group[0]['number_of_columns'] = xd
group[0]['number_of_rows'] = yd
group[0]['number_of_gaps'] = gap
group[0]['completion_code_number'] = comp_code
group[0]['completion_code_text'] = comp_text
group[0]['bary_end_time'] = barycentric
group[0]['helio_end_time'] = heliocentric
return group
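# A minimal usage sketch of create_group_entry; the values below are purely
# illustrative placeholders, not taken from a real observation:
#
#     entry = self.create_group_entry(
#         integration=1, groupnum=1, endday=7321, endmilli=9806061,
#         endsubmilli=617, endgroup='2020-01-18T02:43:26.061',
#         xd=2048, yd=2048, gap=0, comp_code=0,
#         comp_text='Normal Completion',
#         barycentric=58866.11165225, heliocentric=58866.1163058)
#
#     entry['number_of_columns']   # -> array([2048], dtype=int16)
#     entry['group_end_time']      # -> the ISO time string, as bytes (S26)
#
# populate_group_table() below stacks one such row per group via np.vstack.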
def create_other_extensions(self, data):
"""If the linearized version of the file is to be saved, we
need to create the error, pixel dq, and group dq extensions
Parameters
----------
data : numpy.ndarray
Array containing the exposure data
Returns
-------
err : numpy.ndarray
Array containing error values
pixeldq : numpy.ndarray
Array containing data quality flags
"""
# error extension - keep it simple. sqrt of signal
toolow = data < 0.
data[toolow] = 0.
err = np.sqrt(data)
# pixel dq extension - populate using the mask reference file
if self.runStep['badpixfile']:
mask_hdu = fits.open(self.params['Reffiles']['badpixmask'])
mask = mask_hdu[1].data
dqdef = mask_hdu[2].data
mask_hdu.close()
# Crop to match output subarray size
if "FULL" not in self.params['Readout']['array_name']:
mask = self.crop_to_subarray(mask)
# If the JWST pipeline is available,
# convert mask values to those used by the pipeline
# based on the names in dq_def. This function is basically
# a copy of the dynamic_mask function in dynamicdq.py
# in the JWST pipeline
if self.params['Inst']['use_JWST_pipeline']:
pixeldq = self.convert_mask(mask, dqdef)
else:
# If the pipeline is not to be used, then the
# best we can do is assume that the input bad
# pixel value definitions match what the pipeline
# expects, and keep the mask as read in.
pixeldq = mask
else:
self.logger.info(("No bad pixel mask provided. Setting all pixels in "
"pixel data quality extension to 0, indicating they "
"are good."))
pixeldq = np.zeros(data.shape[2:]).astype(np.uint32)
return err, pixeldq
def crop_to_subarray(self, data):
"""Crop the given (full frame) array to specified subarray
Parameters
----------
data : numpy.ndarray
Array containing exposure data
Returns
-------
data : numpy.ndarray
Array cropped to requested size, using self.subarray_bounds
"""
return data[self.subarray_bounds[1]:self.subarray_bounds[3] + 1,
self.subarray_bounds[0]:self.subarray_bounds[2] + 1]
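# Worked example of the slicing above, assuming hypothetical subarray bounds
# self.subarray_bounds = [xstart, ystart, xend, yend] = [0, 0, 319, 319]
# (a 320x320 corner subarray):
#
#     full = np.zeros((2048, 2048))
#     sub = full[0:320, 0:320]   # shape (320, 320); the +1 makes the end
#                                # bounds inclusive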
def crosstalk_image(self, orig, coeffs):
"""Using Xtalk coefficients, generate an image of the crosstalk signal
Parameters
----------
orig : numpy.ndarray
Array to add crosstalk to
coeffs : numpy.ndarray
Crosstalk coefficients from the input coefficient file
Returns
-------
xtalk_corr_im : numpy.ndarray
Input data modified to have crosstalk
"""
xtalk_corr_im = np.zeros_like(orig)
subamp_shift = {"0": 1, "1": -1, "2": 1, "3": -1}
# List of starting columns for all quadrants.
xtqstart = [0, 512, 1024, 1536, 2048]
for amp in range(4):
to_mult = orig[:, xtqstart[amp]:xtqstart[amp+1]]
receivers = []
for i in range(4):
if i != amp:
receivers.append(i)
# Reverse the values to multiply if the amps being used
# are adjacent or 3 amps apart
for subamp in receivers:
index = 'xt'+str(amp+1)+str(subamp+1)
if ((np.absolute(amp-subamp) == 1) | (np.absolute(amp-subamp) == 3)):
corr_amp = np.fliplr(to_mult) * coeffs[index]
if (np.absolute(amp-subamp) == 2):
corr_amp = to_mult * coeffs[index]
xtalk_corr_im[:, xtqstart[subamp]:xtqstart[subamp+1]] += corr_amp
# Per Armin's instructions, now repeat the process
# using his xt??post coefficients, but shift the arrays
# by one pixel according to readout direction.
for subamp in receivers:
index = 'xt'+str(amp+1)+str(subamp+1)+'post'
if ((np.absolute(amp-subamp) == 1) | (np.absolute(amp-subamp) == 3)):
corr_amp = np.fliplr(to_mult) * coeffs[index]
corr_amp = np.roll(corr_amp, subamp_shift[str(subamp)], axis=1)
if (np.absolute(amp-subamp) == 2):
corr_amp = to_mult * coeffs[index]
corr_amp = np.roll(corr_amp, subamp_shift[str(subamp)])
xtalk_corr_im[:, xtqstart[subamp]:xtqstart[subamp+1]] += corr_amp
# Save the crosstalk correction image
if self.params['Output']['save_intermediates'] is True:
phdu = fits.PrimaryHDU(xtalk_corr_im)
base_name = self.params['Output']['file'].split('/')[-1]
xtalkout = os.path.join(self.params['Output']['directory'], base_name[0:-5] +
'_xtalk_correction_image.fits')
phdu.writeto(xtalkout, overwrite=True)
return xtalk_corr_im
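# A minimal sketch of how a single crosstalk term above is formed, assuming a
# hypothetical coefficient table row with coeffs['xt12'] = 1e-4 (amp 1 -> amp 2,
# adjacent amps, so the source quadrant is mirrored before scaling):
#
#     source = orig[:, 0:512]                     # amp 1 (columns 0-511)
#     victim_term = np.fliplr(source) * 1e-4      # mirrored and scaled
#     xtalk_corr_im[:, 512:1024] += victim_term   # deposited into amp 2
#
# The 'xt??post' terms repeat this with an extra one-pixel np.roll along the
# fast axis, with the roll direction taken from subamp_shift.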
def dark_setup(self):
"""The input value for self.linDark can be one of several types.
Deal with that here and get self.linear_dark
"""
# Get the input dark if a filename is supplied
if isinstance(self.linDark, mirage.utils.read_fits.Read_fits):
# Case where user has provided a Read_fits object
self.logger.info('Dark object provided')
self.linear_dark = copy.deepcopy(self.linDark)
self.linDark = ['none']
else:
if self.linDark is None:
# If no linearized dark is provided, assume the entry in the
# yaml file is the proper format
self.linDark = [self.params['Reffiles']['linearized_darkfile']]
elif isinstance(self.linDark, list):
# Case where dark is split amongst multiple files due to high
# data volume
self.logger.info('Dark file list: ')
for e in self.linDark:
self.logger.info('{}'.format(e))
elif isinstance(self.linDark, str):
# If a single filename is given, read in the file
self.logger.info('Reading in dark file: {}'.format(self.linDark))
self.linDark = [self.linDark]
else:
raise TypeError('Unsupported type for self.linDark: {}'.format(type(self.linDark)))
self.linear_dark = self.read_dark_file(self.linDark[0])
def do_cosmic_rays(self, image, ngroup, iframe, ncr, seedval):
"""Add cosmic rays to input data
Parameters
----------
image : numpy.ndarray
2D array containing exposure data to add cosmic rays to
ngroup : int
Group number of the image. Only used when writing out summary info
iframe : int
Frame number of the image. Only used when writing out summary info
ncr : int
Number of cosmic rays to add to the frame
seedval : int
Seed to use for random number generator
Returns
-------
image : numpy.ndarray
Input image with cosmic rays added
"""
# Change the seed each time this is run, or else simulated
# exposures that have more than 1 integration will have the
# same cosmic rays in each integration
self.generator1 = random.Random()
self.generator1.seed(seedval)
# Add cosmic rays to a frame
nray = int(ncr)
i = 0
dims = image.shape
while i < nray:
i = i+1
j = int(self.generator1.random()*dims[0])
k = int(self.generator1.random()*dims[1])
n = int(self.generator1.random()*10.0)
m = int(self.generator1.random()*1000.0)
crimage = np.copy(self.cosmicrays[n][m, :, :])
i1 = max(j-10, 0)
i2 = min(j+11, dims[0])
j1 = max(k-10, 0)
j2 = min(k+11, dims[1])
k1 = 10-(j-i1)
k2 = 10+(i2-j)
l1 = 10-(k-j1)
l2 = 10+(j2-k)
# Insert cosmic ray (divided by gain to put into ADU)
image[i1:i2, j1:j2] = image[i1:i2, j1:j2] + crimage[k1:k2, l1:l2] / self.gain # self.gainim[k1:k2, l1:l2]
self.cosmicraylist.write("{} {} {} {} {} {} {}\n".format((j2-j1)/2+j1, (i2-i1)/2+i1, ngroup,
iframe, n, m, np.max(crimage[k1:k2, l1:l2])))
return image
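# Worked example of the windowing arithmetic above for a hit centered well
# inside the detector, e.g. j = 100, k = 200 on a 2048x2048 frame:
#     i1, i2 = 90, 111   and   j1, j2 = 190, 211   -> a 21x21 target region
#     k1, k2 = 0, 21     and   l1, l2 = 0, 21      -> the full 21x21 CR stamp
# Near an edge (e.g. j = 3) the image window is clipped (i1 = 0, i2 = 14) and
# k1, k2 become 7, 21, so only the overlapping part of the stamp is added.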
def do_poisson(self, signalimage, seedval):
"""Add poisson noise to an input image. Input is assumed
to be in units of ADU, meaning it must be multiplied by
the gain when calculating Poisson noise. Then divide by the
gain in order for the returned image to also be in ADU
Parameters
----------
signalimage : numpy.ndarray
2D array of signals in ADU
seedval : int
Seed value for the random number generator
Returns
-------
newimage : numpy.ndarray
signalimage with Poisson noise added
"""
# Set the seed
np.random.seed(seedval)
# Find the appropriate quantum yield value for the filter
# if self.params['simSignals']['photonyield']:
# try:
# if self.params['Readout']['pupil'][0].upper() == 'F':
# usefilt = 'pupil'
# else:
# usefilt = 'filter'
# pym1=self.qydict[self.params['Readout'][usefilt]] - 1.
# except:
# pym1=0.
# Quantum yield is 1.0 for all NIRCam filters
pym1 = 0.
# Can't add Poisson noise to pixels with negative values
# Set those to zero when adding noise, then replace with
# original value
signalgain = signalimage * self.gain
highpix = np.where(signalgain == np.nanmax(signalgain))
if np.nanmin(signalgain) < 0.:
neg = signalgain < 0.
negatives = copy.deepcopy(signalgain)
negatives[neg] = signalgain[neg]
signalgain[neg] = 0.
# Add poisson noise
newimage = np.random.poisson(signalgain, signalgain.shape).astype(np.float64)
if np.nanmin(signalgain) < 0.:
newimage[neg] = negatives[neg]
newimage /= self.gain
# Quantum yield for NIRCam is always 1.0 (so psym1=0)
# if self.params['simSignals']['photonyield'] and pym1 > 0.000001 and newimage[i, j] > 0:
# if self.params['simSignals']['pymethod']:
# # Calculate the values to make the poisson
# # results the same with/without photon
# # Yield (but not for pymethod true and false)
# # ...use yield -1 because the value
# # cannot be less than 1
# values = np.random.poisson(pym1, newimage[i, j])
# newimage[i, j] = newimage[i, j] + values.sum()
# else:
# newimage[i, j] = newimage[i, j] * self.qydict[self.params['Readout'][usefilt]]
# fract = newimage[i, j] - int(newimage[i, j])
# if self.generator2.random() < fract:
# newimage[i, j] = newimage[i, j] + 1
return newimage
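# A minimal numerical sketch of the ADU <-> electron bookkeeping above,
# assuming a hypothetical gain of 2.0 e-/ADU and a pixel that accumulated
# 100 ADU of signal in this frame:
#
#     electrons = 100.0 * 2.0                 # 200 e-
#     noisy = np.random.poisson(electrons)    # e.g. 213 e- on one draw
#     adu = noisy / 2.0                       # back to ~106.5 ADU
#
# Pixels that are negative going in are excluded from the Poisson draw and
# restored afterwards, since np.random.poisson requires non-negative means.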
def file_check(self):
"""
Make sure the requested input files exist
For reference files, assume first that they are located in
the directory tree under the datadir (from the MIRAGE_DATA
environment variable). If not, assume the input is a full path
and check there.
"""
rlist = [['Reffiles', 'badpixmask'],
['Reffiles', 'linearity'],
['Reffiles', 'saturation'],
['Reffiles', 'ipc'],
['Reffiles', 'pixelAreaMap'],
['Reffiles', 'gain']]
plist = [['cosmicRay', 'path']]
for ref in rlist:
self.ref_check(ref)
for path in plist:
self.path_check(path)
def flag_saturation(self, data, sat):
"""Flag saturated pixels in input data. Return a dq map
with the appropriate dq value (2) for saturation
Parameters
----------
data : numpy.ndarray
Exposure data
sat : numpy.ndarray
2D Saturation map
Returns
-------
satmap : numpy.ndarray
Map containing flagged saturated pixels
"""
satmap = (data > sat).astype(np.uint32)
satmap[satmap > 0] = 2
return satmap
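# Example of the flagging above: with sat = 50000 everywhere and a pixel ramp
# of [10000, 30000, 52000, 60000] ADU, the returned values for that pixel are
# [0, 0, 2, 2], i.e. value 2 (the JWST group-DQ SATURATED flag) is set in
# every group whose signal exceeds the saturation level.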
def frame_to_ramp(self, data):
"""Convert rate image to ramp, add poisson noise
and cosmic rays
Parameters
----------
data : numpy.ndarray
Seed image. Should be a 2d frame or 3d integration.
If the original seed image is a 4d exposure, call frame_to_ramp
with one integration at a time.
Returns
-------
outramp : numpy.ndarray
3d integration with cosmic rays and poisson noise
zeroframe : numpy.ndarray
2d zeroth frame
"""
# Output ramp will be in requested readout pattern!
ndim = len(data.shape)
if ndim == 4:
raise ValueError("Seed image shouldn't be 4D!")
elif ndim == 3:
ngroupin, yd, xd = data.shape
elif ndim == 2:
yd, xd = data.shape
# If a ramp is given, create a -1st frame that is all zeros
# so that we can create deltaframes for all frames later
# This should be the case only for TSO observations or
# moving targets.
if ndim == 3:
data = np.vstack((np.zeros((1, yd, xd)), data))
outramp = np.zeros((self.params['Readout']['ngroup'], yd, xd), dtype=float)
# Set up functions to apply cosmic rays later
# Need the total number of active pixels in the
# output array to multiply the CR rate by
if self.runStep['cosmicray']:
npix = int(yd * xd + 0.02)
# Reinitialize the cosmic ray functions for each integration
crhits, crs_perframe = self.cr_funcs(npix, seed=self.params['cosmicRay']['seed'])
# open output file to contain the list of cosmic rays
base_name = self.params['Output']['file'].split('/')[-1]
crlistout = os.path.join(self.params['Output']['directory'], base_name[0:-5] + '_cosmicrays.list')
self.open_cr_list_file(crlistout, crhits)
# Difference between the latest outimage frame and the
# latest newsignalimage frame. This is important when nframe>1
if ndim == 2:
totalsignalimage = data
elif ndim == 3:
totalsignalimage = data[1, :, :]
# Define signal in the previous frame
# Needed in loop below
previoussignal = np.zeros((yd, xd))
# Container for zeroth frame
zeroframe = None
# Total frames per group (including skipped frames)
framesPerGroup = self.params['Readout']['nframe']+self.params['Readout']['nskip']
# Loop over each group
for i in range(self.params['Readout']['ngroup']):
# Hold the averaged group signal
accumimage = np.zeros((yd, xd))
# Group 0: the initial nskip frames don't exist,
# so adjust indexes accordingly
rstart = 0
if i == 0:
rstart = self.params['Readout']['nskip']
# Loop over frames within each group if necessary
for j in range(rstart, framesPerGroup):
# Frame index number in input data
frameindex = (i * framesPerGroup) + j - self.params['Readout']['nskip']
# Signal only since previous frame
if ndim == 3:
deltaframe = data[frameindex+1] - data[frameindex]
elif ndim == 2:
deltaframe = data * self.frametime
# Add poisson noise
poissonsignal = self.do_poisson(deltaframe, self.params['simSignals']['poissonseed'])
# Increment poisson seed value so that the next frame doesn't have identical
# noise
self.params['simSignals']['poissonseed'] += 1
# Create the frame by adding the delta signal
# and poisson noise associated with the delta signal
# to the previous frame
framesignal = previoussignal + poissonsignal
# Add cosmic rays
if self.runStep['cosmicray']:
framesignal = self.do_cosmic_rays(framesignal, i, j,
crs_perframe[frameindex],
self.params['cosmicRay']['seed'])
# Increment the seed, so that every frame doesn't have identical
# cosmic rays
self.params['cosmicRay']['seed'] += 1
# Keep track of the total signal in the ramp,
# so that we don't neglect signal which comes
# in during the frames that are skipped.
previoussignal = copy.deepcopy(framesignal)
if ((i == 0) & (j == 0)):
zeroframe = copy.deepcopy(framesignal)
# Add the frame to the group signal image
if j >= self.params['Readout']['nskip']:
self.logger.info(' Averaging frame {} into group {}'.format(frameindex, i))
accumimage += framesignal
elif j < self.params['Readout']['nskip']:
self.logger.info(' Skipping frame {}'.format(frameindex))
# divide by nframes if > 1
if self.params['Readout']['nframe'] > 1:
accumimage /= self.params['Readout']['nframe']
outramp[i, :, :] = accumimage
if self.runStep['cosmicray']:
# Close the cosmic ray list file
self.cosmicraylist.close()
return outramp, zeroframe
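# Worked example of the frame bookkeeping above, assuming a hypothetical
# readout pattern with nframe = 2, nskip = 3 and ngroup = 2 (so
# framesPerGroup = 5):
#
#   group 0: j runs from rstart = nskip = 3 to 4, giving frameindex = 0, 1
#            -> frames 0 and 1 are averaged into group 0
#   group 1: j runs 0..4, giving frameindex = 2, 3, 4, 5, 6
#            -> frames 2-4 are skipped, frames 5 and 6 are averaged into group 1
#
# The skipped frames still accumulate Poisson noise and cosmic rays through
# previoussignal, so no charge is lost between groups.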
def frame_to_ramp_no_cr(self, data):
"""Convert input seed image/ramp to a
ramp that includes poisson noise. No
cosmic rays are added
Parameters
----------
data : numpy.ndarray
Seed image. Should be a 2d frame or 3d integration.
If the original seed image is a 4d exposure, call frame_to_ramp
with one integration at a time.
Returns
-------
outramp : numpy.ndarray
3d integration with poisson noise added (no cosmic rays)
zeroframe : numpy.ndarray
2d zeroth frame
"""
# Output ramp will be in requested readout pattern!
ndim = len(data.shape)
if ndim == 3:
ngroupin, yd, xd = data.shape
elif ndim == 2:
yd, xd = data.shape
# Define output ramp
outramp = np.zeros((self.params['Readout']['ngroup'], yd, xd))
# If a ramp is given, create a -1st frame that is all zeros
# so that we can create deltaframes for all frames later
if ndim == 3:
data = np.vstack((np.zeros((1, yd, xd)), data))
# Container for zeroth frame
zeroframe = None
if ndim == 2:
totalsignal = np.zeros((yd, xd))
# Total frames per group (including skipped frames)
framesPerGroup = self.params['Readout']['nframe']+self.params['Readout']['nskip']
# Loop over each group
for i in range(self.params['Readout']['ngroup']):
accumimage = np.zeros((yd, xd))
# Loop over frames within each group if necessary
# create each frame
for j in range(framesPerGroup):
# Frame index number in input data
frameindex = (i * framesPerGroup) + j
# Add poisson noise
if ndim == 3:
framesignal = self.do_poisson(data[frameindex+1],
self.params['simSignals']['poissonseed'])
elif ndim == 2:
framesignal = self.do_poisson(data*frameindex,
self.params['simSignals']['poissonseed'])
# Increment poisson seed value so that the next frame doesn't have identical
# noise
self.params['simSignals']['poissonseed'] += 1
if ((i == 0) & (j == 0)):
zeroframe = copy.deepcopy(framesignal)
# Add the frame to the group signal image
if ((self.params['Readout']['nskip'] > 0) & (j >= self.params['Readout']['nskip'])):
self.logger.info(' Averaging frame {} into group {}'.format(frameindex, i))
accumimage += framesignal
elif ((self.params['Readout']['nskip'] > 0) & (j < self.params['Readout']['nskip'])):
self.logger.info(' Skipping frame {}'.format(frameindex))
# divide by nframes if > 1
if self.params['Readout']['nframe'] > 1:
accumimage /= self.params['Readout']['nframe']
outramp[i, :, :] = accumimage
return outramp, zeroframe
def get_cr_rate(self):
"""Get the base cosmic ray impact probability.
The following values are based on JWST-STScI-001928, "A library of simulated cosmic ray events impacting
JWST HgCdTe detectors by Massimo Robberto", Table 1, times the pixel area of 18 microns square = 3.24e-06
square cm. Values are in nucleon events per pixel per second. Corresponding values from the report are
4.8983 nucleons/cm^2/second, 1.7783 nucleons/cm^2/second, and 3046.83 nucleons/cm^2/second. The expected
rates per full frame read (10.73677 seconds) over the whole set of 2048x2048 pixels are 715, 259, and
444609 events respectively.
Note that the SUNMIN rate is higher than the SUNMAX rate. The MIN and MAX labels refer to the solar activity,
and the galactic cosmic ray contribution at L2 is reduced at solar maximum compared to solar minimum. The
FLARE case is for the largest solar flare event on record (see the Robberto report) and corresponds to conditions
under which JWST would presumably not be operating.
"""
self.crrate = 0.
# The previous values were per full frame read and there was a transcription issue in Volk's code. These
# have been corrected. Values are cosmic ray "hit" rates per pixel per second.
if "SUNMIN" in self.params["cosmicRay"]["library"]:
self.crrate = 1.587e-05
if "SUNMAX" in self.params["cosmicRay"]["library"]:
self.crrate = 5.762e-06
if "FLARES" in self.params["cosmicRay"]["library"]:
self.crrate = 0.0098729
if self.crrate > 0.:
self.logger.info("Base cosmic ray probability per pixel per second: {}".format(self.crrate))
def get_nonlin_coeffs(self, linfile):
"""Read in non-linearity coefficients from given file
Parameters
----------
linfile : str
Name of fits file containing linearity coefficients
Returns
-------
nonlin : numpy.ndarray
Collection of nonlinearity correction coefficients
"""
nonlin, nonlinheader = self.read_cal_file(linfile)
# Set NaN coefficients such that no correction will be made
nans = np.isnan(nonlin[1, :, :])
numnan = np.sum(nans)
if numnan > 0:
self.logger.info(("The linearity coefficients of {} pixels are NaNs. "
"Setting these coefficients such that no linearity "
"correction is made.".format(numnan)))
for i, cof in enumerate(range(nonlin.shape[0])):
tmp = nonlin[cof, :, :]
if i == 1:
tmp[nans] = 1.
else:
tmp[nans] = 0.
nonlin[cof, :, :] = tmp
# # Crop to appropriate subarray - ALREADY DONE IN read_cal_file
# if "FULL" not in self.params['Readout']['array_name']:
# nonlin = self.crop_to_subarray(nonlin)
return nonlin
def get_nonlinearity_coeffs(self):
"""Wrapper around get_nonlin_coeffs. If the file can't
be opened, or no file is given, the code falls back to some
average non-linearity coefficients. This would probably be
bad to use...
"""
if self.params['Reffiles']['linearity'] is not None:
try:
nonlin = self.get_nonlin_coeffs(self.params['Reffiles']['linearity'])
except Exception:
self.logger.warning(("Unable to read in non-linearity correction coefficients "
"from {}.".format(self.params['Reffiles']['linearity'])))
self.logger.info("Using a set of mean coefficients.")
nonlin = np.array([0., 1.0, 9.69903112e-07, 3.85263835e-11,
1.09267058e-16, -5.30613939e-20, 9.27963411e-25])
else:
self.logger.info(("No linearity coefficient file provided. Proceeding using a "
"set of mean coefficients derived from CV3 data."))
nonlin = np.array([0., 1.0, 9.69903112e-07, 3.85263835e-11,
1.09267058e-16, -5.30613939e-20, 9.27963411e-25])
# print('Nonlinearity coefficients: ', nonlin)
return nonlin
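# The coefficient ordering above (c0 = 0, c1 = 1, then small higher-order
# terms) is consistent with a polynomial correction in the measured signal.
# A hedged sketch of evaluating such a polynomial for one pixel -- the exact
# functional form actually used lives in mirage's unlinearize module:
#
#     signal = 30000.0   # ADU, illustrative value
#     corrected = sum(c * signal**i for i, c in enumerate(coeffs_one_pixel))
#
# With c0 = 0 and c1 = 1 the correction reduces to the identity plus small
# higher-order terms, which is why NaN pixels are reset to exactly that.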
def invert_ipc_kernel(self, kern):
"""
Invert the IPC kernel such that it goes from being used to remove
IPC effects from data, to being used to add IPC effects to data,
or vice versa.
Parameters
----------
kern : obj
numpy ndarray, either 2D or 4D, containing the kernel
Returns
-------
newkernel : numpy.ndarray
Array containing the inverted kernel
"""
shape = kern.shape
ys = 0
ye = shape[-2]
xs = 0
xe = shape[-1]
if shape[-1] == 2048:
xs = 4
xe = 2044
if shape[-2] == 2048:
ys = 4
ye = 2044
if len(shape) == 2:
subkernel = kern[ys:ye, xs:xe]
elif len(shape) == 4:
subkernel = kern[:, :, ys:ye, xs:xe]
dims = subkernel.shape
# Force subkernel to be 4D to make the function cleaner
# Dimensions are (kernely, kernelx, detectory, detectorx)
if len(dims) == 2:
subkernel = np.expand_dims(subkernel, axis=2)
subkernel = np.expand_dims(subkernel, axis=3)
dims = subkernel.shape
delta = subkernel * 0.
nyc = dims[0] // 2
nxc = dims[1] // 2
delta[nyc, nxc, :, :] = 1.
a1 = np.fft.fft2(subkernel, axes=(0, 1))
a2 = np.fft.fft2(delta, axes=(0, 1))
aout = a2 / a1
imout = np.fft.ifft2(aout, axes=(0, 1))
imout1 = np.fft.fftshift(imout, axes=(0, 1))
realout1 = np.real(imout1)
# If the input kernel was 2D, make the output 2D
# If the input was 4D and had reference pixels, then
# surround the inverted kernel with reference pixels
if len(shape) == 2:
newkernel = realout1[:, :, 0, 0]
elif len(shape) == 4:
newkernel = np.copy(kern)
newkernel[:, :, ys:ye, xs:xe] = realout1
# Save the inverted kernel for future simulator runs
h0 = fits.PrimaryHDU()
h1 = fits.ImageHDU(newkernel)
h1.header["DETECTOR"] = self.detector
h1.header["INSTRUME"] = self.params["Inst"]["instrument"]
hlist = fits.HDUList([h0, h1])
indir, infile = os.path.split(self.params["Reffiles"]["ipc"])
outname = os.path.join(indir, "Kernel_to_add_IPC_effects_from_" + infile)
hlist.writeto(outname, overwrite=True)
self.logger.info(("Inverted IPC kernel saved to {} for future simulator "
"runs.".format(outname)))
return newkernel
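# A minimal standalone sketch of the 2D inversion performed above: dividing
# the FFT of a delta function by the FFT of the kernel yields a kernel whose
# convolution undoes (or, applied to clean data, adds) the IPC coupling.
#
#     k = np.array([[0.0,  0.01, 0.0],
#                   [0.01, 0.96, 0.01],
#                   [0.0,  0.01, 0.0]])          # illustrative 3x3 IPC kernel
#     delta = np.zeros_like(k); delta[1, 1] = 1.0
#     inv = np.real(np.fft.fftshift(np.fft.ifft2(np.fft.fft2(delta) /
#                                                np.fft.fft2(k))))
#     # circularly convolving inv with k recovers ~delta to numerical precision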
def map_seeds_to_dark(self):
"""
Create a mapping of which seed image filenames belong to each dark
file. This is needed primarily for cases where the dark and/or
seeds are split into multiple files due to data volume limitations
Returns
-------
mapping : dict
Dictionary that gives the list of seed images associated with
each dark current file
"""
self.seed.sort()
mapping = {}
for dark in self.linDark:
seg = dark.find('_seg')
seg_str = dark[seg+1:seg+7]
seeds = [name for name in self.seed if seg_str in name]
mapping[dark] = seeds
return mapping
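# Example of the filename matching above, with hypothetical file names:
#
#     self.linDark = ['jw001_nrca1_seg001_dark.fits',
#                     'jw001_nrca1_seg002_dark.fits']
#     self.seed    = ['jw001_nrca1_seg001_part001_seed.fits',
#                     'jw001_nrca1_seg001_part002_seed.fits',
#                     'jw001_nrca1_seg002_part001_seed.fits']
#
# For the first dark, seg_str = 'seg001', so the mapping picks out the two
# seed files containing 'seg001'; the remaining seed maps to the 'seg002' dark.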
def mask_refpix(self, ramp, zero):
"""Make sure that reference pixels have no signal
in the simulated source ramp
Parameters
----------
ramp : numpy.ndarray
Array containing exposure data
zero : numpy.ndarray
Zeroth frame data
Returns
-------
ramp : numpy.ndarray
Exposure data with reference pixels zeroed out
zero : numpy.ndarray
Zeroth frame data with reference pixels zeroed out
"""
maskimage = np.zeros((self.ffsize, self.ffsize), dtype=int)
maskimage[4:self.ffsize - 4, 4:self.ffsize - 4] = 1.
# Crop the mask to match the requested output array
if "FULL" not in self.params['Readout']['array_name']:
maskimage = self.crop_to_subarray(maskimage)
ramp *= maskimage
zero *= maskimage
return ramp, zero
def open_cr_list_file(self, filename, hits):
"""Open a new file and print header info. This file
that will contain the list and positions of inserted
cosmic rays.
Parameters
----------
filename : str
Name of ascii file to contain the summary of added cosmic rays
hits : int
Number of cosmic ray hits per frame
Returns
-------
None
"""
self.cosmicraylist = open(filename, "w")
self.cosmicraylist.write("# Cosmic ray list (file set %s random seed %d)\n" %
(self.crfile, self.params['cosmicRay']['seed']))
self.cosmicraylist.write('# Cosmic ray rate per frame: %13.6e (scale factor %f)\n' %
(hits, self.params['cosmicRay']['scale']))
self.cosmicraylist.write(("Image_x Image_y Group Frame CR_File_Index CR_file_frame "
"Max_CR_Signal\n"))
def path_check(self, p):
"""
Check for the existence of the input path.
Assume first that the path is in relation to
the directory tree specified by the MIRAGE_DATA
environment variable
Parameters
----------
p : tup
Nested keys that point to a directory in self.params
Returns
-------
Nothing
"""
pth = self.params[p[0]][p[1]]
c1 = os.path.exists(pth)
if not c1:
raise NotADirectoryError(("WARNING: Unable to find the requested path "
"{}. Not present in directory tree specified by "
"the {} environment variable."
.format(pth, self.env_var)))
def populate_group_table(self, starttime, grouptime, ramptime, numint, numgroup, ny, nx):
"""Create some reasonable values to fill the GROUP extension table.
These will not be completely correct because access to other ssb
scripts and more importantly, databases, is necessary. But they should be
close.
Parameters
----------
starttime : astropy.time.Time
Starting time of exposure
grouptime : float
Exposure time of a single group (seconds)
ramptime : float
Exposure time of the entire exposure (seconds)
numint : int
Number of integrations in data
numgroup : int
Number of groups per integration
ny : int
Number of pixels in the y dimension
nx : int
Number of pixels in the x dimension
Returns
-------
grouptable : numpy.ndarray
Group extension data for all groups in the exposure
"""
# Create the table with a first row populated by garbage
grouptable = self.create_group_entry(999, 999, 0, 0, 0, 'void', 0, 0, 0, 0, 'void', 1., 1.)
# Quantities that are fixed for all exposures
compcode = 0
comptext = 'Normal Completion'
numgap = 0
# Ignore warnings as astropy.time.Time will give a warning
# related to unknown leap seconds if the date is too far in
# the future.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
baseday = Time('2020-01-01T00:00:00')
# Integration start times
rampdelta = TimeDelta(ramptime, format='sec')
groupdelta = TimeDelta(grouptime, format='sec')
intstarts = starttime + (np.arange(numint)*rampdelta)
for integ in range(numint):
groups = np.arange(1, numgroup+1)
groupends = intstarts[integ] + (np.arange(1, numgroup+1)*groupdelta)
endday = (groupends - baseday).jd
# If the integration has a single group, force endday to be an array
if isinstance(endday, float):
endday = np.array([endday])
enddayint = [int(s) for s in endday]
# Now to get end_milliseconds, we need milliseconds from the beginning
# of the day
inday = TimeDelta(endday - enddayint, format='jd')
endmilli = inday.sec * 1000.
# Submilliseconds - just use a random number
endsubmilli = np.random.randint(0, 1000, len(endmilli))
# Group end time. need to remove : and - and make lowercase t
groupending = groupends.isot
# Approximate these as just the group end time in mjd
barycentric = groupends.mjd
heliocentric = groupends.mjd
# For the case of an integration with a single group, force quantities to be
# arrays so that everything is consistent
if isinstance(groupending, str):
groupending = np.array([groupending])
barycentric = np.array([barycentric])
heliocentric = np.array([heliocentric])
for grp, day, milli, submilli, grpstr, bary, helio in zip(groups, endday, endmilli,
endsubmilli, groupending,
barycentric, heliocentric):
entry = self.create_group_entry(integ+1, grp, day, milli, submilli, grpstr, nx, ny,
numgap, compcode, comptext, bary, helio)
grouptable = np.vstack([grouptable, entry])
# Now remove the top garbage row from the table
grouptable = grouptable[1:]
return grouptable
def read_cal_file(self, filename):
"""Read in the specified calibration fits file. This is for files that contain
images (e.g. flats, superbias, etc)
Parameters
----------
filename : str
Name of file to be opened
Returns
-------
image : numpy.ndarray
Array data from input file
header : list
Information from file header
"""
try:
with fits.open(filename) as h:
image = h[1].data
header = h[0].header
except FileNotFoundError:
self.logger.error("ERROR: Unable to open {}".format(filename))
raise
# extract the appropriate subarray if necessary
if ((self.subarray_bounds[0] != 0) or
(self.subarray_bounds[2] != (self.ffsize - 1)) or
(self.subarray_bounds[1] != 0) or
(self.subarray_bounds[3] != (self.ffsize - 1))):
if len(image.shape) == 2:
image = image[self.subarray_bounds[1]:self.subarray_bounds[3] + 1,
self.subarray_bounds[0]:self.subarray_bounds[2] + 1]
if len(image.shape) == 3:
image = image[:, self.subarray_bounds[1]:self.subarray_bounds[3] + 1,
self.subarray_bounds[0]:self.subarray_bounds[2] + 1]
return image, header
def read_cr_files(self):
"""Read in the 10 files that comprise the cosmic ray library"""
self.cosmicrays = []
self.cosmicraysheader = []
for i in range(10):
idx = '_%2.2d_' % (i)
str1 = idx + self.params['cosmicRay']['suffix'] + '.fits'
name = self.crfile + str1
with fits.open(name) as h:
im = h[1].data
head = h[0].header
self.cosmicrays.append(im)
self.cosmicraysheader.append(head)
def read_crosstalk_file(self, file, detector):
"""Read in appropriate line from the xtalk coefficients
file for the given detector and return the coeffs
Parameters
----------
file : str
Name of ascii file containing the crosstalk coefficients
detector : str
Name of the detector being simulated
Returns
-------
xtcoeffs : list
Collection of crosstalk coefficients associated with detector
"""
xtcoeffs = ascii.read(file, header_start=0)
mtch = xtcoeffs['Det'] == detector.upper()
if not np.any(mtch):
raise ValueError('Detector {} not found in xtalk file {}'.format(detector, file))
return xtcoeffs[mtch]
def read_dark_file(self, filename):
"""Read in a prepared dark current exposure
Parameters
----------
filename : str
Name of fits file containing dark current data
Returns
-------
obj : read_fits object
obj contains: obj.data, obj.sbAndRefpix,
obj.zeroframe, obj.zero_sbAndRefpix,
obj.header
Values are None for objects that don't exist
"""
obj = read_fits.Read_fits()
obj.file = filename
obj.read_astropy()
return obj
def read_gain_map(self):
"""Read in the gain map. This will be used to
translate signals from e/s to ADU/sec
"""
if self.runStep['gain']:
self.gainim, self.gainhead = self.read_cal_file(self.params['Reffiles']['gain'])
# set any NaN's to 1.0
bad = ((~np.isfinite(self.gainim)) | (self.gainim == 0))
self.gainim[bad] = 1.0
# Pixels that have a gain value of 0
# will be reset to have values of 1.0
# zs = gainim == 0
# gainim[zs] = 1.0
def read_parameter_file(self):
"""Read in the yaml parameter file (main input to Mirage)."""
try:
with open(self.paramfile, 'r') as infile:
self.params = yaml.safe_load(infile)
except FileNotFoundError:
self.logger.warning("Unable to open {}".format(self.paramfile))
raise
if self.params['Inst']['instrument'].lower() == 'niriss':
newfilter,newpupil = utils.check_niriss_filter(self.params['Readout']['filter'],self.params['Readout']['pupil'])
self.params['Readout']['filter'] = newfilter
self.params['Readout']['pupil'] = newpupil
def read_saturation_file(self):
"""Read in saturation map from fits file"""
if self.runStep['saturation_lin_limit']:
try:
self.satmap, self.satheader = self.read_cal_file(self.params['Reffiles']['saturation'])
bad = ~np.isfinite(self.satmap)
self.satmap[bad] = 1.e6
except Exception:
self.logger.warning(('WARNING: unable to open saturation file {}.'
.format(self.params['Reffiles']['saturation'])))
self.logger.warning(("Please provide a valid file, or place 'none' "
"in the saturation entry in the parameter file, "
"in which case the nonlin limit value in the "
"parameter file ({}) will be used for all pixels."
.format(self.params['nonlin']['limit'])))
else:
self.logger.warning(('No saturation map provided. Using '
'{} for all pixels.'.format(self.params['nonlin']['limit'])))
dy, dx = self.linear_dark.data.shape[2:]
self.satmap = np.zeros((dy, dx)) + self.params['nonlin']['limit']
def read_seed(self, filename):
"""Read in the fits file containing the seed image/ramp
Parameters
----------
filename : str
Fits file containing the seed image
Returns
-------
seed : numpy.ndarray
Seed image
segmap : numpy.ndarray
Segmentation map
seedheader : list
Information from the header of the seed image file
"""
with fits.open(filename) as h:
seed = h[1].data
seedheader = h[0].header
try:
segmap = h[2].data
except Exception:
segmap = None
return seed, segmap, seedheader
def read_superbias_file(self):
"""Read in superbias from fits file"""
if self.runStep['superbias']:
try:
self.superbias, self.superbiasheader = self.read_cal_file(self.params['Reffiles']['superbias'])
except Exception:
raise IOError(("WARNING: unable to open superbias file {}. "
"Please provide a valid file in the superbias "
"entry in the parameter file."
.format(self.params['Reffiles']['superbias'])))
else:
raise ValueError('CAUTION: no superbias provided. Quitting.')
def readpattern_check(self):
"""Check the readout pattern that's entered and set number of used frames and
number of skipped frames per group, from the readout pattern definition file
"""
self.params['Readout']['readpatt'] = self.params['Readout']['readpatt'].upper()
# Read in readout pattern definition file
# and make sure the possible readout patterns are in upper case
self.readpatterns = ascii.read(self.params['Reffiles']['readpattdefs'])
self.readpatterns['name'] = [s.upper() for s in self.readpatterns['name']]
# If the requested readout pattern is in the table of options,
# then adopt the appropriate nframe and nskip
if self.params['Readout']['readpatt'] in self.readpatterns['name']:
mtch = self.params['Readout']['readpatt'] == self.readpatterns['name']
self.params['Readout']['nframe'] = self.readpatterns['nframe'][mtch].data[0]
self.params['Readout']['nskip'] = self.readpatterns['nskip'][mtch].data[0]
self.logger.info(('Requested readout pattern {} is valid. '
'Using nframe = {} and nskip = {}'
.format(self.params['Readout']['readpatt'],
self.params['Readout']['nframe'],
self.params['Readout']['nskip'])))
else:
# If the read pattern is not present in the definition file
# then quit.
raise ValueError(("WARNING: the {} readout pattern is not defined in {}."
.format(self.params['Readout']['readpatt'],
self.params['Reffiles']['readpattdefs'])))
def readpattern_compatible(self):
"""Make sure the input dark has a readout pattern
that is compatible with the requested output
readout pattern. The dark must be a flavor of RAPID,
or have a readout pattern that matches the output.
"""
rapids = ["RAPID", "NISRAPID", "FGSRAPID"]
darkpatt = self.linear_dark.header['READPATT']
if ((darkpatt != self.params['Readout']['readpatt']) &
(darkpatt not in rapids)):
raise ValueError(("WARNING: Unable to convert input dark with a "
"readout pattern of {}, to the requested readout "
"pattern of {}. The readout pattern of the dark "
"must be RAPID, NISRAPID, FGSRAPID, or match the requested output "
"readout pattern.".format(darkpatt, self.params['Readout']['readpatt'])))
def ref_check(self, rele):
"""
Check for the existence of the input reference file
Assume first that the file is in the directory tree
specified by the MIRAGE_DATA environment variable.
Parameters
----------
rele : tup
Nested keys that point to the reference file of
interest. These come from the yaml input file
Returns
-------
Nothing
"""
rfile = self.params[rele[0]][rele[1]]
if rfile.lower() != 'none':
rfile = os.path.abspath(rfile)
c1 = os.path.isfile(rfile)
if not c1:
raise FileNotFoundError(("WARNING: Unable to locate the {}, {} "
"input file! Not present in {}"
.format(rele[0], rele[1], rfile)))
def int_times_table(self, integration_time, date_obs, time_obs, num_ints):
"""Create and populate the INT_TIMES table, which is saved as a
separate extension in the output data file
Parameters
----------
integration_time : float
Exposure time for a single integration, including the reset
frame, in seconds
date_obs : str
Date string of observation ('2020-02-28')
time_obs : str
Time string of observation ('12:24:56')
num_ints : int
Number of integrations to put in the table
Returns
-------
int_times_tab : astropy.table.Table
Table of starting, mid, and end times for each integration
"""
integration_numbers = np.arange(self.params['Readout']['nint'])
start_time_string = date_obs + 'T' + time_obs
start_time = Time(start_time_string)
# There may or may not be an initial reset at the start of the
# exposure. If not, this will shift the start times of the
# subsequent integrations
integ_0_time_delta = TimeDelta((integration_time - self.num_resets_before_exposure * self.frametime) * u.second)
integ_time_delta = TimeDelta(integration_time * u.second)
start_times = start_time + integ_0_time_delta + (integ_time_delta * (integration_numbers - 1))
integration_time_exclude_reset = TimeDelta((integration_time - self.frametime) * u.second)
end_times = start_times + integration_time_exclude_reset
mid_times = start_times + integration_time_exclude_reset / 2.
# For now, let's keep the BJD (Barycentric?) times identical
# to the MJD times.
start_times_bjd = start_times
mid_times_bjd = mid_times
end_times_bjd = end_times
# Create table
nrows = len(integration_numbers)
data_list = [(integration_numbers[i] + 1, start_times.mjd[i], mid_times.mjd[i], end_times.mjd[i],
start_times_bjd.mjd[i], mid_times_bjd.mjd[i], end_times_bjd.mjd[i]) for i in range(nrows)]
int_times_tab = np.array(data_list,
dtype=[('integration_number','<i2'),
('int_start_MJD_UTC','<f8'),
('int_mid_MJD_UTC', '<f8'),
('int_end_MJD_UTC','<f8'),
('int_start_BJD_TDB','<f8'),
('int_mid_BJD_TDB','<f8'),
('int_end_BJD_TDB','<f8')])
return int_times_tab
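# A minimal usage sketch of the table built above (values are illustrative):
#
#     tab = self.int_times_table(21.47354, '2021-10-25', '12:00:00', num_ints=2)
#     tab['integration_number']    # -> array([1, 2], dtype=int16)
#     tab['int_start_MJD_UTC'][0]  # start of the first integration, MJD (UTC)
#     tab['int_mid_BJD_TDB']       # currently mirrors the MJD column, per the
#                                  # comment above
#
# Note that the row count comes from self.params['Readout']['nint'] rather
# than from the num_ints argument.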
def save_DMS(self, ramp, zeroframe, filename, mod='1b', err_ext=None,
group_dq=None, pixel_dq=None):
"""Save the new, simulated integration in DMS format (i.e. DMS orientation
rather than raw fitswriter orientation) using JWST data models
Parameters
----------
ramp : numpy.ndarray
Array containing the exposure to be saved
zeroframe : numpy.ndarray
The zeroth frame(s) for the exposure
filename : str
Name of output file
mod : str
Format in which to save the data. Can be '1b' or 'ramp'
'1b' will save the file in JWST Level 1B format. 'ramp'
will save the data as if it has gone beyond level 1b, and
contains the error and dq extensions
err_ext : numpy.ndarray
Array containing error values to save in error extension, if
using mod='ramp'
group_dq : numpy.ndarray
Array containing group data quality values. Used only if mod='ramp'
pixel_dq : numpy.ndarray
Array containing pixel data quality values. Used only if mod='ramp'
Returns
-------
None
"""
extra_fits_hdulist = self.add_mirage_info()
if mod == '1b':
from jwst.datamodels import Level1bModel as DataModel
elif mod == 'ramp':
from jwst.datamodels import RampModel as DataModel
else:
raise ValueError(("Model type to use for saving output is "
"not recognized. Must be either '1b' or 'ramp'."))
outModel = DataModel(extra_fits_hdulist)
# make sure the ramp to be saved has the right number of dimensions
imshape = ramp.shape
if len(imshape) == 3:
ramp = np.expand_dims(ramp, axis=0)
# insert data into model
outModel.data = ramp
if mod == 'ramp':
outModel.err = err_ext
outModel.groupdq = group_dq
outModel.pixeldq = pixel_dq
# if saving the zeroth frame is requested, insert into the model instance
if zeroframe is not None:
# if the zeroframe is a 2D image, then add a dimension,
# as the model expects 3D
if len(zeroframe.shape) == 2:
zeroframe = np.expand_dims(zeroframe, 0)
outModel.zeroframe = zeroframe
else:
self.logger.info("Zeroframe not present. Setting to all zeros")
numint, numgroup, ys, xs = ramp.shape
outModel.zeroframe = np.zeros((numint, ys, xs))
try:
outModel.meta.exposure.type = EXPTYPES[self.params['Inst']['instrument'].lower()]\
[self.params['Inst']['mode'].lower()]
except KeyError:
raise ValueError('EXPTYPE mapping not complete for this!!! FIX ME!')
# update various header keywords
dims = outModel.data.shape
dtor = radians(1.)
# Ignore warnings as astropy.time.Time will give a warning
# related to unknown leap seconds if the date is too far in
# the future.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
current_time = datetime.datetime.utcnow()
start_time_string = self.params['Output']['date_obs'] + 'T' + self.params['Output']['time_obs']
ct = Time(start_time_string)
outModel.meta.date = start_time_string
outModel.meta.telescope = 'JWST'
outModel.meta.instrument.name = self.params['Inst']['instrument'].upper()
if self.instrument.upper() == 'NIRCAM':
outModel.meta.instrument.module = self.detector[3]
channel = 'SHORT'
if 'LONG' in self.detector:
channel = 'LONG'
outModel.meta.instrument.channel = channel
if self.instrument.upper() in ['NIRISS', 'FGS']:
outModel.meta.instrument.focus_position = 0.0 #Placeholder, required by WSS; will be float in flight.
outModel.meta.instrument.detector = self.detector
outModel.meta.coordinates.reference_frame = 'ICRS'
outModel.meta.subarray.fastaxis = self.fastaxis
outModel.meta.subarray.slowaxis = self.slowaxis
outModel.meta.origin = 'STScI'
outModel.meta.filename = filename
outModel.meta.filetype = 'raw'
outModel.meta.observation.obs_id = self.params['Output']['obs_id']
outModel.meta.observation.visit_id = self.params['Output']['visit_id']
outModel.meta.observation.visit_number = self.params['Output']['visit_number']
outModel.meta.observation.program_number = self.params['Output']['program_number']
outModel.meta.observation.observation_number = self.params['Output']['observation_number']
outModel.meta.observation.observation_label = self.params['Output']['observation_label']
outModel.meta.observation.visit_group = self.params['Output']['visit_group']
outModel.meta.observation.sequence_id = self.params['Output']['sequence_id']
outModel.meta.observation.activity_id = self.params['Output']['activity_id']
outModel.meta.observation.exposure_number = self.params['Output']['exposure_number']
outModel.meta.program.pi_name = self.params['Output']['PI_Name']
outModel.meta.program.title = self.params['Output']['title']
outModel.meta.program.category = self.params['Output']['Proposal_category']
outModel.meta.program.sub_category = 'UNKNOWN'
outModel.meta.program.science_category = self.params['Output']['Science_category']
outModel.meta.program.continuation_id = 0
outModel.meta.aperture.name = self.params['Readout']['array_name']
outModel.meta.target.catalog_name = 'UNKNOWN'
outModel.meta.target.ra = self.params['Output']['target_ra']
outModel.meta.target.dec = self.params['Output']['target_dec']
outModel.meta.target.proposer_name = self.params['Output']['target_name']
outModel.meta.coordinates.reference_frame = 'ICRS'
outModel.meta.wcsinfo.wcsaxes = 2
outModel.meta.wcsinfo.crval1 = self.ra
outModel.meta.wcsinfo.crval2 = self.dec
outModel.meta.wcsinfo.crpix1 = self.siaf.XSciRef
outModel.meta.wcsinfo.crpix2 = self.siaf.YSciRef
outModel.meta.wcsinfo.ctype1 = 'RA---TAN'
outModel.meta.wcsinfo.ctype2 = 'DEC--TAN'
outModel.meta.wcsinfo.cunit1 = 'deg'
outModel.meta.wcsinfo.cunit2 = 'deg'
outModel.meta.wcsinfo.v2_ref = self.siaf.V2Ref
outModel.meta.wcsinfo.v3_ref = self.siaf.V3Ref
outModel.meta.wcsinfo.vparity = self.siaf.VIdlParity
outModel.meta.wcsinfo.v3yangle = self.siaf.V3IdlYAngle
outModel.meta.wcsinfo.cdelt1 = self.siaf.XSciScale / 3600.
outModel.meta.wcsinfo.cdelt2 = self.siaf.YSciScale / 3600.
outModel.meta.wcsinfo.roll_ref = self.local_roll
# Grism TSO data have the XREF_SCI and YREF_SCI keywords populated.
# These are used to describe the location of the source on the detector.
try:
self.logger.info('\n\nPopulating xref_sci in output file:')
self.logger.info('{}'.format(self.seedheader['XREF_SCI']))
outModel.meta.wcsinfo.siaf_xref_sci = self.seedheader['XREF_SCI']
outModel.meta.wcsinfo.siaf_yref_sci = self.seedheader['YREF_SCI']
except KeyError:
self.logger.warning('Unable to propagate XREF_SCI, YREF_SCI from seed image to simulated data file.')
# ra_v1, dec_v1, and pa_v3 are not used by the level 2 pipelines
# compute pointing of V1 axis
pointing_ra_v1, pointing_dec_v1 = pysiaf.rotations.pointing(self.attitude_matrix, 0., 0.)
outModel.meta.pointing.ra_v1 = pointing_ra_v1
outModel.meta.pointing.dec_v1 = pointing_dec_v1
outModel.meta.pointing.pa_v3 = self.params['Telescope']['rotation']
outModel.meta.observation.date = self.params['Output']['date_obs']
outModel.meta.observation.time = self.params['Output']['time_obs']
# Create INT_TIMES table, to be saved in INT_TIMES extension
int_times = self.int_times_table(self.ramptime, self.params['Output']['date_obs'], self.params['Output']['time_obs'],
outModel.data.shape[0])
outModel.int_times = int_times
# Set filter and pupil values
pw = self.params['Readout']['pupil']
fw = self.params['Readout']['filter']
# Get FGS filter/pupil in proper format
if fw == 'NA':
fw = 'N/A'
if pw == 'NA':
pw = 'N/A'
# Filter and pupil info
outModel.meta.instrument.filter = fw
outModel.meta.instrument.pupil = pw
if self.instrument.upper() == 'NIRISS':
outModel.meta.instrument.filter_position = self.filter_wheel_position
outModel.meta.instrument.pupil_position = self.pupil_wheel_position
# Specify whether the exposure is part of a TSO observation
if self.params['Inst']['mode'].lower() not in ['ts_imaging', 'ts_grism']:
outModel.meta.visit.tsovisit = False
else:
outModel.meta.visit.tsovisit = True
num_primary_dithers = self.params['Output']['total_primary_dither_positions']
if isinstance(self.params['Output']['total_primary_dither_positions'], str):
num_primary_dithers = int(self.params['Output']['total_primary_dither_positions'][0])
outModel.meta.dither.primary_type = self.params['Output']['primary_dither_type'].upper()
outModel.meta.dither.position_number = self.params['Output']['primary_dither_position']
outModel.meta.dither.total_points = num_primary_dithers
outModel.meta.dither.dither_points = str(self.params['Output']['total_primary_dither_positions'])
outModel.meta.dither.pattern_size = 'DEFAULT'
outModel.meta.dither.subpixel_type = self.params['Output']['subpix_dither_type']
outModel.meta.dither.subpixel_number = self.params['Output']['subpix_dither_position']
outModel.meta.dither.subpixel_total_points = self.params['Output']['total_subpix_dither_positions']
outModel.meta.dither.x_offset = self.params['Output']['xoffset']
outModel.meta.dither.y_offset = self.params['Output']['yoffset']
# pixel coordinates in FITS header start from 1 not from 0
xc = (self.subarray_bounds[2] + self.subarray_bounds[0])/2.+1.
yc = (self.subarray_bounds[3] + self.subarray_bounds[1])/2.+1.
outModel.meta.exposure.readpatt = self.params['Readout']['readpatt']
# The subarray name needs to come from the "Name" column in the
# subarray definitions dictionary
mtch = self.subdict["AperName"] == self.params["Readout"]['array_name']
outModel.meta.subarray.name = str(self.subdict["Name"].data[mtch][0])
# subarray_bounds indexed to zero, but values in header should be
# indexed to 1.
outModel.meta.subarray.xstart = self.subarray_bounds[0]+1
outModel.meta.subarray.ystart = self.subarray_bounds[1]+1
outModel.meta.subarray.xsize = self.subarray_bounds[2]-self.subarray_bounds[0]+1
outModel.meta.subarray.ysize = self.subarray_bounds[3]-self.subarray_bounds[1]+1
nlrefpix = max(4-self.subarray_bounds[0], 0)
nbrefpix = max(4-self.subarray_bounds[1], 0)
nrrefpix = max(self.subarray_bounds[2]-(self.ffsize-4), 0)
ntrefpix = max(self.subarray_bounds[3]-(self.ffsize-4), 0)
outModel.meta.exposure.nframes = self.params['Readout']['nframe']
outModel.meta.exposure.ngroups = self.params['Readout']['ngroup']
outModel.meta.exposure.nints = self.params['Readout']['nint']
# TODO: Putting this try/except here because SOSS mode mysteriously breaks it (Joe)
try:
outModel.meta.exposure.integration_start = self.seedheader['SEGINTST'] + 1
outModel.meta.exposure.integration_end = self.seedheader['SEGINTED'] + 1
except KeyError:
pass
outModel.meta.exposure.sample_time = 10
outModel.meta.exposure.frame_time = self.frametime
outModel.meta.exposure.group_time = self.frametime * (self.params['Readout']['nframe'] +
self.params['Readout']['nskip'])
outModel.meta.exposure.groupgap = self.params['Readout']['nskip']
outModel.meta.exposure.nresets_at_start = 1
outModel.meta.exposure.nresets_between_ints = 1
outModel.meta.exposure.integration_time = self.rampexptime
outModel.meta.exposure.exposure_time = self.rampexptime * self.params['Readout']['nint']
outModel.meta.model_type = 'RampModel'
# set the exposure start time
outModel.meta.exposure.start_time = ct.mjd
endingTime = ct.mjd + outModel.meta.exposure.exposure_time/3600./24.
outModel.meta.exposure.end_time = endingTime
outModel.meta.exposure.mid_time = ct.mjd + outModel.meta.exposure.exposure_time/3600./24./2.
outModel.meta.exposure.duration = self.get_duration()
# populate the GROUP extension table
n_int, n_group, n_y, n_x = outModel.data.shape
with warnings.catch_warnings():
warnings.simplefilter("ignore")
outModel.group = self.populate_group_table(ct, outModel.meta.exposure.group_time, self.rampexptime,
n_int, n_group, n_y, n_x)
outModel.save(filename)
# Now we need to adjust the datamodl header keyword
# If we leave it as Level1bModel, the pipeline doesn't
# work properly
if mod == '1b':
temp = fits.open(filename, mode='update')
temp[0].header['DATAMODL'] = 'RampModel'
temp.flush()
return
def save_fits(self, ramp, zeroframe, filename, mod='1b', err_ext=None,
group_dq=None, pixel_dq=None):
"""Save the new, simulated integration in DMS format (i.e. DMS orientation
rather than raw fitswriter orientation) using astropy rather than
JWST data models
Parameters
----------
ramp : numpy.ndarray
Array containing the exposure to be saved
zeroframe : numpy.ndarray
The zeroth frame(s) for the exposure
filename : str
Name of output file
mod : str
Format in which to save the data. Can be '1b' or 'ramp'
'1b' will save the file in JWST Level 1B format. 'ramp'
will save the data as if it has gone beyond level 1b, and
contains the error and dq extensions
err_ext : numpy.ndarray
Array containing error values to save in error extension, if
using mod='ramp'
group_dq : numpy.ndarray
Array containing group data quality values. Used only if mod='ramp'
pixel_dq : numpy.ndarray
Array containing pixel data quality values. Used only if mod='ramp'
Returns
-------
filename : str
Same as input filename
"""
# Make sure the ramp to be saved has the right number of dimensions
imshape = ramp.shape
if len(imshape) == 3:
ramp = np.expand_dims(ramp, axis=0)
if mod == '1b':
toohigh = ramp > 65535
ramp[toohigh] = 65535
# If saving the zeroth frame is requested, insert into the model instance
if zeroframe is not None:
# If the zeroframe is a 2D image, then add a dimension
if len(zeroframe.shape) == 2:
zeroframe = np.expand_dims(zeroframe, 0)
        else:
            self.logger.info("Zeroframe not present. Setting to all zeros")
            zeroframe = np.zeros((ramp.shape[0], ramp.shape[2], ramp.shape[3]))
        numint, numgroup, ys, xs = ramp.shape
# Place the arrays in the correct extensions of the HDUList
# using int16 below causes problems! anything set to 65535
# gets reset to -1, which screws up saturation flagging
# I think the answer is to save as uint16...
# Create HDU List of Mirage-centric info
extra_fits_hdulist = self.add_mirage_info()
extra_header0 = extra_fits_hdulist[0].header
if mod == 'ramp':
ex0 = fits.PrimaryHDU(header=extra_header0)
ex1 = fits.ImageHDU(ramp.astype(np.float32), name='SCI')
ex2 = fits.ImageHDU(pixel_dq.astype(np.uint32), name='PIXELDQ')
ex3 = fits.ImageHDU(group_dq.astype(np.uint8), name='GROUPDQ')
ex4 = fits.ImageHDU(err_ext.astype(np.float32), name='ERR')
ex5 = fits.ImageHDU(zeroframe.astype(np.float32), name='ZEROFRAME')
ex6 = fits.BinTableHDU(name='GROUP')
ex7 = fits.BinTableHDU(name='INT_TIMES')
outModel = fits.HDUList([ex0, ex1, ex2, ex3, ex4, ex5, ex6, ex7])
groupextnum = 6
elif mod == '1b':
ex0 = fits.PrimaryHDU(header=extra_header0)
ex1 = fits.ImageHDU(ramp.astype(np.uint16), name='SCI')
ex2 = fits.ImageHDU(zeroframe.astype(np.uint16), name='ZEROFRAME')
ex3 = fits.BinTableHDU(name='GROUP')
ex4 = fits.BinTableHDU(name='INT_TIMES')
outModel = fits.HDUList([ex0, ex1, ex2, ex3, ex4])
groupextnum = 3
try:
outModel[0].header['EXP_TYPE'] = EXPTYPES[self.params['Inst']['instrument'].lower()]\
[self.params['Inst']['mode'].lower()]
        except KeyError:
raise ValueError('EXPTYPE mapping not complete for this!!! FIX ME!')
# update various header keywords
dims = outModel[1].data.shape
dtor = radians(1.)
# Ignore warnings as astropy.time.Time will give a warning
# related to unknown leap seconds if the date is too far in
# the future.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
current_time = datetime.datetime.utcnow()
start_time_string = self.params['Output']['date_obs'] + 'T' + self.params['Output']['time_obs']
ct = Time(start_time_string)
outModel[0].header['DATE'] = start_time_string
outModel[0].header['TELESCOP'] = 'JWST'
outModel[0].header['INSTRUME'] = self.params['Inst']['instrument'].upper()
outModel[0].header['DETECTOR'] = self.detector
if self.instrument.upper() == 'NIRCAM':
outModel[0].header['MODULE'] = self.detector[3]
channel = 'SHORT'
if 'LONG' in self.detector:
channel = 'LONG'
outModel[0].header['CHANNEL'] = channel
outModel[0].header['FASTAXIS'] = self.fastaxis
outModel[0].header['SLOWAXIS'] = self.slowaxis
outModel[1].header['RADESYS'] = 'ICRS'
outModel[0].header['ORIGIN'] = 'STScI'
outModel[0].header['FILENAME'] = os.path.split(filename)[1]
outModel[0].header['FILETYPE'] = 'raw'
outModel[0].header['OBS_ID'] = self.params['Output']['obs_id']
outModel[0].header['VISIT_ID'] = self.params['Output']['visit_id']
outModel[0].header['VISIT'] = self.params['Output']['visit_number']
outModel[0].header['PROGRAM'] = self.params['Output']['program_number']
outModel[0].header['OBSERVTN'] = self.params['Output']['observation_number']
outModel[0].header['OBSLABEL'] = self.params['Output']['observation_label']
outModel[0].header['VISITGRP'] = self.params['Output']['visit_group']
outModel[0].header['SEQ_ID'] = self.params['Output']['sequence_id']
outModel[0].header['ACT_ID'] = self.params['Output']['activity_id']
outModel[0].header['EXPOSURE'] = self.params['Output']['exposure_number']
outModel[0].header['PI_NAME'] = self.params['Output']['PI_Name']
outModel[0].header['TITLE'] = self.params['Output']['title']
outModel[0].header['CATEGORY'] = self.params['Output']['Proposal_category']
outModel[0].header['SUBCAT'] = 'UNKNOWN'
outModel[0].header['SCICAT'] = self.params['Output']['Science_category']
outModel[0].header['CONT_ID'] = 0
outModel[0].header['APERNAME'] = self.params['Readout']['array_name']
outModel[0].header['TARGNAME'] = 'UNKNOWN'
outModel[0].header['TARGPROP'] = self.params['Output']['target_name']
outModel[0].header['TARG_RA'] = self.params['Output']['target_ra']
outModel[0].header['TARG_DEC'] = self.params['Output']['target_dec']
outModel[1].header['WCSAXES'] = 2
outModel[1].header['CRVAL1'] = self.ra
outModel[1].header['CRVAL2'] = self.dec
outModel[1].header['CRPIX1'] = self.siaf.XSciRef
outModel[1].header['CRPIX2'] = self.siaf.YSciRef
outModel[1].header['CTYPE1'] = 'RA---TAN'
outModel[1].header['CTYPE2'] = 'DEC--TAN'
outModel[1].header['CUNIT1'] = 'deg'
outModel[1].header['CUNIT2'] = 'deg'
outModel[1].header['V2_REF'] = self.siaf.V2Ref
outModel[1].header['V3_REF'] = self.siaf.V3Ref
outModel[1].header['VPARITY'] = self.siaf.VIdlParity
outModel[1].header['V3I_YANG'] = self.siaf.V3IdlYAngle
outModel[1].header['CDELT1'] = self.siaf.XSciScale / 3600.
outModel[1].header['CDELT2'] = self.siaf.YSciScale / 3600.
outModel[1].header['ROLL_REF'] = self.local_roll
# ra_v1, dec_v1, and pa_v3 are not used by the level 2 pipelines
# compute pointing of V1 axis
pointing_ra_v1, pointing_dec_v1 = pysiaf.rotations.pointing(self.attitude_matrix, 0., 0.)
outModel[0].header['RA_V1'] = pointing_ra_v1
outModel[0].header['DEC_V1'] = pointing_dec_v1
outModel[0].header['PA_V3'] = self.params['Telescope']['rotation']
        # elapsed time from the end and from the start of the supposed ramp, in seconds
# put the end of the ramp 1 second before the time the file is written
# these only go in the fake ramp, not in the signal images....
outModel[0].header['DATE-OBS'] = self.params['Output']['date_obs']
outModel[0].header['TIME-OBS'] = self.params['Output']['time_obs']
# Create INT_TIMES table, to be saved in INT_TIMES extension
int_times = self.int_times_table(self.ramptime, self.params['Output']['date_obs'], self.params['Output']['time_obs'],
outModel['SCI'].data.shape[0])
outModel['INT_TIMES'].data = int_times
# Set filter and pupil values
pw = self.params['Readout']['pupil']
fw = self.params['Readout']['filter']
# Get FGS filter/pupil in proper format
if fw == 'NA':
fw = 'N/A'
if pw == 'NA':
pw = 'N/A'
# Filter and pupil info
outModel[0].header['FILTER'] = fw
outModel[0].header['PUPIL'] = pw
if self.instrument.upper() == 'NIRISS':
outModel[0].header['FWCPOS'] = self.filter_wheel_position
outModel[0].header['PWCPOS'] = self.pupil_wheel_position
if self.instrument.upper() in ['NIRISS', 'FGS']:
outModel[0].header['FOCUSPOS'] = 'DEFAULT' #Placeholder, required by WSS; will be float in flight.
# Specify whether the exposure is part of a TSO observation
if self.params['Inst']['mode'].lower() not in ['ts_imaging', 'ts_grism']:
outModel[0].header['TSOVISIT'] = False
else:
outModel[0].header['TSOVISIT'] = True
num_primary_dithers = self.params['Output']['total_primary_dither_positions']
if isinstance(self.params['Output']['total_primary_dither_positions'], str):
            num_primary_dithers = int(self.params['Output']['total_primary_dither_positions'][0])
outModel[0].header['PATTTYPE'] = self.params['Output']['primary_dither_type']
outModel[0].header['PATT_NUM'] = self.params['Output']['primary_dither_position']
outModel[0].header['NUMDTHPT'] = num_primary_dithers
outModel[0].header['NDITHPTS'] = str(self.params['Output']['total_primary_dither_positions'])
outModel[0].header['PATTSIZE'] = 'DEFAULT'
outModel[0].header['SUBPXTYP'] = self.params['Output']['subpix_dither_type']
outModel[0].header['SUBPXNUM'] = self.params['Output']['subpix_dither_position']
outModel[0].header['SUBPXPNS'] = self.params['Output']['total_subpix_dither_positions']
outModel[0].header['XOFFSET'] = self.params['Output']['xoffset']
outModel[0].header['YOFFSET'] = self.params['Output']['yoffset']
# pixel coordinates in FITS header start from 1 not from 0
xc = (self.subarray_bounds[2]+self.subarray_bounds[0])/2.+1.
yc = (self.subarray_bounds[3]+self.subarray_bounds[1])/2.+1.
outModel[0].header['READPATT'] = self.params['Readout']['readpatt']
# The subarray name needs to come from the "Name" column in the
# subarray definitions dictionary
mtch = self.subdict["AperName"] == self.params["Readout"]['array_name']
outModel[0].header['SUBARRAY'] = str(self.subdict["Name"].data[mtch][0])
# subarray_bounds indexed to zero, but values in header should be
# indexed to 1.
outModel[0].header['SUBSTRT1'] = self.subarray_bounds[0]+1
outModel[0].header['SUBSTRT2'] = self.subarray_bounds[1]+1
outModel[0].header['SUBSIZE1'] = self.subarray_bounds[2]-self.subarray_bounds[0]+1
outModel[0].header['SUBSIZE2'] = self.subarray_bounds[3]-self.subarray_bounds[1]+1
nlrefpix = max(4-self.subarray_bounds[0], 0)
nbrefpix = max(4-self.subarray_bounds[1], 0)
nrrefpix = max(self.subarray_bounds[2]-(self.ffsize-4), 0)
ntrefpix = max(self.subarray_bounds[3]-(self.ffsize-4), 0)
outModel[0].header['NFRAMES'] = self.params['Readout']['nframe']
outModel[0].header['NGROUPS'] = self.params['Readout']['ngroup']
outModel[0].header['NINTS'] = self.params['Readout']['nint']
outModel[0].header['TSAMPLE'] = 10
outModel[0].header['TFRAME'] = self.frametime
outModel[0].header['TGROUP'] = self.frametime * (self.params['Readout']['nframe'] +
self.params['Readout']['nskip'])
outModel[0].header['GROUPGAP'] = self.params['Readout']['nskip']
outModel[0].header['NRSTSTRT'] = 1
outModel[0].header['NRESETS'] = 1
outModel[0].header['EFFINTTM'] = self.rampexptime
outModel[0].header['EFFEXPTM'] = self.rampexptime * self.params['Readout']['nint']
        # set the exposure start time from the requested date_obs/time_obs
outModel[0].header['EXPSTART'] = ct.mjd
outModel[0].header['EXPEND'] = ct.mjd + outModel[0].header['EFFEXPTM']/3600./24.
outModel[0].header['EXPMID'] = ct.mjd + outModel[0].header['EFFEXPTM']/3600./24./2.
outModel[0].header['DURATION'] = self.get_duration()
# populate the GROUP extension table
n_int, n_group, n_y, n_x = outModel[1].data.shape
with warnings.catch_warnings():
warnings.simplefilter("ignore")
outModel[groupextnum].data = self.populate_group_table(ct, outModel[0].header['TGROUP'], self.rampexptime,
n_int, n_group, n_y, n_x)
outModel.writeto(filename, overwrite=True)
return filename
def resets_before_exp(self):
"""Find the number of detector resets that happen at the start
of the exposure. This will only be used to get the start times
of the integrations correct in the output file
"""
aperture_type = "sub"
if self.params['Inst']['instrument'].lower() == 'nircam':
if 'full' in self.params['Readout']['array_name'].lower():
aperture_type = "full"
elif self.params['Inst']['instrument'].lower() == 'niriss':
if 'cen' in self.params['Readout']['array_name'].lower():
aperture_type = "full"
elif self.params['Inst']['instrument'].lower() == 'fgs':
if 'full' in self.params['Readout']['array_name'].lower():
aperture_type = "full"
self.num_resets_before_exposure = NUM_RESETS_BEFORE_EXP[self.instrument.lower()][aperture_type]
def get_duration(self):
"""Calcualte the duration time of an exposure, following the JWST keyword
dictionary definition of "duration"
Returns
-------
duration : float
Duration of the exposure in seconds
"""
total_photon_collection_time = self.frametime * ((self.params['Readout']['ngroup'] * self.params['Readout']['nframe'] \
+ (self.params['Readout']['ngroup'] - 1) * self.params['Readout']['nskip']) * self.params['Readout']['nint'])
duration = total_photon_collection_time + self.frametime * (self.num_resets_before_exposure + \
NUM_RESETS_BEFORE_INT[self.instrument.lower()] * (self.params['Readout']['nint'] - 1))
# Kevin says that NIRISS also does a row-by-row reset of the full detector between
# subarray integrations. This will add 10 usec * 2048 rows * (Nints-1)
if self.params['Inst']['instrument'].lower() == 'niriss' and 'CEN' not in self.params['Readout']['array_name']:
duration += 1e-5 * 2048 * (self.params['Readout']['nint'] - 1)
return duration
def seed_mapping(self):
"""Create a mapping of the seed images to the dark data. Take into
account that self.seed can be either a list of filenames or a numpy
array. self.linDark should be a list of filenames.
"""
mapping = {}
if isinstance(self.seed, list):
if len(self.linDark) == len(self.seed):
if len(self.linDark) == 1:
mapping[self.linDark[0]] = self.seed
else:
mapping = self.map_seeds_to_dark()
elif ((len(self.linDark) > 1) and (len(self.seed) == 1)):
for dark_element in self.linDark:
mapping[dark_element] = self.seed
else:
mapping = self.map_seeds_to_dark()
elif isinstance(self.seed, np.ndarray):
for dark_element in self.linDark:
mapping[dark_element] = self.seed
return mapping
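    # Note (editorial, hedged): the mapping built above is {dark_filename: seed},
    # where `seed` is either a list of seed-image filenames or a single numpy
    # array shared by every dark file.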
def simple_get_image(self, name):
"""Read in an array from a fits file and crop using subarray_bounds
Parameters
----------
name : str
Name of fits file to be read in
Returns
-------
image : numpy.ndarray
Array populated with the file contents
"""
try:
image, header = fits.getdata(name, header=True)
        except Exception:
raise FileNotFoundError('WARNING: unable to read in {}'.format(name))
# assume that the input is 2D, since we are using it to build a signal rate frame
imageshape = image.shape
if len(imageshape) != 2:
self.logger.error("Error: image {} is not two-dimensional".format(name))
            return None
try:
image = image[self.subarray_bounds[1]:self.subarray_bounds[3]+1,
self.subarray_bounds[0]:self.subarray_bounds[2]+1]
        except Exception:
raise ValueError("Unable to crop image from {}".format(name))
return image
def add_options(self, parser=None, usage=None):
if parser is None:
parser = argparse.ArgumentParser(usage=usage,
description='Simulate JWST ramp')
parser.add_argument("paramfile", help=('File describing the input parameters and instrument '
'settings to use. (YAML format).'))
parser.add_argument("linDark", help='File containing linearized dark ramp.')
parser.add_argument("seed", help='File containing seed image and segmentation map')
return parser
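# --- Illustrative sketch (editorial addition, not part of Mirage) -----------
# The helper below re-states the exposure duration formula implemented in
# Observation.get_duration() as a standalone check. All default values are
# assumptions (roughly a NIRCam full-frame, RAPID-like setup), and the extra
# NIRISS row-by-row reset term handled in get_duration() is omitted here.
def _example_exposure_duration(frametime=10.737, ngroup=5, nframe=1, nskip=0,
                               nint=2, resets_before_exp=1,
                               resets_between_ints=1):
    # photon collection time over all integrations
    photon_time = frametime * ((ngroup * nframe + (ngroup - 1) * nskip) * nint)
    # reset frames before the exposure and between integrations
    reset_time = frametime * (resets_before_exp +
                              resets_between_ints * (nint - 1))
    return photon_time + reset_time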
if __name__ == '__main__':
usagestring = ('USAGE: obs_generator.py inputs.yaml '
'lindark.fits seedimg.fits')
obs = Observation()
parser = obs.add_options(usage=usagestring)
args = parser.parse_args(namespace=obs)
obs.create()
|
the-stack_106_28363 | from django.http import JsonResponse
from rest_framework import viewsets,mixins
from user.serializers import UserProfileSerializer, User, UserRegisterSerializer
from rest_framework.views import APIView
from django.contrib.auth import logout
from django.views.generic.base import View
from user.models import UserProfile
from dockerapi.common import R
from rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token
class ListAndUpdateViewSet(mixins.UpdateModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):
"""
A viewset that provides default `update()`, `list()`actions.
"""
pass
class UserSet(ListAndUpdateViewSet):
serializer_class = UserProfileSerializer
def get_queryset(self):
if self.request.user.is_superuser:
return UserProfile.objects.all()
else:
return []
def update(self, request, *args, **kwargs):
user = request.user
if not user.is_superuser:
            return JsonResponse(R.build(msg="Insufficient permissions"))
print(self.get_object())
new_pwd = request.data.get("pwd", "")
new_pwd = new_pwd.strip()
print(new_pwd)
if len(new_pwd) < 6:
            return JsonResponse(R.build(msg="Invalid password format"))
user_info = self.get_object()
user_info.set_password(new_pwd)
user_info.save()
return JsonResponse(R.ok())
class get_user_info(APIView):
def get(self, request):
user_info = User.objects.get(pk=request.user.id)
serializer = UserProfileSerializer(user_info)
return JsonResponse(serializer.data)
class LogoutView(View):
def get(self, request):
logout(request)
return JsonResponse({"msg": "OK"})
class UserRegView(viewsets.mixins.CreateModelMixin,viewsets.GenericViewSet):
authentication_classes = []
permission_classes = []
queryset = UserProfile.objects.all()
serializer_class = UserRegisterSerializer
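# Hedged wiring sketch (editorial addition, illustrative only; the real project
# may route these views differently). The URL prefixes are assumptions, and a
# reasonably recent Django/DRF is assumed for `path` and `basename`.
def example_urlpatterns():
    from django.urls import path
    from rest_framework.routers import DefaultRouter
    router = DefaultRouter()
    router.register(r'users', UserSet, basename='users')
    router.register(r'register', UserRegView, basename='register')
    return router.urls + [
        path('userinfo/', get_user_info.as_view()),
        path('logout/', LogoutView.as_view()),
    ]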
|
the-stack_106_28364 | # pytorch re-implementation of
# https://github.com/xuqiantong/GAN-Metrics/blob/master/framework/metric.py
# https://github.com/stevenygd/PointFlow/blob/master/metrics/evaluation_metrics.py
import torch
from tqdm import tqdm
from .distance import chamfer_distance, earth_mover_distance
def compute_emd(pcs_1, pcs_2):
B, N_1, N_2 = pcs_1.size(0), pcs_1.size(1), pcs_2.size(1)
assert N_1 == N_2
emd = earth_mover_distance(pcs_1, pcs_2) # (B,)
emd_norm = emd / float(N_1) # (B,)
return emd_norm
def compute_cd(pcs_1, pcs_2):
dl, dr = chamfer_distance(pcs_1, pcs_2)
return dl.mean(dim=1) + dr.mean(dim=1)
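# Hedged usage sketch (editorial addition; assumes a CUDA device and the
# compiled chamfer/EMD ops are available): both helpers above expect batched
# point clouds of shape (B, N, 3) on the same device and return one distance
# value per pair, i.e. a tensor of shape (B,).
def _example_pairwise_shapes(batch=4, num_points=1024):
    x = torch.rand(batch, num_points, 3).cuda()
    y = torch.rand_like(x)
    d_cd = compute_cd(x, y)    # shape (batch,)
    d_emd = compute_emd(x, y)  # shape (batch,)
    return d_cd, d_emd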
def _pairwise_distance(pcs_1, pcs_2, batch_size, metrics=("cd", "emd"), verbose=True):
B_1 = pcs_1.size(0)
B_2 = pcs_2.size(0)
device = pcs_1.device
distance = {}
for key in metrics:
distance[key] = torch.zeros(B_1, B_2, device=device)
for i in tqdm(
range(B_1),
desc="distance matrix {}".format(str(metrics)),
leave=False,
disable=not verbose,
):
for j in range(0, B_2, batch_size):
            # The size of 'batch_2' may not be 'batch_size' for the last chunk
batch_2 = pcs_2[j : j + batch_size]
batch_1 = pcs_1[[i]].expand(batch_2.size(0), -1, -1)
if "cd" in metrics:
dist_cd = compute_cd(batch_1, batch_2)
distance["cd"][i, j : j + batch_size] = dist_cd
if "emd" in metrics:
dist_emd = compute_emd(batch_1, batch_2)
distance["emd"][i, j : j + batch_size] = dist_emd
return distance
def _compute_cov_mmd(M_rg):
N_ref, N_gen = M_rg.shape
mmd_gen, min_idx_gen = M_rg.min(dim=0)
mmd_ref, _ = M_rg.min(dim=1)
mmd = mmd_ref.mean().item()
mmd_gen = mmd_gen.mean().item()
cov = float(len(torch.unique(min_idx_gen))) / float(N_ref)
return {
"mmd": mmd,
"mmd-sample": mmd_gen,
"cov": cov,
}
def _compute_nna(M_rr, M_rg, M_gg, k, sqrt=False):
N_ref, N_gen = M_rg.shape
device = M_rg.device
label_ref = torch.ones(N_ref, device=device)
label_gen = torch.zeros(N_gen, device=device)
label = torch.cat([label_ref, label_gen], dim=0)
# matrix for leave-one-out
M_ref = torch.cat((M_rr, M_rg), dim=1)
M_gen = torch.cat((M_rg.t(), M_gg), dim=1)
M = torch.cat([M_ref, M_gen], dim=0) # (N_r+N_g, N_r+N_g)
M = M.abs().sqrt() if sqrt else M
M = M + torch.diag(float("inf") * torch.ones_like(label))
_, idx = M.topk(k=k, dim=0, largest=False) # idx.shape is (k, N_r+N_g)
# vote & classify
count = torch.zeros_like(label)
for i in range(0, k):
count = count + label.index_select(0, idx[i])
pred = (count / k >= 0.5).float()
s = {
"tp": (pred * label).sum().item(),
"fp": (pred * (1 - label)).sum().item(),
"fn": ((1 - pred) * label).sum().item(),
"tn": ((1 - pred) * (1 - label)).sum().item(),
}
s.update(
{
"precision": s["tp"] / (s["tp"] + s["fp"] + 1e-10),
"recall": s["tp"] / (s["tp"] + s["fn"] + 1e-10),
"accuracy_t": s["tp"] / (s["tp"] + s["fn"] + 1e-10),
"accuracy_f": s["tn"] / (s["tn"] + s["fp"] + 1e-10),
"accuracy": torch.eq(label, pred).float().mean().item(),
}
)
return s
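# Note (editorial, hedged): in the 1-NNA evaluation above, a leave-one-out
# 1-nearest-neighbour classifier tries to separate reference from generated
# point clouds. Accuracy near 0.5 means the two sets are hard to tell apart
# (good generation); accuracy near 1.0 means they are easily distinguished.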
@torch.no_grad()
def compute_cov_mmd_1nna(
pcs_gen, pcs_ref, batch_size, metrics=("cd", "emd"), verbose=True
):
assert isinstance(metrics, tuple)
results = {}
M_rr = _pairwise_distance(pcs_ref, pcs_ref, batch_size, metrics, verbose)
M_rg = _pairwise_distance(pcs_ref, pcs_gen, batch_size, metrics, verbose)
M_gg = _pairwise_distance(pcs_gen, pcs_gen, batch_size, metrics, verbose)
for metric in metrics:
# COV and MMD
scores_mmd_cov = _compute_cov_mmd(M_rg[metric])
for k, v in scores_mmd_cov.items():
results.update({"{}-{}".format(k, metric): v})
# 1-NNA
scores_1nna = _compute_nna(
M_rr[metric],
M_rg[metric],
M_gg[metric],
k=1,
sqrt=False,
)
for k, v in scores_1nna.items():
results.update({"1-nn-{}-{}".format(k, metric): v})
return results
if __name__ == "__main__":
torch.set_grad_enabled(False)
a = torch.rand(100, 2048, 3).cuda()
b = torch.rand_like(a)
r = compute_cov_mmd_1nna(a, b, 512, ("cd", "emd"))
for k, v in r.items():
print(k.rjust(20), v)
|
the-stack_106_28365 | from __future__ import absolute_import, division, print_function
import re
from unittest import TestCase
from webob import Request, Response
from webtest import TestApp, TestRequest
from manhattan.middleware import ManhattanMiddleware
from manhattan.record import Record
from manhattan.log.memory import MemoryLog
class SampleApp(object):
def __call__(self, environ, start_response):
req = Request(environ)
if req.path_info.endswith('.txt'):
s = 'Hello %s' % req.path_info
resp = Response(s)
resp.content_type = 'text/plain'
elif req.path_info.endswith('.iter'):
resp = Response()
s = 'Hello %s' % req.path_info.encode('ascii')
def app_iter(sample):
for piece in ('<html><body>', sample, '</body>', '</html>'):
yield piece
self.consumed_iter = True
yield ' '
self.consumed_iter = False
resp.content_type = 'text/html'
resp.app_iter = app_iter(s)
else:
s = '<html><body><h1>Hello %s</h1></body></html>' % req.path_info
resp = Response(s)
resp.content_type = 'text/html'
return resp(environ, start_response)
log = MemoryLog()
host_map = {'localhost': 3,
'example.com': 5}
inner_app = SampleApp()
wrapped_app = ManhattanMiddleware(inner_app, log, 'secret', host_map=host_map)
app = TestApp(wrapped_app)
class TestMiddleware(TestCase):
def setUp(self):
app.reset()
log.purge()
def process(self):
records = list(log.process())
self.assertEqual(len(records), 1)
record = Record.from_list(records[0][0])
return record
def test_request(self):
resp = app.get('/')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertEqual(record.site_id, '3')
first_vid = record.vid
m = re.search('<img (.+)src="(.+)" alt="" />', resp.body)
pixel_path = m.group(2)
resp = app.get(pixel_path)
self.assertEqual(resp.content_type, 'image/gif')
record = self.process()
self.assertEqual(record.key, 'pixel')
self.assertEqual(record.site_id, '3')
self.assertEqual(first_vid, record.vid)
resp = app.get('/foo')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertTrue(record.url.endswith('/foo'))
self.assertEqual(record.site_id, '3')
self.assertEqual(first_vid, record.vid)
def test_host_map(self):
resp = app.get('/hello', extra_environ={'HTTP_HOST': 'example.com'})
self.assertEqual(resp.content_type, 'text/html')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertTrue(record.url.endswith('/hello'))
self.assertEqual(record.site_id, '5')
def test_unknown_host(self):
resp = app.get('/somepage',
extra_environ={'HTTP_HOST':
'supercalifragilicious.com'})
self.assertEqual(resp.content_type, 'text/html')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertTrue(record.url.endswith('/somepage'))
self.assertEqual(record.site_id, '0')
def test_pixel_req(self):
resp = app.get('/vpixel.gif')
        self.assertEqual(resp.content_type, 'image/gif',
                         'Requesting the tracking pixel should return a GIF.')
def test_non_html_pixel(self):
resp = app.get('/non-html-page.txt')
self.assertNotIn('/vpixel.gif', resp.body,
'A non-html response should not have a pixel tag.')
def test_generator_response(self):
req = Request.blank('/quux.iter')
resp = req.get_response(wrapped_app)
self.assertFalse(inner_app.consumed_iter,
'The generator response has been buffered by '
'middleware before instead of being returned as an '
'iterable.')
self.assertIn('/vpixel.gif', resp.body)
self.assertTrue(inner_app.consumed_iter)
def test_latin1_user_agent(self):
# Example user agent is latin1-encoded, so should be preserved.
sample_ua = '\xc0 \xe0 hello'
app.get('/somepage', extra_environ={'HTTP_USER_AGENT': sample_ua})
record = self.process()
self.assertEqual(record.user_agent, sample_ua.decode('latin1'))
def test_nongetpost_methods_not_processed(self):
app.put('/somepage')
app.delete('/somepage')
app.options('/somepage')
records = list(log.process())
self.assertEqual(len(records), 0)
def test_safari_top_sites_not_counted(self):
app.get('/blah', headers={'X-Purpose': 'preview'})
records = list(log.process())
self.assertEqual(len(records), 0)
def test_signature_mangled(self):
app.get('/')
orig_cookie = app.cookies['manhattan']
# truncate the last 4 chars, which will blow the sig
bad_cookie = orig_cookie[:-4]
bad_request = TestRequest.blank('/', cookies={'manhattan': bad_cookie})
app.request(bad_request)
new_cookie = app.cookies['manhattan']
self.assertNotEqual(bad_cookie, new_cookie)
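# Editorial addition (hedged): these tests are normally collected by a test
# runner; the guard below simply allows running the module directly with the
# standard unittest runner as well.
if __name__ == '__main__':
    import unittest
    unittest.main()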
|
the-stack_106_28367 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import importlib
import inspect
import json
import os
import pdb
import re
import sys
import unittest
import traceback
from datetime import datetime
from collections import OrderedDict
# This ensures that absolute imports of typ modules will work when
# running typ/runner.py as a script even if typ is not installed.
# We need this entry in addition to the one in __main__.py to ensure
# that typ/runner.py works when invoked via subprocess on windows in
# _spawn_main().
path_to_file = os.path.realpath(__file__)
if path_to_file.endswith('.pyc'): # pragma: no cover
path_to_file = path_to_file[:-1]
dir_above_typ = os.path.dirname(os.path.dirname(path_to_file))
dir_cov = os.path.join(os.path.dirname(dir_above_typ), 'coverage')
for path in (dir_above_typ, dir_cov):
if path not in sys.path: # pragma: no cover
sys.path.append(path)
from typ import artifacts
from typ import json_results
from typ import result_sink
from typ.arg_parser import ArgumentParser
from typ.expectations_parser import TestExpectations, Expectation
from typ.host import Host
from typ.pool import make_pool
from typ.stats import Stats
from typ.printer import Printer
from typ.test_case import TestCase as TypTestCase
from typ.version import VERSION
Result = json_results.Result
ResultSet = json_results.ResultSet
ResultType = json_results.ResultType
FailureReason = json_results.FailureReason
# Matches the first line of stack entries in formatted Python tracebacks.
# The first capture group is the name of the file, the second is the line.
# The method name is not extracted.
# See: https://github.com/python/cpython/blob/3.10/Lib/traceback.py#L440
_TRACEBACK_FILE_RE = re.compile(r'^ File "[^"]*[/\\](.*)", line ([0-9]+), in ')
def main(argv=None, host=None, win_multiprocessing=None, **defaults):
host = host or Host()
runner = Runner(host=host)
if win_multiprocessing is not None:
runner.win_multiprocessing = win_multiprocessing
return runner.main(argv, **defaults)
class TestInput(object):
def __init__(self, name, msg='', timeout=None, expected=None, iteration=0):
self.name = name
self.msg = msg
self.timeout = timeout
self.expected = expected
# Iteration makes more sense as part of the test run, not the test
# input, but since the pool used to run tests persists across
# iterations, we need to store the iteration number in something that
# gets updated each test run, such as TestInput.
self.iteration = iteration
class TestSet(object):
def __init__(self, test_name_prefix='', iteration=0):
self.test_name_prefix = test_name_prefix
self.parallel_tests = []
self.isolated_tests = []
self.tests_to_skip = []
self.iteration = iteration
def copy(self):
test_set = TestSet(self.test_name_prefix)
test_set.tests_to_skip = self.tests_to_skip[:]
test_set.isolated_tests = self.isolated_tests[:]
test_set.parallel_tests = self.parallel_tests[:]
return test_set
def _get_test_name(self, test_case):
_validate_test_starts_with_prefix(
self.test_name_prefix, test_case.id())
return test_case.id()[len(self.test_name_prefix):]
def add_test_to_skip(self, test_case, reason=''):
self.tests_to_skip.append(
TestInput(self._get_test_name(
test_case), reason, iteration=self.iteration))
def add_test_to_run_isolated(self, test_case):
self.isolated_tests.append(
TestInput(self._get_test_name(test_case), iteration=self.iteration))
def add_test_to_run_in_parallel(self, test_case):
self.parallel_tests.append(
TestInput(self._get_test_name(test_case), iteration=self.iteration))
def _validate_test_starts_with_prefix(prefix, test_name):
assert test_name.startswith(prefix), (
'The test prefix passed at the command line does not match the prefix '
'of all the tests generated')
class WinMultiprocessing(object):
ignore = 'ignore'
importable = 'importable'
spawn = 'spawn'
values = [ignore, importable, spawn]
class _AddTestsError(Exception):
pass
class Runner(object):
def __init__(self, host=None):
self.args = None
self.classifier = None
self.cov = None
self.context = None
self.coverage_source = None
self.host = host or Host()
self.loader = unittest.loader.TestLoader()
self.printer = None
self.setup_fn = None
self.stats = None
self.teardown_fn = None
self.top_level_dir = None
self.top_level_dirs = []
self.win_multiprocessing = WinMultiprocessing.spawn
self.final_responses = []
self.has_expectations = False
self.expectations = None
self.metadata = {}
self.path_delimiter = json_results.DEFAULT_TEST_SEPARATOR
self.artifact_output_dir = None
# initialize self.args to the defaults.
parser = ArgumentParser(self.host)
self.parse_args(parser, [])
def main(self, argv=None, **defaults):
parser = ArgumentParser(self.host)
self.parse_args(parser, argv, **defaults)
if parser.exit_status is not None:
return parser.exit_status
try:
ret, _, _ = self.run()
return ret
except KeyboardInterrupt:
self.print_("interrupted, exiting", stream=self.host.stderr)
return 130
def parse_args(self, parser, argv, **defaults):
for attrname in defaults:
if not hasattr(self.args, attrname):
parser.error("Unknown default argument name '%s'" % attrname,
bailout=False)
return
parser.set_defaults(**defaults)
self.args = parser.parse_args(args=argv)
if parser.exit_status is not None:
return
def print_(self, msg='', end='\n', stream=None):
self.host.print_(msg, end, stream=stream)
def run(self, test_set=None):
ret = 0
h = self.host
if self.args.version:
self.print_(VERSION)
return ret, None, None
if self.args.write_full_results_to:
self.artifact_output_dir = os.path.join(
os.path.dirname(
self.args.write_full_results_to), 'artifacts')
should_spawn = self._check_win_multiprocessing()
if should_spawn:
return self._spawn(test_set)
ret = self._set_up_runner()
if ret:
return ret, None, None
find_start = h.time()
if self.cov: # pragma: no cover
self.cov.erase()
self.cov.start()
full_results = None
result_set = ResultSet()
if not test_set:
ret, test_set = self.find_tests(self.args)
find_end = h.time()
if not ret:
self.stats.total = (len(test_set.parallel_tests) +
len(test_set.isolated_tests) +
len(test_set.tests_to_skip)) * self.args.repeat
all_tests = [ti.name for ti in
_sort_inputs(test_set.parallel_tests +
test_set.isolated_tests +
test_set.tests_to_skip)]
self.metadata = {tup[0]:tup[1]
for tup in
[md.split('=', 1) for md in self.args.metadata]}
if self.args.test_name_prefix:
self.metadata['test_name_prefix'] = self.args.test_name_prefix
if self.args.tags:
self.metadata['tags'] = self.args.tags
if self.args.expectations_files:
self.metadata['expectations_files'] = [
os.path.basename(exp)
if not self.args.repository_absolute_path
else ('//' + os.path.relpath(
exp, self.args.repository_absolute_path).replace(
os.path.sep, '/'))
for exp in self.args.expectations_files]
if self.args.list_only:
self.print_('\n'.join(all_tests))
else:
self.print_('Start running tests: %s' % str(datetime.now()))
for _ in range(self.args.repeat):
current_ret, full_results=self._run_tests(
result_set, test_set.copy(), all_tests)
ret = ret or current_ret
if self.cov: # pragma: no cover
self.cov.stop()
self.cov.save()
test_end = h.time()
trace = self._trace_from_results(result_set)
if full_results:
self._summarize(full_results)
self._write(self.args.write_full_results_to, full_results)
upload_ret = self._upload(full_results)
if not ret:
ret = upload_ret
reporting_end = h.time()
self._add_trace_event(trace, 'run', find_start, reporting_end)
self._add_trace_event(trace, 'discovery', find_start, find_end)
self._add_trace_event(trace, 'testing', find_end, test_end)
self._add_trace_event(trace, 'reporting', test_end, reporting_end)
self._write(self.args.write_trace_to, trace)
self.report_coverage()
else:
upload_ret = 0
return ret, full_results, trace
def _check_win_multiprocessing(self):
wmp = self.win_multiprocessing
ignore, importable, spawn = WinMultiprocessing.values
if wmp not in WinMultiprocessing.values:
raise ValueError('illegal value %s for win_multiprocessing' %
wmp)
h = self.host
if wmp == ignore and h.platform == 'win32': # pragma: win32
raise ValueError('Cannot use WinMultiprocessing.ignore for '
'win_multiprocessing when actually running '
'on Windows.')
if wmp == ignore or self.args.jobs == 1:
return False
if wmp == importable:
if self._main_is_importable():
return False
raise ValueError('The __main__ module (%s) ' # pragma: no cover
'may not be importable' %
sys.modules['__main__'].__file__)
assert wmp == spawn
return True
def _main_is_importable(self): # pragma: untested
path = sys.modules['__main__'].__file__
if not path:
return False
if path.endswith('.pyc'):
path = path[:-1]
if not path.endswith('.py'):
return False
if path.endswith('__main__.py'):
# main modules are not directly importable.
return False
path = self.host.realpath(path)
for d in sys.path:
if path.startswith(self.host.realpath(d)):
return True
return False # pragma: no cover
def _spawn(self, test_set):
# TODO: Handle picklable hooks, rather than requiring them to be None.
assert self.classifier is None
assert self.context is None
assert self.setup_fn is None
assert self.teardown_fn is None
assert test_set is None
h = self.host
if self.args.write_trace_to: # pragma: untested
should_delete_trace = False
else:
should_delete_trace = True
fp = h.mktempfile(delete=False)
fp.close()
self.args.write_trace_to = fp.name
if self.args.write_full_results_to: # pragma: untested
should_delete_results = False
else:
should_delete_results = True
fp = h.mktempfile(delete=False)
fp.close()
self.args.write_full_results_to = fp.name
argv = ArgumentParser(h).argv_from_args(self.args)
ret = h.call_inline([h.python_interpreter, path_to_file] + argv)
trace = self._read_and_delete(self.args.write_trace_to,
should_delete_trace)
full_results = self._read_and_delete(self.args.write_full_results_to,
should_delete_results)
return ret, full_results, trace
def _set_up_runner(self):
h = self.host
args = self.args
self.stats = Stats(args.status_format, h.time, args.jobs)
self.printer = Printer(
self.print_, args.overwrite, args.terminal_width)
if self.args.top_level_dirs and self.args.top_level_dir:
self.print_(
'Cannot specify both --top-level-dir and --top-level-dirs',
stream=h.stderr)
return 1
self.top_level_dirs = args.top_level_dirs
if not self.top_level_dirs and args.top_level_dir:
self.top_level_dirs = [args.top_level_dir]
if not self.top_level_dirs:
for test in [t for t in args.tests if h.exists(t)]:
if h.isdir(test):
top_dir = test
else:
top_dir = h.dirname(test)
while h.exists(top_dir, '__init__.py'):
top_dir = h.dirname(top_dir)
top_dir = h.realpath(top_dir)
if not top_dir in self.top_level_dirs:
self.top_level_dirs.append(top_dir)
if not self.top_level_dirs:
top_dir = h.getcwd()
while h.exists(top_dir, '__init__.py'):
top_dir = h.dirname(top_dir)
top_dir = h.realpath(top_dir)
self.top_level_dirs.append(top_dir)
if not self.top_level_dir and self.top_level_dirs:
self.top_level_dir = self.top_level_dirs[0]
for path in self.top_level_dirs:
h.add_to_path(path)
for path in args.path:
h.add_to_path(path)
if args.coverage: # pragma: no cover
try:
import coverage
except ImportError:
self.print_('Error: coverage is not installed.')
return 1
source = self.args.coverage_source
if not source:
source = self.top_level_dirs + self.args.path
self.coverage_source = source
self.cov = coverage.coverage(source=self.coverage_source,
data_suffix=True)
self.cov.erase()
if args.expectations_files:
ret = self.parse_expectations()
if ret:
return ret
elif args.tags:
self.print_('Error: tags require expectations files.')
return 1
return 0
def parse_expectations(self):
args = self.args
if len(args.expectations_files) != 1:
# TODO(crbug.com/835690): Fix this.
self.print_(
'Only a single expectation file is currently supported',
stream=self.host.stderr)
return 1
contents = self.host.read_text_file(args.expectations_files[0])
expectations = TestExpectations(set(args.tags), args.ignored_tags)
err, msg = expectations.parse_tagged_list(
contents, args.expectations_files[0])
if err:
self.print_(msg, stream=self.host.stderr)
return err
self.has_expectations = True
self.expectations = expectations
def find_tests(self, args):
test_set = TestSet(self.args.test_name_prefix)
orig_skip = unittest.skip
orig_skip_if = unittest.skipIf
if args.all:
unittest.skip = lambda reason: lambda x: x
unittest.skipIf = lambda condition, reason: lambda x: x
try:
names = self._name_list_from_args(args)
classifier = self.classifier or self.default_classifier
for name in names:
try:
self._add_tests_to_set(test_set, args.suffixes,
self.top_level_dirs, classifier,
name)
except (AttributeError, ImportError, SyntaxError) as e:
ex_str = traceback.format_exc()
self.print_('Failed to load "%s" in find_tests: %s' %
(name, e))
self.print_(' %s' %
'\n '.join(ex_str.splitlines()))
self.print_(ex_str)
return 1, None
except _AddTestsError as e:
self.print_(str(e))
return 1, None
# TODO: Add support for discovering setupProcess/teardownProcess?
shard_index = args.shard_index
total_shards = args.total_shards
assert total_shards >= 1
assert shard_index >= 0 and shard_index < total_shards, (
'shard_index (%d) must be >= 0 and < total_shards (%d)' %
(shard_index, total_shards))
test_set.parallel_tests = _sort_inputs(
test_set.parallel_tests)[shard_index::total_shards]
test_set.isolated_tests = _sort_inputs(
test_set.isolated_tests)[shard_index::total_shards]
test_set.tests_to_skip = _sort_inputs(
test_set.tests_to_skip)[shard_index::total_shards]
return 0, test_set
finally:
unittest.skip = orig_skip
unittest.skipIf = orig_skip_if
def _name_list_from_args(self, args):
if args.tests:
names = args.tests
elif args.file_list:
if args.file_list == '-':
s = self.host.stdin.read()
else:
s = self.host.read_text_file(args.file_list)
names = [line.strip() for line in s.splitlines()]
else:
names = self.top_level_dirs
return names
def _add_tests_to_set(self, test_set, suffixes, top_level_dirs, classifier,
name):
h = self.host
loader = self.loader
add_tests = _test_adder(test_set, classifier)
found = set()
for d in top_level_dirs:
if h.isfile(name):
rpath = h.relpath(name, d)
if rpath.startswith('..'):
continue
if rpath.endswith('.py'):
rpath = rpath[:-3]
module = rpath.replace(h.sep, '.')
if module not in found:
found.add(module)
add_tests(loader.loadTestsFromName(module))
elif h.isdir(name):
rpath = h.relpath(name, d)
if rpath.startswith('..'):
continue
for suffix in suffixes:
if not name in found:
found.add(name + '/' + suffix)
add_tests(loader.discover(name, suffix, d))
else:
possible_dir = name.replace('.', h.sep)
if h.isdir(d, possible_dir):
for suffix in suffixes:
path = h.join(d, possible_dir)
if not path in found:
found.add(path + '/' + suffix)
suite = loader.discover(path, suffix, d)
add_tests(suite)
elif not name in found:
found.add(name)
add_tests(loader.loadTestsFromName(
self.args.test_name_prefix + name))
# pylint: disable=no-member
if hasattr(loader, 'errors') and loader.errors: # pragma: python3
# In Python3's version of unittest, loader failures get converted
# into failed test cases, rather than raising exceptions. However,
# the errors also get recorded so you can err out immediately.
if isinstance(loader.errors, list):
raise ImportError('\n'.join(loader.errors))
raise ImportError(loader.errors)
def _run_tests(self, result_set, test_set, all_tests):
h = self.host
self.last_runs_retry_on_failure_tests = set()
def get_tests_to_retry(results):
# If the --retry-only-retry-on-failure-tests command line argument
# is passed , then a set of test failures with the RetryOnFailure
# expectation from the last run of tests will be returned. The
# self.last_runs_retry_on_failure_tests will be set to an empty set
# for the next run of tests. Otherwise all regressions from the
# last run will be returned.
if self.args.retry_only_retry_on_failure_tests:
ret = self.last_runs_retry_on_failure_tests.copy()
self.last_runs_retry_on_failure_tests = set()
return ret
else:
return json_results.regressions(results)
if len(test_set.parallel_tests):
jobs = min(
len(test_set.parallel_tests), self.args.jobs)
else:
jobs = 1
child = _Child(self)
pool = make_pool(h, jobs, _run_one_test, child,
_setup_process, _teardown_process)
self._run_one_set(self.stats, result_set, test_set, jobs, pool)
tests_to_retry = sorted(get_tests_to_retry(result_set))
retry_limit = self.args.retry_limit
try:
# Start at 1 since we already did iteration 0 above.
for iteration in range(1, self.args.retry_limit + 1):
if not tests_to_retry:
break
if retry_limit == self.args.retry_limit:
self.flush()
self.args.overwrite = False
self.printer.should_overwrite = False
self.args.verbose = min(self.args.verbose, 1)
self.print_('')
self.print_('Retrying failed tests (attempt #%d of %d)...' %
(iteration, self.args.retry_limit))
self.print_('')
stats = Stats(self.args.status_format, h.time, 1)
stats.total = len(tests_to_retry)
test_set = TestSet(self.args.test_name_prefix)
test_set.isolated_tests = [
TestInput(name,
iteration=iteration) for name in tests_to_retry]
tests_to_retry = test_set
retry_set = ResultSet()
self._run_one_set(stats, retry_set, tests_to_retry, 1, pool)
result_set.results.extend(retry_set.results)
tests_to_retry = get_tests_to_retry(retry_set)
retry_limit -= 1
pool.close()
finally:
self.final_responses.extend(pool.join())
if retry_limit != self.args.retry_limit:
self.print_('')
full_results = json_results.make_full_results(self.metadata,
int(h.time()),
all_tests, result_set,
self.path_delimiter)
retcode = (json_results.exit_code_from_full_results(full_results)
| result_sink.result_sink_retcode_from_result_set(result_set))
return (retcode, full_results)
def _run_one_set(self, stats, result_set, test_set, jobs, pool):
self._skip_tests(stats, result_set, test_set.tests_to_skip)
self._run_list(stats, result_set,
test_set.parallel_tests, jobs, pool)
self._run_list(stats, result_set,
test_set.isolated_tests, 1, pool)
def _skip_tests(self, stats, result_set, tests_to_skip):
for test_input in tests_to_skip:
last = self.host.time()
stats.started += 1
self._print_test_started(stats, test_input)
now = self.host.time()
result = Result(test_input.name, actual=ResultType.Skip,
started=last, took=(now - last), worker=0,
expected=[ResultType.Skip],
out=test_input.msg)
result_set.add(result)
stats.finished += 1
self._print_test_finished(stats, result)
def _run_list(self, stats, result_set, test_inputs, jobs, pool):
running_jobs = set()
while test_inputs or running_jobs:
while test_inputs and (len(running_jobs) < jobs):
test_input = test_inputs.pop(0)
stats.started += 1
pool.send(test_input)
running_jobs.add(test_input.name)
self._print_test_started(stats, test_input)
result, should_retry_on_failure = pool.get()
if result.is_regression:
stats.failed += 1
if (self.args.typ_max_failures is not None
and stats.failed >= self.args.typ_max_failures):
print('\nAborting, waiting for processes to close')
pool.close()
pool.join()
raise RuntimeError(
'Encountered %d failures with max of %d set, aborting.' % (
stats.failed, self.args.typ_max_failures))
if (self.args.retry_only_retry_on_failure_tests and
result.actual == ResultType.Failure and
should_retry_on_failure):
self.last_runs_retry_on_failure_tests.add(result.name)
running_jobs.remove(result.name)
result_set.add(result)
stats.finished += 1
self._print_test_finished(stats, result)
def _print_test_started(self, stats, test_input):
if self.args.quiet:
# Print nothing when --quiet was passed.
return
# If -vvv was passed, print when the test is queued to be run.
# We don't actually know when the test picked up to run, because
# that is handled by the child process (where we can't easily
# print things). Otherwise, only print when the test is started
# if we know we can overwrite the line, so that we do not
# get multiple lines of output as noise (in -vvv, we actually want
# the noise).
test_start_msg = stats.format() + test_input.name
if self.args.verbose > 2:
self.update(test_start_msg + ' queued', elide=False)
if self.args.overwrite:
self.update(test_start_msg, elide=(not self.args.verbose))
def _print_test_finished(self, stats, result):
stats.add_time()
assert result.actual in [ResultType.Failure, ResultType.Skip,
ResultType.Pass]
if result.actual == ResultType.Failure:
result_str = ' failed'
elif result.actual == ResultType.Skip:
result_str = ' was skipped'
elif result.actual == ResultType.Pass:
result_str = ' passed'
if result.unexpected:
result_str += ' unexpectedly'
elif result.actual == ResultType.Failure:
result_str += ' as expected'
if self.args.timing:
timing_str = ' %.4fs' % result.took
else:
timing_str = ''
suffix = '%s%s' % (result_str, timing_str)
out = result.out
err = result.err
if result.is_regression:
if out or err:
suffix += ':\n'
self.update(stats.format() + result.name + suffix, elide=False)
for l in out.splitlines():
self.print_(' %s' % l)
for l in err.splitlines():
self.print_(' %s' % l)
elif not self.args.quiet:
if self.args.verbose > 1 and (out or err):
suffix += ':\n'
self.update(stats.format() + result.name + suffix,
elide=(not self.args.verbose))
if self.args.verbose > 1:
for l in out.splitlines():
self.print_(' %s' % l)
for l in err.splitlines():
self.print_(' %s' % l)
if self.args.verbose:
self.flush()
def update(self, msg, elide):
self.printer.update(msg, elide)
def flush(self):
self.printer.flush()
def _summarize(self, full_results):
num_passes = json_results.num_passes(full_results)
num_failures = json_results.num_failures(full_results)
num_skips = json_results.num_skips(full_results)
if self.args.quiet and num_failures == 0:
return
if self.args.timing:
timing_clause = ' in %.1fs' % (self.host.time() -
self.stats.started_time)
else:
timing_clause = ''
self.update('%d test%s passed%s, %d skipped, %d failure%s.' %
(num_passes,
'' if num_passes == 1 else 's',
timing_clause,
num_skips,
num_failures,
'' if num_failures == 1 else 's'), elide=False)
self.print_()
def _read_and_delete(self, path, delete):
h = self.host
obj = None
if h.exists(path):
contents = h.read_text_file(path)
if contents:
obj = json.loads(contents)
if delete:
h.remove(path)
return obj
def _write(self, path, obj):
if path:
self.host.write_text_file(path, json.dumps(obj, indent=2) + '\n')
def _upload(self, full_results):
h = self.host
if not self.args.test_results_server:
return 0
url, content_type, data = json_results.make_upload_request(
self.args.test_results_server, self.args.builder_name,
self.args.master_name, self.args.test_type,
full_results)
try:
h.fetch(url, data, {'Content-Type': content_type})
return 0
except Exception as e:
h.print_('Uploading the JSON results raised "%s"' % str(e))
return 1
def report_coverage(self):
if self.args.coverage: # pragma: no cover
self.host.print_()
import coverage
cov = coverage.coverage(data_suffix=True)
cov.combine()
cov.report(show_missing=self.args.coverage_show_missing,
omit=self.args.coverage_omit)
if self.args.coverage_annotate:
cov.annotate(omit=self.args.coverage_omit)
def _add_trace_event(self, trace, name, start, end):
event = {
'name': name,
'ts': int((start - self.stats.started_time) * 1000000),
'dur': int((end - start) * 1000000),
'ph': 'X',
'pid': self.host.getpid(),
'tid': 0,
}
trace['traceEvents'].append(event)
def _trace_from_results(self, result_set):
trace = OrderedDict()
trace['traceEvents'] = []
trace['otherData'] = {}
if self.metadata:
trace['otherData'] = self.metadata
for result in result_set.results:
started = int((result.started - self.stats.started_time) * 1000000)
took = int(result.took * 1000000)
event = OrderedDict()
event['name'] = result.name
event['dur'] = took
event['ts'] = started
event['ph'] = 'X' # "Complete" events
event['pid'] = result.pid
event['tid'] = result.worker
args = OrderedDict()
args['expected'] = sorted(str(r) for r in result.expected)
args['actual'] = str(result.actual)
args['out'] = result.out
args['err'] = result.err
args['code'] = result.code
args['unexpected'] = result.unexpected
args['flaky'] = result.flaky
args['file'] = result.file_path
args['line'] = result.line_number
event['args'] = args
trace['traceEvents'].append(event)
return trace
def expectations_for(self, test_case):
test_name = test_case.id()[len(self.args.test_name_prefix):]
if self.has_expectations:
return self.expectations.expectations_for(test_name)
else:
return Expectation(test=test_name)
def default_classifier(self, test_set, test):
if self.matches_filter(test):
if self.should_skip(test):
test_set.add_test_to_skip(test, 'skipped by request')
elif self.should_isolate(test):
test_set.add_test_to_run_isolated(test)
else:
test_set.add_test_to_run_in_parallel(test)
def matches_filter(self, test_case):
_validate_test_starts_with_prefix(
self.args.test_name_prefix, test_case.id())
test_name = test_case.id()[len(self.args.test_name_prefix):]
if self.args.test_filter:
return any(
fnmatch.fnmatch(test_name, glob)
for glob in self.args.test_filter.split('::'))
if self.args.partial_match_filter:
return any(
substr in test_name
for substr in self.args.partial_match_filter)
return True
def should_isolate(self, test_case):
_validate_test_starts_with_prefix(
self.args.test_name_prefix, test_case.id())
test_name = test_case.id()[len(self.args.test_name_prefix):]
return any(fnmatch.fnmatch(test_name, glob)
for glob in self.args.isolate)
def should_skip(self, test_case):
_validate_test_starts_with_prefix(
self.args.test_name_prefix, test_case.id())
if self.args.all:
return False
test_name = test_case.id()[len(self.args.test_name_prefix):]
if self.has_expectations:
expected_results = self.expectations.expectations_for(test_name).results
else:
expected_results = {ResultType.Pass}
return (
ResultType.Skip in expected_results or
any(fnmatch.fnmatch(test_name, glob) for glob in self.args.skip))
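# Hedged usage sketch (editorial addition, not part of typ): the module-level
# main() above can be driven programmatically; passing a directory as a
# positional argument makes typ discover and run the tests under it, mirroring
# the command line. The directory name below is a placeholder.
def _example_programmatic_run(test_dir='tests'):
    return main([test_dir])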
def _test_adder(test_set, classifier):
def add_tests(obj):
if isinstance(obj, unittest.suite.TestSuite):
for el in obj:
add_tests(el)
elif (obj.id().startswith('unittest.loader.LoadTestsFailure') or
obj.id().startswith('unittest.loader.ModuleImportFailure')):
# Access to protected member pylint: disable=W0212
module_name = obj._testMethodName
try:
method = getattr(obj, obj._testMethodName)
method()
except Exception as e:
if 'LoadTests' in obj.id():
raise _AddTestsError('%s.load_tests() failed: %s'
% (module_name, str(e)))
else:
raise _AddTestsError(str(e))
else:
assert isinstance(obj, unittest.TestCase)
classifier(test_set, obj)
return add_tests
class _Child(object):
def __init__(self, parent):
self.host = None
self.worker_num = None
self.all = parent.args.all
self.debugger = parent.args.debugger
self.coverage = parent.args.coverage and parent.args.jobs > 1
self.coverage_source = parent.coverage_source
self.dry_run = parent.args.dry_run
self.loader = parent.loader
self.passthrough = parent.args.passthrough
self.context = parent.context
self.setup_fn = parent.setup_fn
self.teardown_fn = parent.teardown_fn
self.context_after_setup = None
self.top_level_dir = parent.top_level_dir
self.top_level_dirs = parent.top_level_dirs
self.loaded_suites = {}
self.cov = None
self.has_expectations = parent.has_expectations
self.expectations = parent.expectations
self.test_name_prefix = parent.args.test_name_prefix
self.artifact_output_dir = parent.artifact_output_dir
self.result_sink_reporter = None
self.disable_resultsink = parent.args.disable_resultsink
def _setup_process(host, worker_num, child):
child.host = host
child.result_sink_reporter = result_sink.ResultSinkReporter(
host, child.disable_resultsink)
child.worker_num = worker_num
# pylint: disable=protected-access
if child.coverage: # pragma: no cover
import coverage
child.cov = coverage.coverage(source=child.coverage_source,
data_suffix=True)
child.cov._warn_no_data = False
child.cov.start()
if child.setup_fn:
child.context_after_setup = child.setup_fn(child, child.context)
else:
child.context_after_setup = child.context
return child
def _teardown_process(child):
res = None
exc = None
if child.teardown_fn:
try:
res = child.teardown_fn(child, child.context_after_setup)
except Exception as e:
exc = e
pass
if child.cov: # pragma: no cover
child.cov.stop()
child.cov.save()
return (child.worker_num, res, exc)
def _run_one_test(child, test_input):
h = child.host
pid = h.getpid()
test_name = test_input.name
started = h.time()
# It is important to capture the output before loading the test
# to ensure that
# 1) the loader doesn't logs something we don't captured
# 2) neither the loader nor the test case grab a reference to the
# uncaptured stdout or stderr that later is used when the test is run.
# This comes up when using the FakeTestLoader and testing typ itself,
# but could come up when testing non-typ code as well.
h.capture_output(divert=not child.passthrough)
if child.has_expectations:
expectation = child.expectations.expectations_for(test_name)
expected_results, should_retry_on_failure = (
expectation.results, expectation.should_retry_on_failure)
else:
expected_results, should_retry_on_failure = {ResultType.Pass}, False
ex_str = ''
try:
orig_skip = unittest.skip
orig_skip_if = unittest.skipIf
if child.all:
unittest.skip = lambda reason: lambda x: x
unittest.skipIf = lambda condition, reason: lambda x: x
elif ResultType.Skip in expected_results:
h.restore_output()
return (Result(test_name, ResultType.Skip, started, 0,
child.worker_num, expected=expected_results,
unexpected=False, pid=pid), False)
test_name_to_load = child.test_name_prefix + test_name
try:
# If we have errors around from before, clear them now so we don't
# attempt to handle them later.
if hasattr(child.loader, 'errors') and child.loader.errors:
child.loader.errors.clear()
suite = child.loader.loadTestsFromName(test_name_to_load)
# From Python 3.5, AttributeError will not be thrown when calling
# LoadTestsFromName. Instead, it adds error messages in the loader.
# As a result, the original handling cannot kick in properly. We
# now check the error message and throw exception as needed.
if hasattr(child.loader, 'errors') and child.loader.errors:
if isinstance(child.loader.errors, list):
raise AttributeError('\n'.join(child.loader.errors))
raise AttributeError(child.loader.errors)
except Exception as e:
ex_str = ('loadTestsFromName("%s") failed: %s\n%s\n' %
(test_name_to_load, e, traceback.format_exc()))
try:
suite = _load_via_load_tests(child, test_name_to_load)
ex_str += ('\nload_via_load_tests(\"%s\") returned %d tests\n' %
(test_name_to_load, len(list(suite))))
except Exception as e: # pragma: untested
suite = []
ex_str += ('\nload_via_load_tests("%s") failed: %s\n%s\n' %
(test_name_to_load, e, traceback.format_exc()))
finally:
unittest.skip = orig_skip
unittest.skipIf = orig_skip_if
tests = list(suite)
if len(tests) != 1:
err = 'Failed to load "%s" in run_one_test' % test_name
if ex_str: # pragma: untested
err += '\n ' + '\n '.join(ex_str.splitlines())
h.restore_output()
return (Result(test_name, ResultType.Failure, started, took=0,
worker=child.worker_num, unexpected=True, code=1,
err=err, pid=pid), False)
art = artifacts.Artifacts(
child.artifact_output_dir, h, test_input.iteration, test_name)
test_case = tests[0]
if isinstance(test_case, TypTestCase):
test_case.child = child
test_case.context = child.context_after_setup
test_case.set_artifacts(art)
test_result = unittest.TestResult()
out = ''
err = ''
try:
if child.dry_run:
pass
elif child.debugger: # pragma: no cover
_run_under_debugger(h, test_case, suite, test_result)
else:
suite.run(test_result)
finally:
out, err = h.restore_output()
# Clear the artifact implementation so that later tests don't try to
# use a stale instance.
if isinstance(test_case, TypTestCase):
test_case.set_artifacts(None)
took = h.time() - started
# If the test signaled that it should be retried on failure, do so.
if isinstance(test_case, TypTestCase):
# Handle the case where the test called self.skipTest, e.g. if it
# determined that the test is not valid on the current configuration.
if test_result.skipped and test_case.programmaticSkipIsExpected:
return (Result(test_name, ResultType.Skip, started, took,
child.worker_num, expected={ResultType.Skip},
unexpected=False, pid=pid), False)
should_retry_on_failure = (should_retry_on_failure
or test_case.retryOnFailure)
result = _result_from_test_result(test_result, test_name, started, took, out,
err, child.worker_num, pid, test_case,
expected_results, child.has_expectations,
art.artifacts)
test_location = inspect.getsourcefile(test_case.__class__)
test_method = getattr(test_case, test_case._testMethodName)
# Test methods are often wrapped by decorators such as @mock. Try to get to
# the actual test method instead of the wrapper.
if hasattr(test_method, '__wrapped__'):
test_method = test_method.__wrapped__
# Some tests are generated and don't have valid line numbers. Such test
# methods also have a source location different from module location.
if inspect.getsourcefile(test_method) == test_location:
test_line = inspect.getsourcelines(test_method)[1]
else:
test_line = None
result.result_sink_retcode =\
child.result_sink_reporter.report_individual_test_result(
child.test_name_prefix, result, child.artifact_output_dir,
child.expectations, test_location, test_line)
return (result, should_retry_on_failure)
def _run_under_debugger(host, test_case, suite,
test_result): # pragma: no cover
# Access to protected member pylint: disable=W0212
test_func = getattr(test_case, test_case._testMethodName)
fname = inspect.getsourcefile(test_func)
lineno = inspect.getsourcelines(test_func)[1] + 1
dbg = pdb.Pdb(stdout=host.stdout.stream)
dbg.set_break(fname, lineno)
dbg.runcall(suite.run, test_result)
def _result_from_test_result(test_result, test_name, started, took, out, err,
worker_num, pid, test_case, expected_results,
has_expectations, artifacts):
failure_reason = None
if test_result.failures:
actual = ResultType.Failure
code = 1
err = err + test_result.failures[0][1]
unexpected = actual not in expected_results
for i, failure in enumerate(test_result.failures):
if failure_reason is None:
failure_reason = _failure_reason_from_traceback(failure[1])
elif test_result.errors:
actual = ResultType.Failure
code = 1
err = err + test_result.errors[0][1]
unexpected = actual not in expected_results
for i, error in enumerate(test_result.errors):
if failure_reason is None:
failure_reason = _failure_reason_from_traceback(error[1])
elif test_result.skipped:
actual = ResultType.Skip
err = err + test_result.skipped[0][1]
code = 0
if has_expectations:
unexpected = actual not in expected_results
else:
unexpected = False
expected_results = {ResultType.Skip}
elif test_result.expectedFailures:
actual = ResultType.Failure
code = 1
err = err + test_result.expectedFailures[0][1]
unexpected = False
elif test_result.unexpectedSuccesses:
actual = ResultType.Pass
code = 0
unexpected = True
else:
actual = ResultType.Pass
code = 0
unexpected = actual not in expected_results
flaky = False
test_func = getattr(test_case, test_case._testMethodName)
test_func = getattr(test_func, 'real_test_func', test_func)
file_path = inspect.getsourcefile(test_func)
line_number = inspect.getsourcelines(test_func)[1]
return Result(test_name, actual, started, took, worker_num,
expected_results, unexpected, flaky, code, out, err, pid,
file_path, line_number, artifacts, failure_reason)
def _failure_reason_from_traceback(traceback):
"""Attempts to extract a failure reason from formatted Traceback data.
The formatted traceback data handled by this method is that populated on
unittest.TestResult objects in the errors and/or failures attribute(s).
We reverse this formatting process to obtain the underlying failure
exception message or assertion failure, excluding stacktrace and other
data.
When reading this method, it is useful to read python unittest sources
at the same time, as this reverses some of the formatting defined there.
https://github.com/python/cpython/blob/3.10/Lib/unittest/result.py#L119
https://github.com/python/cpython/blob/3.10/Lib/unittest/result.py#L173
https://github.com/python/cpython/blob/3.10/Lib/traceback.py#L652
This method may not succeed in extracting a failure reason. In this case,
it returns None.
"""
lines = traceback.splitlines()
    # Start line index of the interesting region (the line(s) containing
    # the assertion failure or exception message).
start_index = 0
# End index of the interesting region.
end_index = len(lines)
# The file name and line that raised the exception or assertion failure.
# Formatted as "filename.py(123)".
context_file_line = None
in_traceback = False
for i, line in enumerate(lines):
# Tracebacks precede the interesting message. It is possible
# for there to be multiple tracebacks blocks in case of chained
# exceptions. E.g. "While handling a XYZError, the following
# exception was also raised:". The interesting message is
# after all such chained stacks.
if line == 'Traceback (most recent call last):':
in_traceback = True
start_index = i + 1
context_file_line = None
elif line.startswith(' ') and in_traceback:
# Continuation of traceback.
start_index = i + 1
# Keep track of the last file in the traceback.
file_match = _TRACEBACK_FILE_RE.match(line)
if file_match:
context_file_line = '{}({})'.format(
file_match.group(1),
file_match.group(2))
else:
in_traceback = False
# The "Stdout:" or "Stderr:" blocks (if present) are after the
# interesting failure message.
if line == 'Stdout:' or line == 'Stderr:':
if i < end_index:
end_index = i
interesting_lines = lines[start_index:end_index]
if len(interesting_lines) > 0 and context_file_line is not None:
        # Make the failure reason look like:
# "my_unittest.py(123): AssertionError: unexpectedly None".
#
# We include the file and line of the original exception
# in failure reason, as basic assertion failures
# (true != false, None is not None, etc.) can be too generic
# to be clustered in a useful way without this.
message = '{}: {}'.format(context_file_line,
'\n'.join(interesting_lines).strip())
return FailureReason(message)
return None
def _load_via_load_tests(child, test_name):
# If we couldn't import a test directly, the test may be only loadable
# via unittest's load_tests protocol. See if we can find a load_tests
# entry point that will work for this test.
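    # For example (hypothetical name), for "pkg.module.Class.test_foo" the loop
    # below tries "pkg.module.Class.test_foo", then "pkg.module.Class", then
    # "pkg.module", and so on; every prefix that imports is loaded with
    # loadTestsFromModule, and any case whose id() matches the requested name
    # is added to the returned suite.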
loader = child.loader
comps = test_name.split('.')
new_suite = unittest.TestSuite()
while comps:
name = '.'.join(comps)
module = None
suite = None
if name not in child.loaded_suites:
try:
module = importlib.import_module(name)
except ImportError:
pass
if module:
suite = loader.loadTestsFromModule(module)
child.loaded_suites[name] = suite
suite = child.loaded_suites[name]
if suite:
for test_case in suite:
assert isinstance(test_case, unittest.TestCase)
if test_case.id() == test_name: # pragma: untested
new_suite.addTest(test_case)
break
comps.pop()
return new_suite
def _sort_inputs(inps):
return sorted(inps, key=lambda inp: inp.name)
if __name__ == '__main__': # pragma: no cover
sys.modules['__main__'].__file__ = path_to_file
sys.exit(main(win_multiprocessing=WinMultiprocessing.importable))
|
the-stack_106_28369 | import csv
import datetime
from django.contrib import admin
from django.http import HttpResponse
from .models import DataLogSheet
def export_to_csv(modeladmin, request, queryset):
opts = modeladmin.model._meta
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment;'\
'filename={}.csv'.format(opts.verbose_name)
writer = csv.writer(response)
fields = [field for field in opts.get_fields() if not field.many_to_many
and not field.one_to_many]
# Write a first row with header information
writer.writerow([field.verbose_name for field in fields])
# Write data rows
for obj in queryset:
data_row = []
for field in fields:
value = getattr(obj, field.name)
if isinstance(value, datetime.datetime):
value = value.strftime('%d/%m/%Y')
data_row.append(value)
writer.writerow(data_row)
return response
export_to_csv.short_description = 'Export to CSV'
@admin.register(DataLogSheet)
class DataLogSheetAdmin(admin.ModelAdmin):
    list_display = ['reel', 'epi', 'scn', 'location', 'clips', 'shot', 'take', 'note', 'updated']
list_filter = ['epi', 'reel', 'updated']
actions = [export_to_csv]
list_per_page = 10 |
the-stack_106_28370 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pre-process Data / features files and build vocabulary
"""
import argparse
import glob
import sys
import gc
import codecs
import torch
from onmt.utils.logging import init_logger, logger
import onmt.inputters as inputters
import onmt.opts as opts
def check_existing_pt_files(opt):
""" Checking if there are existing .pt files to avoid tampering """
# We will use glob.glob() to find sharded {train|valid}.[0-9]*.pt
# when training, so check to avoid tampering with existing pt files
# or mixing them up.
for t in ['train', 'valid', 'vocab']:
pattern = opt.save_data + '.' + t + '*.pt'
if glob.glob(pattern):
sys.stderr.write("Please backup existing pt file: %s, "
"to avoid tampering!\n" % pattern)
sys.exit(1)
def parse_args():
""" Parsing arguments """
parser = argparse.ArgumentParser(
description='preprocess.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.preprocess_opts(parser)
opt = parser.parse_args()
torch.manual_seed(opt.seed)
check_existing_pt_files(opt)
return opt
def build_save_in_shards_using_shards_size(src_corpus, tgt_corpus, fields,
corpus_type, opt):
"""
    Divide the src_corpus and tgt_corpus files into multiple smaller
    src/tgt corpus files, then build one shard per piece. Each shard
    holds opt.shard_size samples, except possibly the last shard.
    The reason for doing this is to avoid taking up too much memory
    by reading a huge corpus file into memory all at once.
"""
with codecs.open(src_corpus, "r", encoding="utf-8") as fsrc:
with codecs.open(tgt_corpus, "r", encoding="utf-8") as ftgt:
src_data = fsrc.readlines()
tgt_data = ftgt.readlines()
src_corpus = "".join(src_corpus.split(".")[:-1])
tgt_corpus = "".join(tgt_corpus.split(".")[:-1])
num_shards = int(len(src_data) / opt.shard_size)
for x in range(num_shards):
f = codecs.open(src_corpus + ".{0}.txt".format(x), "w",
encoding="utf-8")
f.writelines(
src_data[x * opt.shard_size: (x + 1) * opt.shard_size])
f.close()
f = codecs.open(tgt_corpus + ".{0}.txt".format(x), "w",
encoding="utf-8")
f.writelines(
tgt_data[x * opt.shard_size: (x + 1) * opt.shard_size])
f.close()
num_written = num_shards * opt.shard_size
if len(src_data) > num_written:
f = codecs.open(src_corpus + ".{0}.txt".format(num_shards),
'w', encoding="utf-8")
f.writelines(
src_data[num_shards * opt.shard_size:])
f.close()
f = codecs.open(tgt_corpus + ".{0}.txt".format(num_shards),
'w', encoding="utf-8")
f.writelines(
tgt_data[num_shards * opt.shard_size:])
f.close()
src_list = sorted(glob.glob(src_corpus + '.*.txt'))
tgt_list = sorted(glob.glob(tgt_corpus + '.*.txt'))
ret_list = []
for index, src in enumerate(src_list):
dataset = inputters.build_dataset(
fields, opt.data_type,
src_path=src,
tgt_path=tgt_list[index],
src_dir=opt.src_dir,
src_seq_length=opt.src_seq_length,
tgt_seq_length=opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict,
sample_rate=opt.sample_rate,
window_size=opt.window_size,
window_stride=opt.window_stride,
window=opt.window,
image_channel_size=opt.image_channel_size
)
pt_file = "{:s}.{:s}.{:d}.pt".format(
opt.save_data, corpus_type, index)
        # We save fields in vocab.pt separately, so make it empty.
dataset.fields = []
logger.info(" * saving %sth %s data shard to %s."
% (index, corpus_type, pt_file))
torch.save(dataset, pt_file)
ret_list.append(pt_file)
del dataset.examples
gc.collect()
del dataset
gc.collect()
return ret_list
def build_save_dataset(corpus_type, fields, opt):
""" Building and saving the dataset """
assert corpus_type in ['train', 'valid']
if corpus_type == 'train':
src_corpus = opt.train_src
tgt_corpus = opt.train_tgt
else:
src_corpus = opt.valid_src
tgt_corpus = opt.valid_tgt
if (opt.shard_size > 0):
return build_save_in_shards_using_shards_size(src_corpus,
tgt_corpus,
fields,
corpus_type,
opt)
# For data_type == 'img' or 'audio', currently we don't do
# preprocess sharding. We only build a monolithic dataset.
    # But since the interfaces are uniform, it would not be hard
    # to add this should users need the feature.
dataset = inputters.build_dataset(
fields, opt.data_type,
src_path=src_corpus,
tgt_path=tgt_corpus,
src_dir=opt.src_dir,
src_seq_length=opt.src_seq_length,
tgt_seq_length=opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict,
sample_rate=opt.sample_rate,
window_size=opt.window_size,
window_stride=opt.window_stride,
window=opt.window,
image_channel_size=opt.image_channel_size)
    # We save fields in vocab.pt separately, so make it empty.
dataset.fields = []
pt_file = "{:s}.{:s}.pt".format(opt.save_data, corpus_type)
logger.info(" * saving %s dataset to %s." % (corpus_type, pt_file))
torch.save(dataset, pt_file)
return [pt_file]
def build_save_vocab(train_dataset, fields, opt):
""" Building and saving the vocab """
fields = inputters.build_vocab(train_dataset, fields, opt.data_type,
opt.share_vocab,
opt.src_vocab,
opt.src_vocab_size,
opt.src_words_min_frequency,
opt.tgt_vocab,
opt.tgt_vocab_size,
opt.tgt_words_min_frequency)
# Can't save fields, so remove/reconstruct at training time.
vocab_file = opt.save_data + '.vocab.pt'
torch.save(inputters.save_fields_to_vocab(fields), vocab_file)
def main():
opt = parse_args()
if (opt.max_shard_size > 0):
raise AssertionError("-max_shard_size is deprecated, please use \
-shard_size (number of examples) instead.")
init_logger(opt.log_file)
logger.info("Extracting features...")
src_nfeats = inputters.get_num_features(
opt.data_type, opt.train_src, 'src')
tgt_nfeats = inputters.get_num_features(
opt.data_type, opt.train_tgt, 'tgt')
logger.info(" * number of source features: %d." % src_nfeats)
logger.info(" * number of target features: %d." % tgt_nfeats)
logger.info("Building `Fields` object...")
fields = inputters.get_fields(opt.data_type, src_nfeats, tgt_nfeats)
logger.info("Building & saving training data...")
train_dataset_files = build_save_dataset('train', fields, opt)
logger.info("Building & saving validation data...")
valid_dataset_files = build_save_dataset('valid', fields, opt)
logger.info("Building & saving vocabulary...")
build_save_vocab(train_dataset_files + valid_dataset_files, fields, opt)
if __name__ == "__main__":
main()
|
the-stack_106_28372 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'limin'
'''
Fairy Four-Price strategy (an intraday breakout strategy; all open positions are closed before the daily market close)
Reference: https://www.shinnytech.com/blog/fairy-four-price/
Note: this example strategy only demonstrates functionality; adjust it to your own strategy/experience before live trading
'''
from tqsdk import TqApi, TqAuth, TargetPosTask
from datetime import datetime
import time
symbol = "SHFE.ni2012" # 合约代码
close_hour, close_minute = 14, 50 # 平仓时间
api = TqApi(auth=TqAuth("信易账户", "账户密码")) # 使用模拟帐号直连行情和交易服务器
quote = api.get_quote(symbol) # 获取指定合约的盘口行情
klines = api.get_kline_serial(symbol, 24 * 60 * 60) # 获取日线
position = api.get_position(symbol) # 持仓信息
target_pos = TargetPosTask(api, symbol) # 目标持仓
top_rail = klines.high.iloc[-2] # 上轨: 昨日高点
bottom_rail = klines.low.iloc[-2] # 下轨: 昨日低点
print("上轨:", top_rail, ",下轨:", bottom_rail, ",昨日收盘价:", klines.close.iloc[-2], ",今日开盘价:", klines.open.iloc[-1])
while True:
api.wait_update()
if api.is_changing(klines.iloc[-1], "datetime"): # 如果产生一根新日线 (即到达下一个交易日): 重新获取上下轨
top_rail = klines.high.iloc[-2]
bottom_rail = klines.low.iloc[-2]
print("上轨:", top_rail, ",下轨:", bottom_rail, ",昨日收盘价:", klines.close.iloc[-2], ",今日开盘价:", klines.open.iloc[-1])
if api.is_changing(quote, "last_price"): # 如果行情最新价发生变化
print("当前最新价", quote.last_price)
# 开仓突破
if quote.last_price > top_rail and position.pos_long == 0: # 如果价格突破上轨: 买入开仓
print("最新价:", quote.last_price, ", 价格突破上轨,买入开仓")
target_pos.set_target_volume(3) # 设置目标持仓手数,将指定合约调整到目标头寸
elif quote.last_price < bottom_rail and position.pos_short == 0: # 如果价格跌破下轨: 卖出开仓
print("最新价:", quote.last_price, ", 价格跌破下轨, 卖出开仓")
target_pos.set_target_volume(-3)
        # stop-loss exit: after the price breaks above the upper rail or below the lower rail, it falls back through today's open
if (quote.highest > top_rail and quote.last_price <= quote.open) or (
quote.lowest < bottom_rail and quote.last_price >= quote.open):
print("平仓止损")
target_pos.set_target_volume(0)
if api.is_changing(quote, "datetime"):
        now_time = datetime.strptime(quote.datetime, "%Y-%m-%d %H:%M:%S.%f") # current quote time
        if now_time.hour == close_hour and now_time.minute >= close_minute: # closing time reached: close positions
            print("approaching today's market close: closing positions")
target_pos.set_target_volume(0)
            deadline = time.time() + 60 # set the deadline to 60 seconds from now
            while api.wait_update(deadline=deadline): # wait for 60 seconds
pass
            api.close() # close the api
            break # exit the while loop
|
the-stack_106_28375 | # Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for plotting and summarizing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import scipy
import tensorflow as tf
import models
def summarize_ess(weights, only_last_timestep=False):
"""Plots the effective sample size.
Args:
weights: List of length num_timesteps Tensors of shape
[num_samples, batch_size]
"""
num_timesteps = len(weights)
batch_size = tf.cast(tf.shape(weights[0])[1], dtype=tf.float64)
for i in range(num_timesteps):
if only_last_timestep and i < num_timesteps-1: continue
w = tf.nn.softmax(weights[i], dim=0)
centered_weights = w - tf.reduce_mean(w, axis=0, keepdims=True)
variance = tf.reduce_sum(tf.square(centered_weights))/(batch_size-1)
ess = 1./tf.reduce_mean(tf.reduce_sum(tf.square(w), axis=0))
tf.summary.scalar("ess/%d" % i, ess)
tf.summary.scalar("ese/%d" % i, ess / batch_size)
tf.summary.scalar("weight_variance/%d" % i, variance)
def summarize_particles(states, weights, observation, model):
"""Plots particle locations and weights.
Args:
states: List of length num_timesteps Tensors of shape
[batch_size*num_particles, state_size].
weights: List of length num_timesteps Tensors of shape [num_samples,
batch_size]
observation: Tensor of shape [batch_size*num_samples, state_size]
"""
num_timesteps = len(weights)
num_samples, batch_size = weights[0].get_shape().as_list()
# get q0 information for plotting
q0_dist = model.q.q_zt(observation, tf.zeros_like(states[0]), 0)
q0_loc = q0_dist.loc[0:batch_size, 0]
q0_scale = q0_dist.scale[0:batch_size, 0]
# get posterior information for plotting
post = (model.p.mixing_coeff, model.p.prior_mode_mean, model.p.variance,
tf.reduce_sum(model.p.bs), model.p.num_timesteps)
# Reshape states and weights to be [time, num_samples, batch_size]
states = tf.stack(states)
weights = tf.stack(weights)
# normalize the weights over the sample dimension
weights = tf.nn.softmax(weights, dim=1)
states = tf.reshape(states, tf.shape(weights))
ess = 1./tf.reduce_sum(tf.square(weights), axis=1)
def _plot_states(states_batch, weights_batch, observation_batch, ess_batch, q0, post):
"""
states: [time, num_samples, batch_size]
weights [time, num_samples, batch_size]
observation: [batch_size, 1]
q0: ([batch_size], [batch_size])
post: ...
"""
num_timesteps, _, batch_size = states_batch.shape
plots = []
for i in range(batch_size):
states = states_batch[:,:,i]
weights = weights_batch[:,:,i]
observation = observation_batch[i]
ess = ess_batch[:,i]
q0_loc = q0[0][i]
q0_scale = q0[1][i]
fig = plt.figure(figsize=(7, (num_timesteps + 1) * 2))
# Each timestep gets two plots -- a bar plot and a histogram of state locs.
# The bar plot will be bar_rows rows tall.
# The histogram will be 1 row tall.
# There is also 1 extra plot at the top showing the posterior and q.
bar_rows = 8
num_rows = (num_timesteps + 1) * (bar_rows + 1)
gs = gridspec.GridSpec(num_rows, 1)
# Figure out how wide to make the plot
prior_lims = (post[1] * -2, post[1] * 2)
q_lims = (scipy.stats.norm.ppf(0.01, loc=q0_loc, scale=q0_scale),
scipy.stats.norm.ppf(0.99, loc=q0_loc, scale=q0_scale))
state_width = states.max() - states.min()
state_lims = (states.min() - state_width * 0.15,
states.max() + state_width * 0.15)
lims = (min(prior_lims[0], q_lims[0], state_lims[0]),
max(prior_lims[1], q_lims[1], state_lims[1]))
# plot the posterior
z0 = np.arange(lims[0], lims[1], 0.1)
alpha, pos_mu, sigma_sq, B, T = post
neg_mu = -pos_mu
scale = np.sqrt((T + 1) * sigma_sq)
p_zn = (
alpha * scipy.stats.norm.pdf(
observation, loc=pos_mu + B, scale=scale) + (1 - alpha) *
scipy.stats.norm.pdf(observation, loc=neg_mu + B, scale=scale))
p_z0 = (
alpha * scipy.stats.norm.pdf(z0, loc=pos_mu, scale=np.sqrt(sigma_sq))
+ (1 - alpha) * scipy.stats.norm.pdf(
z0, loc=neg_mu, scale=np.sqrt(sigma_sq)))
p_zn_given_z0 = scipy.stats.norm.pdf(
observation, loc=z0 + B, scale=np.sqrt(T * sigma_sq))
post_z0 = (p_z0 * p_zn_given_z0) / p_zn
# plot q
q_z0 = scipy.stats.norm.pdf(z0, loc=q0_loc, scale=q0_scale)
ax = plt.subplot(gs[0:bar_rows, :])
ax.plot(z0, q_z0, color="blue")
ax.plot(z0, post_z0, color="green")
ax.plot(z0, p_z0, color="red")
ax.legend(("q", "posterior", "prior"), loc="best", prop={"size": 10})
ax.set_xticks([])
ax.set_xlim(*lims)
# plot the states
for t in range(num_timesteps):
start = (t + 1) * (bar_rows + 1)
ax1 = plt.subplot(gs[start:start + bar_rows, :])
ax2 = plt.subplot(gs[start + bar_rows:start + bar_rows + 1, :])
# plot the states barplot
# ax1.hist(
# states[t, :],
# weights=weights[t, :],
# bins=50,
# edgecolor="none",
# alpha=0.2)
ax1.bar(states[t,:], weights[t,:], width=0.02, alpha=0.2, edgecolor = "none")
ax1.set_ylabel("t=%d" % t)
ax1.set_xticks([])
ax1.grid(True, which="both")
ax1.set_xlim(*lims)
# plot the observation
ax1.axvline(x=observation, color="red", linestyle="dashed")
# add the ESS
ax1.text(0.1, 0.9, "ESS: %0.2f" % ess[t],
ha='center', va='center', transform=ax1.transAxes)
# plot the state location histogram
ax2.hist2d(
states[t, :], np.zeros_like(states[t, :]), bins=[50, 1], cmap="Greys")
ax2.grid(False)
ax2.set_yticks([])
ax2.set_xlim(*lims)
if t != num_timesteps - 1:
ax2.set_xticks([])
fig.canvas.draw()
p = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
plots.append(p.reshape(fig.canvas.get_width_height()[::-1] + (3,)))
plt.close(fig)
return np.stack(plots)
plots = tf.py_func(_plot_states,
[states, weights, observation, ess, (q0_loc, q0_scale), post],
[tf.uint8])[0]
tf.summary.image("states", plots, 5, collections=["infrequent_summaries"])
def plot_weights(weights, resampled=None):
"""Plots the weights and effective sample size from an SMC rollout.
Args:
weights: [num_timesteps, num_samples, batch_size] importance weights
    resampled: [num_timesteps] 0/1 indicating if resampling occurred
"""
weights = tf.convert_to_tensor(weights)
def _make_plots(weights, resampled):
num_timesteps, num_samples, batch_size = weights.shape
plots = []
for i in range(batch_size):
fig, axes = plt.subplots(nrows=1, sharex=True, figsize=(8, 4))
axes.stackplot(np.arange(num_timesteps), np.transpose(weights[:, :, i]))
axes.set_title("Weights")
axes.set_xlabel("Steps")
axes.set_ylim([0, 1])
axes.set_xlim([0, num_timesteps - 1])
for j in np.where(resampled > 0)[0]:
axes.axvline(x=j, color="red", linestyle="dashed", ymin=0.0, ymax=1.0)
fig.canvas.draw()
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
plots.append(data)
plt.close(fig)
return np.stack(plots, axis=0)
if resampled is None:
num_timesteps, _, batch_size = weights.get_shape().as_list()
resampled = tf.zeros([num_timesteps], dtype=tf.float32)
plots = tf.py_func(_make_plots,
[tf.nn.softmax(weights, dim=1),
tf.to_float(resampled)], [tf.uint8])[0]
batch_size = weights.get_shape().as_list()[-1]
tf.summary.image(
"weights", plots, batch_size, collections=["infrequent_summaries"])
def summarize_weights(weights, num_timesteps, num_samples):
# weights is [num_timesteps, num_samples, batch_size]
weights = tf.convert_to_tensor(weights)
mean = tf.reduce_mean(weights, axis=1, keepdims=True)
squared_diff = tf.square(weights - mean)
variances = tf.reduce_sum(squared_diff, axis=1) / (num_samples - 1)
# average the variance over the batch
variances = tf.reduce_mean(variances, axis=1)
avg_magnitude = tf.reduce_mean(tf.abs(weights), axis=[1, 2])
for t in xrange(num_timesteps):
tf.summary.scalar("weights/variance_%d" % t, variances[t])
tf.summary.scalar("weights/magnitude_%d" % t, avg_magnitude[t])
tf.summary.histogram("weights/step_%d" % t, weights[t])
def summarize_learning_signal(rewards, tag):
num_resampling_events, _ = rewards.get_shape().as_list()
mean = tf.reduce_mean(rewards, axis=1)
avg_magnitude = tf.reduce_mean(tf.abs(rewards), axis=1)
reward_square = tf.reduce_mean(tf.square(rewards), axis=1)
for t in xrange(num_resampling_events):
tf.summary.scalar("%s/mean_%d" % (tag, t), mean[t])
tf.summary.scalar("%s/magnitude_%d" % (tag, t), avg_magnitude[t])
tf.summary.scalar("%s/squared_%d" % (tag, t), reward_square[t])
tf.summary.histogram("%s/step_%d" % (tag, t), rewards[t])
def summarize_qs(model, observation, states):
model.q.summarize_weights()
if hasattr(model.p, "posterior") and callable(getattr(model.p, "posterior")):
states = [tf.zeros_like(states[0])] + states[:-1]
for t, prev_state in enumerate(states):
p = model.p.posterior(observation, prev_state, t)
q = model.q.q_zt(observation, prev_state, t)
kl = tf.reduce_mean(tf.contrib.distributions.kl_divergence(p, q))
tf.summary.scalar("kl_q/%d" % t, tf.reduce_mean(kl))
mean_diff = q.loc - p.loc
mean_abs_err = tf.abs(mean_diff)
mean_rel_err = tf.abs(mean_diff / p.loc)
tf.summary.scalar("q_mean_convergence/absolute_error_%d" % t,
tf.reduce_mean(mean_abs_err))
tf.summary.scalar("q_mean_convergence/relative_error_%d" % t,
tf.reduce_mean(mean_rel_err))
sigma_diff = tf.square(q.scale) - tf.square(p.scale)
sigma_abs_err = tf.abs(sigma_diff)
sigma_rel_err = tf.abs(sigma_diff / tf.square(p.scale))
tf.summary.scalar("q_variance_convergence/absolute_error_%d" % t,
tf.reduce_mean(sigma_abs_err))
tf.summary.scalar("q_variance_convergence/relative_error_%d" % t,
tf.reduce_mean(sigma_rel_err))
def summarize_rs(model, states):
model.r.summarize_weights()
for t, state in enumerate(states):
true_r = model.p.lookahead(state, t)
r = model.r.r_xn(state, t)
kl = tf.reduce_mean(tf.contrib.distributions.kl_divergence(true_r, r))
tf.summary.scalar("kl_r/%d" % t, tf.reduce_mean(kl))
mean_diff = true_r.loc - r.loc
mean_abs_err = tf.abs(mean_diff)
mean_rel_err = tf.abs(mean_diff / true_r.loc)
tf.summary.scalar("r_mean_convergence/absolute_error_%d" % t,
tf.reduce_mean(mean_abs_err))
tf.summary.scalar("r_mean_convergence/relative_error_%d" % t,
tf.reduce_mean(mean_rel_err))
sigma_diff = tf.square(r.scale) - tf.square(true_r.scale)
sigma_abs_err = tf.abs(sigma_diff)
sigma_rel_err = tf.abs(sigma_diff / tf.square(true_r.scale))
tf.summary.scalar("r_variance_convergence/absolute_error_%d" % t,
tf.reduce_mean(sigma_abs_err))
tf.summary.scalar("r_variance_convergence/relative_error_%d" % t,
tf.reduce_mean(sigma_rel_err))
def summarize_model(model, true_bs, observation, states, bound, summarize_r=True):
if hasattr(model.p, "bs"):
model_b = tf.reduce_sum(model.p.bs, axis=0)
true_b = tf.reduce_sum(true_bs, axis=0)
abs_err = tf.abs(model_b - true_b)
rel_err = abs_err / true_b
tf.summary.scalar("sum_of_bs/data_generating_process", tf.reduce_mean(true_b))
tf.summary.scalar("sum_of_bs/model", tf.reduce_mean(model_b))
tf.summary.scalar("sum_of_bs/absolute_error", tf.reduce_mean(abs_err))
tf.summary.scalar("sum_of_bs/relative_error", tf.reduce_mean(rel_err))
#summarize_qs(model, observation, states)
#if bound == "fivo-aux" and summarize_r:
# summarize_rs(model, states)
def summarize_grads(grads, loss_name):
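  # Maintains exponential moving averages of the flattened gradients' first and
  # second moments and summarizes the implied variance, Var[g] ~= E[g^2] - (E[g])^2,
  # returning the op that updates those moving averages.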
grad_ema = tf.train.ExponentialMovingAverage(decay=0.99)
vectorized_grads = tf.concat(
[tf.reshape(g, [-1]) for g, _ in grads if g is not None], axis=0)
new_second_moments = tf.square(vectorized_grads)
new_first_moments = vectorized_grads
maintain_grad_ema_op = grad_ema.apply([new_first_moments, new_second_moments])
first_moments = grad_ema.average(new_first_moments)
second_moments = grad_ema.average(new_second_moments)
variances = second_moments - tf.square(first_moments)
tf.summary.scalar("grad_variance/%s" % loss_name, tf.reduce_mean(variances))
tf.summary.histogram("grad_variance/%s" % loss_name, variances)
tf.summary.histogram("grad_mean/%s" % loss_name, first_moments)
return maintain_grad_ema_op
|
the-stack_106_28376 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ATM Class Controller
Created on Tue Aug 17 14:16:44 2021
Version: 1.0
Universidad Santo Tomás Tunja
Simulation
@author: Juana Valentina Mendoza Santamaría
@author: Alix Ivonne Chaparro Vasquez
presented to: Martha Susana Contreras Ortiz
"""
from random import randint, randrange
from models.atm import ATM
class ATMs():
"""ATM Collection.
"""
#%%
def __init__(self, bank, maxATMs, minCash, maxCash, minBill):
"""ATM Collection Default Constructor.
Args:
bank (Bank): ATM's Bank.
maxATMs (int): Number of ATMs to generate.
minCash (int): Cash minimum of ATM.
maxCash (int): Cash maximum of ATM.
minBill (int): Minimum denomination of bills in the ATM.
"""
self._bank = bank
self._maxATMs = maxATMs
self._minCash = minCash
self._maxCash = maxCash
self._minBill = minBill
self._atms = []
#%%
@property
def atms(self):
return self._atms
@atms.setter
def atms(self, atms):
self._atms = atms
#%%
def createATMs(self):
"""Create three ATM instances
"""
self._atms = [
ATM (
1,
self._bank,
'Tunja Downtown',
['Español', 'English'],
20000000
),
ATM (
2,
self._bank,
'North Tunja',
['Español', 'English'],
25000000
),
ATM (
3,
self._bank,
'Santoto University',
['Español', 'English'],
10000000
),
]
#%%
def createRandomATMs(self):
"""Create a random ATM Collection.
"""
self._atms = []
for id in range(randint(1, self._maxATMs)):
cash = randint(self._minCash, self._maxCash)
cash -= cash % self._minBill # Divisible per minBill
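            # e.g. (hypothetical figures) cash = 1,234,567 with minBill = 50,000
            # becomes 1,234,567 - 34,567 = 1,200,000, a whole number of bills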
atm = ATM(
(id + 1),
self._bank,
"ATM Address {}".format(id + 1),
['Español', 'English'],
cash
)
self._atms.append(atm)
#%%
def randomSelectedATM(self):
"""Selecting an ATM
Returns:
ATM: The selected ATM
"""
return self._atms[randrange(len(self._atms))]
#%%
def __str__(self):
"""ATMs output
Returns:
String: Output ATM string
"""
output = '[ATMs: \n'
for atm in self._atms:
output += atm.__str__() + '\n'
return output + ']'
|
the-stack_106_28379 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import six
from nova.tests.functional.api_sample_tests import api_sample_base
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_server_actions
from nova.tests.unit import utils as test_utils
class ServerActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
microversion = None
ADMIN_API = True
sample_dir = 'os-instance-actions'
def setUp(self):
super(ServerActionsSampleJsonTest, self).setUp()
self.api.microversion = self.microversion
self.actions = fake_server_actions.FAKE_ACTIONS
self.events = fake_server_actions.FAKE_EVENTS
self.instance = test_utils.get_test_instance(obj=True)
def _fake_get(stub_self, context, instance_uuid, expected_attrs=None):
return fake_instance.fake_instance_obj(
None, **{'uuid': instance_uuid})
def fake_instance_action_get_by_request_id(context, uuid, request_id):
return copy.deepcopy(self.actions[uuid][request_id])
def fake_server_actions_get(context, uuid):
return [copy.deepcopy(value) for value in
six.itervalues(self.actions[uuid])]
def fake_instance_action_events_get(context, action_id):
return copy.deepcopy(self.events[action_id])
def fake_instance_get_by_uuid(context, instance_id):
return self.instance
self.stub_out('nova.db.action_get_by_request_id',
fake_instance_action_get_by_request_id)
self.stub_out('nova.db.actions_get', fake_server_actions_get)
self.stub_out('nova.db.action_events_get',
fake_instance_action_events_get)
self.stub_out('nova.db.instance_get_by_uuid',
fake_instance_get_by_uuid)
self.stub_out('nova.compute.api.API.get', _fake_get)
def test_instance_action_get(self):
fake_uuid = fake_server_actions.FAKE_UUID
fake_request_id = fake_server_actions.FAKE_REQUEST_ID1
fake_action = self.actions[fake_uuid][fake_request_id]
response = self._do_get('servers/%s/os-instance-actions/%s' %
(fake_uuid, fake_request_id))
subs = {}
subs['action'] = '(reboot)|(resize)'
subs['instance_uuid'] = str(fake_uuid)
subs['integer_id'] = '[0-9]+'
subs['request_id'] = str(fake_action['request_id'])
subs['start_time'] = str(fake_action['start_time'])
subs['result'] = '(Success)|(Error)'
subs['event'] = '(schedule)|(compute_create)'
self._verify_response('instance-action-get-resp', subs, response, 200)
def test_instance_actions_list(self):
fake_uuid = fake_server_actions.FAKE_UUID
response = self._do_get('servers/%s/os-instance-actions' % (fake_uuid))
subs = {}
subs['action'] = '(reboot)|(resize)'
subs['integer_id'] = '[0-9]+'
subs['request_id'] = ('req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12}')
self._verify_response('instance-actions-list-resp', subs,
response, 200)
class ServerActionsV221SampleJsonTest(ServerActionsSampleJsonTest):
microversion = '2.21'
scenarios = [('v2_21', {'api_major_version': 'v2.1'})]
|
the-stack_106_28381 | from model.contact import Contact
empty = Contact(firstname="",lastname= "",company="",address="",home="",mobile_phone="",work_phone="", email_1="")
testdata = [Contact(firstname="fname1",
lastname= "lname1",
company="company1",
address="address1",
home="home1",
mobile_phone="mobile_phone1",
work_phone="work_phone",
email_1=""),
Contact(firstname="fname2",
lastname= "lname2",
company="company2",
address="adress2",
home="home",
mobile_phone="mobile_phone2",
work_phone="work_phone2",
email_1="email_12")
]
|
the-stack_106_28382 | #
# The SELDnet architecture
#
from keras.layers import Bidirectional, Conv2D, MaxPooling2D, Input
from keras.layers.core import Dense, Activation, Dropout, Reshape, Permute
from keras.layers.recurrent import GRU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.layers.wrappers import TimeDistributed
from keras.optimizers import Adam
import keras
keras.backend.set_image_data_format('channels_first')
from IPython import embed
def get_model(data_in, data_out, dropout_rate, nb_cnn2d_filt, pool_size,
rnn_size, fnn_size, weights):
# model definition
spec_start = Input(shape=(data_in[-3], data_in[-2], data_in[-1]))
# CNN
spec_cnn = spec_start
for i, convCnt in enumerate(pool_size):
spec_cnn = Conv2D(filters=nb_cnn2d_filt, kernel_size=(3, 3), padding='same')(spec_cnn)
spec_cnn = BatchNormalization()(spec_cnn)
spec_cnn = Activation('relu')(spec_cnn)
spec_cnn = MaxPooling2D(pool_size=(1, pool_size[i]))(spec_cnn)
spec_cnn = Dropout(dropout_rate)(spec_cnn)
spec_cnn = Permute((2, 1, 3))(spec_cnn)
# RNN
spec_rnn = Reshape((data_in[-2], -1))(spec_cnn)
for nb_rnn_filt in rnn_size:
spec_rnn = Bidirectional(
GRU(nb_rnn_filt, activation='tanh', dropout=dropout_rate, recurrent_dropout=dropout_rate,
return_sequences=True),
merge_mode='mul'
)(spec_rnn)
# FC - DOA
doa = spec_rnn
for nb_fnn_filt in fnn_size:
doa = TimeDistributed(Dense(nb_fnn_filt))(doa)
doa = Dropout(dropout_rate)(doa)
doa = TimeDistributed(Dense(data_out[1][-1]))(doa)
doa = Activation('linear', name='doa_out')(doa)
# FC - SED
sed = spec_rnn
for nb_fnn_filt in fnn_size:
sed = TimeDistributed(Dense(nb_fnn_filt))(sed)
sed = Dropout(dropout_rate)(sed)
sed = TimeDistributed(Dense(data_out[0][-1]))(sed)
sed = Activation('sigmoid', name='sed_out')(sed)
model = Model(inputs=spec_start, outputs=[sed, doa])
model.compile(optimizer=Adam(), loss=['binary_crossentropy', 'mse'], loss_weights=weights)
model.summary()
return model
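# A minimal usage sketch (all shapes and hyper-parameters below are hypothetical,
# chosen only so that pooling over the frequency axis divides evenly):
#
#   model = get_model(data_in=(64, 7, 128, 64),                    # (batch, channels, time, freq)
#                     data_out=[(64, 128, 11), (64, 128, 33)],     # [SED, DOA] output shapes
#                     dropout_rate=0.5, nb_cnn2d_filt=64, pool_size=[4, 4, 2],
#                     rnn_size=[128, 128], fnn_size=[128], weights=[1., 50.])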
|
the-stack_106_28383 | import click
from awsctl.packages.ssm import AmazonSystemsManager
from awsctl.packages.cloudwatch import AmazonCloudwatch, AmazonCloudwatchLogs
from awsctl.packages.inspector import AmazonInspector
@click.group("install")
def install_group():
pass
@install_group.command("ssm")
def install_ssm():
ssm = AmazonSystemsManager()
ssm.install()
@install_group.command("cloudwatch")
def install_cloudwatch():
cloudwatch = AmazonCloudwatch()
cloudwatch.install()
@install_group.command("cloudwatch-logs")
@click.argument('region')
@click.argument('config')
def install_cloudwatch_logs(region, config):
cloudwatch = AmazonCloudwatchLogs()
cloudwatch.install(region, config)
@install_group.command("inspector")
def install_inspector():
inspector = AmazonInspector()
inspector.install()
|
the-stack_106_28384 | # Copyright Epic Games, Inc. All Rights Reserved.
import sys as _sys
import json as _json
import uuid as _uuid
import time as _time
import socket as _socket
import logging as _logging
import threading as _threading
def hello():
    _logging.debug("Hello from remote")
# Module-level logger used by the classes below (referenced as _logger throughout this file)
_logger = _logging.getLogger(__name__)
# Protocol constants (see PythonScriptRemoteExecution.cpp for the full protocol definition)
_PROTOCOL_VERSION = 1 # Protocol version number
_PROTOCOL_MAGIC = 'ue_py' # Protocol magic identifier
_TYPE_PING = 'ping' # Service discovery request (UDP)
_TYPE_PONG = 'pong' # Service discovery response (UDP)
_TYPE_OPEN_CONNECTION = 'open_connection' # Open a TCP command connection with the requested server (UDP)
_TYPE_CLOSE_CONNECTION = 'close_connection' # Close any active TCP command connection (UDP)
_TYPE_COMMAND = 'command' # Execute a remote Python command (TCP)
_TYPE_COMMAND_RESULT = 'command_result' # Result of executing a remote Python command (TCP)
_NODE_PING_SECONDS = 1 # Number of seconds to wait before sending another "ping" message to discover remote nodes
_NODE_TIMEOUT_SECONDS = 5 # Number of seconds to wait before timing out a remote node that was discovered via UDP and has stopped sending "pong" responses
DEFAULT_MULTICAST_TTL = 0 # Multicast TTL (0 is limited to the local host, 1 is limited to the local subnet)
DEFAULT_MULTICAST_GROUP_ENDPOINT = ('239.0.0.1',
6766) # The multicast group endpoint tuple that the UDP multicast socket should join (must match the "Multicast Group Endpoint" setting in the Python plugin)
DEFAULT_MULTICAST_BIND_ADDRESS = '0.0.0.0' # The adapter address that the UDP multicast socket should bind to, or 0.0.0.0 to bind to all adapters (must match the "Multicast Bind Address" setting in the Python plugin)
DEFAULT_COMMAND_ENDPOINT = ('127.0.0.1',
6776) # The endpoint tuple for the TCP command connection hosted by this client (that the remote client will connect to)
# Execution modes (these must match the names given to LexToString for EPythonCommandExecutionMode in IPythonScriptPlugin.h)
MODE_EXEC_FILE = 'ExecuteFile' # Execute the Python command as a file. This allows you to execute either a literal Python script containing multiple statements, or a file with optional arguments
MODE_EXEC_STATEMENT = 'ExecuteStatement' # Execute the Python command as a single statement. This will execute a single statement and print the result. This mode cannot run files
MODE_EVAL_STATEMENT = 'EvaluateStatement' # Evaluate the Python command as a single statement. This will evaluate a single statement and return the result. This mode cannot run files
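# A sketch of the wire format produced by _RemoteExecutionMessage.to_json() below
# (the field values shown are illustrative): a "command" message sent over the TCP
# channel looks like
#   {"version": 1, "magic": "ue_py", "type": "command",
#    "source": "<local node uuid>", "dest": "<remote node uuid>",
#    "data": {"command": "print('hello')", "unattended": true,
#             "exec_mode": "ExecuteStatement"}}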
class RemoteExecutionConfig(object):
'''
Configuration data for establishing a remote connection with a UE4 instance running Python.
'''
def __init__(self):
self.multicast_ttl = DEFAULT_MULTICAST_TTL
self.multicast_group_endpoint = DEFAULT_MULTICAST_GROUP_ENDPOINT
self.multicast_bind_address = DEFAULT_MULTICAST_BIND_ADDRESS
self.command_endpoint = DEFAULT_COMMAND_ENDPOINT
class RemoteExecution(object):
'''
A remote execution session. This class can discover remote "nodes" (UE4 instances running Python), and allow you to open a command channel to a particular instance.
Args:
config (RemoteExecutionConfig): Configuration controlling the connection settings for this session.
'''
def __init__(self, config=RemoteExecutionConfig()):
self._config = config
self._broadcast_connection = None
self._command_connection = None
self._node_id = str(_uuid.uuid4())
@property
def remote_nodes(self):
'''
Get the current set of discovered remote "nodes" (UE4 instances running Python).
Returns:
            list: A list of dicts containing the node ID and other data.
'''
return self._broadcast_connection.remote_nodes if self._broadcast_connection else []
def start(self):
'''
        Start the remote execution session. This will begin the discovery process for remote "nodes" (UE4 instances running Python).
'''
self._broadcast_connection = _RemoteExecutionBroadcastConnection(self._config, self._node_id)
self._broadcast_connection.open()
def stop(self):
'''
        Stop the remote execution session. This will end the discovery process for remote "nodes" (UE4 instances running Python), and close any open command connection.
'''
self.close_command_connection()
if self._broadcast_connection:
self._broadcast_connection.close()
self._broadcast_connection = None
def has_command_connection(self):
'''
Check whether the remote execution session has an active command connection.
Returns:
bool: True if the remote execution session has an active command connection, False otherwise.
'''
return self._command_connection is not None
def open_command_connection(self, remote_node_id):
'''
Open a command connection to the given remote "node" (a UE4 instance running Python), closing any command connection that may currently be open.
Args:
remote_node_id (string): The ID of the remote node (this can be obtained by querying `remote_nodes`).
'''
self._command_connection = _RemoteExecutionCommandConnection(self._config, self._node_id, remote_node_id)
self._command_connection.open(self._broadcast_connection)
def close_command_connection(self):
'''
Close any command connection that may currently be open.
'''
if self._command_connection:
self._command_connection.close(self._broadcast_connection)
self._command_connection = None
def run_command(self, command, unattended=True, exec_mode=MODE_EXEC_FILE, raise_on_failure=False):
'''
Run a command remotely based on the current command connection.
Args:
command (string): The Python command to run remotely.
unattended (bool): True to run this command in "unattended" mode (suppressing some UI).
exec_mode (string): The execution mode to use as a string value (must be one of MODE_EXEC_FILE, MODE_EXEC_STATEMENT, or MODE_EVAL_STATEMENT).
raise_on_failure (bool): True to raise a RuntimeError if the command fails on the remote target.
Returns:
dict: The result from running the remote command (see `command_result` from the protocol definition).
'''
data = self._command_connection.run_command(command, unattended, exec_mode)
if raise_on_failure and not data['success']:
raise RuntimeError('Remote Python Command failed! {0}'.format(data['result']))
return data
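# A minimal usage sketch (the sleep and index below are illustrative; discovery only
# finds nodes when a UE4 instance with Python remote execution enabled is running):
#
#   remote_exec = RemoteExecution(RemoteExecutionConfig())
#   remote_exec.start()
#   _time.sleep(2)                                    # give discovery a moment to find nodes
#   if remote_exec.remote_nodes:
#       remote_exec.open_command_connection(remote_exec.remote_nodes[0]['node_id'])
#       print(remote_exec.run_command("print('hello')", exec_mode=MODE_EXEC_STATEMENT))
#       remote_exec.close_command_connection()
#   remote_exec.stop()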
class _RemoteExecutionNode(object):
'''
A discovered remote "node" (aka, a UE4 instance running Python).
Args:
data (dict): The data representing this node (from its "pong" reponse).
now (float): The timestamp at which this node was last seen.
'''
def __init__(self, data, now=None):
self.data = data
self._last_pong = _time_now(now)
def should_timeout(self, now=None):
'''
Check to see whether this remote node should be considered timed-out.
Args:
now (float): The current timestamp.
Returns:
            bool: True if the node has exceeded the timeout limit (`_NODE_TIMEOUT_SECONDS`), False otherwise.
'''
return (self._last_pong + _NODE_TIMEOUT_SECONDS) < _time_now(now)
class _RemoteExecutionBroadcastNodes(object):
'''
A thread-safe set of remote execution "nodes" (UE4 instances running Python).
'''
def __init__(self):
self._remote_nodes = {}
self._remote_nodes_lock = _threading.RLock()
@property
def remote_nodes(self):
'''
Get the current set of discovered remote "nodes" (UE4 instances running Python).
Returns:
            list: A list of dicts containing the node ID and other data.
'''
with self._remote_nodes_lock:
remote_nodes_list = []
for node_id, node in self._remote_nodes.items():
remote_node_data = dict(node.data)
remote_node_data['node_id'] = node_id
remote_nodes_list.append(remote_node_data)
return remote_nodes_list
def update_remote_node(self, node_id, node_data, now=None):
'''
Update a remote node, replacing any existing data.
Args:
node_id (str): The ID of the remote node (from its "pong" reponse).
node_data (dict): The data representing this node (from its "pong" reponse).
now (float): The timestamp at which this node was last seen.
'''
now = _time_now(now)
with self._remote_nodes_lock:
if node_id not in self._remote_nodes:
_logger.debug('Found Node {0}: {1}'.format(node_id, node_data))
self._remote_nodes[node_id] = _RemoteExecutionNode(node_data, now)
def timeout_remote_nodes(self, now=None):
'''
Check to see whether any remote nodes should be considered timed-out, and if so, remove them from this set.
Args:
now (float): The current timestamp.
'''
now = _time_now(now)
with self._remote_nodes_lock:
for node_id, node in list(self._remote_nodes.items()):
if node.should_timeout(now):
_logger.debug('Lost Node {0}: {1}'.format(node_id, node.data))
del self._remote_nodes[node_id]
class _RemoteExecutionBroadcastConnection(object):
'''
A remote execution broadcast connection (for UDP based messaging and node discovery).
Args:
config (RemoteExecutionConfig): Configuration controlling the connection settings.
node_id (string): The ID of the local "node" (this session).
'''
def __init__(self, config, node_id):
self._config = config
self._node_id = node_id
self._nodes = None
self._running = False
self._broadcast_socket = None
self._broadcast_listen_thread = None
@property
def remote_nodes(self):
'''
Get the current set of discovered remote "nodes" (UE4 instances running Python).
Returns:
            list: A list of dicts containing the node ID and other data.
'''
return self._nodes.remote_nodes if self._nodes else []
def open(self):
'''
        Open the UDP based messaging and discovery connection. This will begin the discovery process for remote "nodes" (UE4 instances running Python).
'''
self._running = True
self._last_ping = None
self._nodes = _RemoteExecutionBroadcastNodes()
self._init_broadcast_socket()
self._init_broadcast_listen_thread()
def close(self):
'''
        Close the UDP based messaging and discovery connection. This will end the discovery process for remote "nodes" (UE4 instances running Python).
'''
self._running = False
if self._broadcast_listen_thread:
self._broadcast_listen_thread.join()
if self._broadcast_socket:
self._broadcast_socket.close()
self._broadcast_socket = None
self._nodes = None
def _init_broadcast_socket(self):
'''
Initialize the UDP based broadcast socket based on the current configuration.
'''
self._broadcast_socket = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM,
_socket.IPPROTO_UDP) # UDP/IP socket
if hasattr(_socket, 'SO_REUSEPORT'):
self._broadcast_socket.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEPORT, 1)
else:
self._broadcast_socket.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1)
self._broadcast_socket.bind((self._config.multicast_bind_address, self._config.multicast_group_endpoint[1]))
self._broadcast_socket.setsockopt(_socket.IPPROTO_IP, _socket.IP_MULTICAST_LOOP, 1)
self._broadcast_socket.setsockopt(_socket.IPPROTO_IP, _socket.IP_MULTICAST_TTL, self._config.multicast_ttl)
self._broadcast_socket.setsockopt(_socket.IPPROTO_IP, _socket.IP_ADD_MEMBERSHIP, _socket.inet_aton(
self._config.multicast_group_endpoint[0]) + _socket.inet_aton('0.0.0.0'))
self._broadcast_socket.settimeout(0.1)
def _init_broadcast_listen_thread(self):
'''
Initialize the listen thread for the UDP based broadcast socket to allow discovery to run async.
'''
self._broadcast_listen_thread = _threading.Thread(target=self._run_broadcast_listen_thread)
self._broadcast_listen_thread.daemon = True
self._broadcast_listen_thread.start()
def _run_broadcast_listen_thread(self):
'''
Main loop for the listen thread that handles processing discovery messages.
'''
while self._running:
# Receive and process all pending data
while True:
try:
data = self._broadcast_socket.recv(4096)
except _socket.timeout:
data = None
if data:
self._handle_data(data)
else:
break
# Run tick logic
now = _time_now()
self._broadcast_ping(now)
self._nodes.timeout_remote_nodes(now)
_time.sleep(0.1)
def _broadcast_message(self, message):
'''
Broadcast the given message over the UDP socket to anything that might be listening.
Args:
message (_RemoteExecutionMessage): The message to broadcast.
'''
self._broadcast_socket.sendto(message.to_json_bytes(), self._config.multicast_group_endpoint)
def _broadcast_ping(self, now=None):
'''
Broadcast a "ping" message over the UDP socket to anything that might be listening.
Args:
now (float): The current timestamp.
'''
now = _time_now(now)
if not self._last_ping or ((self._last_ping + _NODE_PING_SECONDS) < now):
self._last_ping = now
self._broadcast_message(_RemoteExecutionMessage(_TYPE_PING, self._node_id))
def broadcast_open_connection(self, remote_node_id):
'''
Broadcast an "open_connection" message over the UDP socket to be handled by the specified remote node.
Args:
remote_node_id (string): The ID of the remote node that we want to open a command connection with.
'''
self._broadcast_message(_RemoteExecutionMessage(_TYPE_OPEN_CONNECTION, self._node_id, remote_node_id, {
'command_ip': self._config.command_endpoint[0],
'command_port': self._config.command_endpoint[1],
}))
def broadcast_close_connection(self, remote_node_id):
'''
Broadcast a "close_connection" message over the UDP socket to be handled by the specified remote node.
Args:
remote_node_id (string): The ID of the remote node that we want to close a command connection with.
'''
self._broadcast_message(_RemoteExecutionMessage(_TYPE_CLOSE_CONNECTION, self._node_id, remote_node_id))
def _handle_data(self, data):
'''
Handle data received from the UDP broadcast socket.
Args:
data (bytes): The raw bytes received from the socket.
'''
message = _RemoteExecutionMessage(None, None)
if message.from_json_bytes(data):
self._handle_message(message)
def _handle_message(self, message):
'''
Handle a message received from the UDP broadcast socket.
Args:
message (_RemoteExecutionMessage): The message received from the socket.
'''
if not message.passes_receive_filter(self._node_id):
return
if message.type_ == _TYPE_PONG:
self._handle_pong_message(message)
return
_logger.debug('Unhandled remote execution message type "{0}"'.format(message.type_))
def _handle_pong_message(self, message):
'''
Handle a "pong" message received from the UDP broadcast socket.
Args:
message (_RemoteExecutionMessage): The message received from the socket.
'''
self._nodes.update_remote_node(message.source, message.data)
class _RemoteExecutionCommandConnection(object):
'''
A remote execution command connection (for TCP based command processing).
Args:
config (RemoteExecutionConfig): Configuration controlling the connection settings.
node_id (string): The ID of the local "node" (this session).
remote_node_id (string): The ID of the remote "node" (the UE4 instance running Python).
'''
def __init__(self, config, node_id, remote_node_id):
self._config = config
self._node_id = node_id
self._remote_node_id = remote_node_id
self._command_listen_socket = None
self._command_channel_socket = _socket.socket() # This type is only here to appease PyLint
def open(self, broadcast_connection):
'''
Open the TCP based command connection, and wait to accept the connection from the remote party.
Args:
broadcast_connection (_RemoteExecutionBroadcastConnection): The broadcast connection to send UDP based messages over.
'''
self._nodes = _RemoteExecutionBroadcastNodes()
self._init_command_listen_socket()
self._try_accept(broadcast_connection)
def close(self, broadcast_connection):
'''
Close the TCP based command connection, attempting to notify the remote party.
Args:
broadcast_connection (_RemoteExecutionBroadcastConnection): The broadcast connection to send UDP based messages over.
'''
broadcast_connection.broadcast_close_connection(self._remote_node_id)
if self._command_channel_socket:
self._command_channel_socket.close()
self._command_channel_socket = None
if self._command_listen_socket:
self._command_listen_socket.close()
self._command_listen_socket = None
def run_command(self, command, unattended, exec_mode):
'''
Run a command on the remote party.
Args:
command (string): The Python command to run remotely.
unattended (bool): True to run this command in "unattended" mode (suppressing some UI).
exec_mode (string): The execution mode to use as a string value (must be one of MODE_EXEC_FILE, MODE_EXEC_STATEMENT, or MODE_EVAL_STATEMENT).
Returns:
dict: The result from running the remote command (see `command_result` from the protocol definition).
'''
self._send_message(_RemoteExecutionMessage(_TYPE_COMMAND, self._node_id, self._remote_node_id, {
'command': command,
'unattended': unattended,
'exec_mode': exec_mode,
}))
result = self._receive_message(_TYPE_COMMAND_RESULT)
return result.data
def _send_message(self, message):
'''
Send the given message over the TCP socket to the remote party.
Args:
message (_RemoteExecutionMessage): The message to send.
'''
self._command_channel_socket.sendall(message.to_json_bytes())
def _receive_message(self, expected_type):
'''
Receive a message over the TCP socket from the remote party.
Args:
expected_type (string): The type of message we expect to receive.
Returns:
The message that was received.
'''
data = self._command_channel_socket.recv(4096)
if data:
message = _RemoteExecutionMessage(None, None)
if message.from_json_bytes(data) and message.passes_receive_filter(
self._node_id) and message.type_ == expected_type:
return message
raise RuntimeError('Remote party failed to send a valid response!')
def _init_command_listen_socket(self):
'''
Initialize the TCP based command socket based on the current configuration, and set it to listen for an incoming connection.
'''
self._command_listen_socket = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM,
_socket.IPPROTO_TCP) # TCP/IP socket
if hasattr(_socket, 'SO_REUSEPORT'):
self._command_listen_socket.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEPORT, 1)
else:
self._command_listen_socket.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1)
self._command_listen_socket.bind(self._config.command_endpoint)
self._command_listen_socket.listen(1)
self._command_listen_socket.settimeout(5)
def _try_accept(self, broadcast_connection):
'''
Wait to accept a connection on the TCP based command connection. This makes 6 attempts to receive a connection, waiting for 5 seconds between each attempt (30 seconds total).
Args:
broadcast_connection (_RemoteExecutionBroadcastConnection): The broadcast connection to send UDP based messages over.
'''
for _n in range(6):
broadcast_connection.broadcast_open_connection(self._remote_node_id)
try:
self._command_channel_socket = self._command_listen_socket.accept()[0]
self._command_channel_socket.setblocking(True)
return
except _socket.timeout:
continue
raise RuntimeError('Remote party failed to attempt the command socket connection!')
class _RemoteExecutionMessage(object):
'''
A message sent or received by remote execution (on either the UDP or TCP connection), as UTF-8 encoded JSON.
Args:
type_ (string): The type of this message (see the `_TYPE_` constants).
source (string): The ID of the node that sent this message.
dest (string): The ID of the destination node of this message, or None to send to all nodes (for UDP broadcast).
data (dict): The message specific payload data.
'''
def __init__(self, type_, source, dest=None, data=None):
self.type_ = type_
self.source = source
self.dest = dest
self.data = data
def passes_receive_filter(self, node_id):
'''
Test to see whether this message should be received by the current node (wasn't sent to itself, and has a compatible destination ID).
Args:
node_id (string): The ID of the local "node" (this session).
Returns:
bool: True if this message should be received by the current node, False otherwise.
'''
return self.source != node_id and (not self.dest or self.dest == node_id)
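        # For example (illustrative): a broadcast (dest=None) from another node is
        # accepted, a message addressed to this node_id is accepted, and anything
        # this node sent itself or that targets a different node is filtered out.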
def to_json(self):
'''
Convert this message to its JSON representation.
Returns:
str: The JSON representation of this message.
'''
if not self.type_:
raise ValueError('"type" cannot be empty!')
if not self.source:
raise ValueError('"source" cannot be empty!')
json_obj = {
'version': _PROTOCOL_VERSION,
'magic': _PROTOCOL_MAGIC,
'type': self.type_,
'source': self.source,
}
if self.dest:
json_obj['dest'] = self.dest
if self.data:
json_obj['data'] = self.data
return _json.dumps(json_obj, ensure_ascii=False)
def to_json_bytes(self):
'''
Convert this message to its JSON representation as UTF-8 bytes.
Returns:
bytes: The JSON representation of this message as UTF-8 bytes.
'''
json_str = self.to_json()
return json_str.encode('utf-8')
def from_json(self, json_str):
'''
Parse this message from its JSON representation.
Args:
json_str (str): The JSON representation of this message.
Returns:
bool: True if this message could be parsed, False otherwise.
'''
try:
json_obj = _json.loads(json_str)
# Read and validate required protocol version information
if json_obj['version'] != _PROTOCOL_VERSION:
raise ValueError(
'"version" is incorrect (got {0}, expected {1})!'.format(json_obj['version'], _PROTOCOL_VERSION))
if json_obj['magic'] != _PROTOCOL_MAGIC:
raise ValueError(
'"magic" is incorrect (got "{0}", expected "{1}")!'.format(json_obj['magic'], _PROTOCOL_MAGIC))
# Read required fields
local_type = json_obj['type']
local_source = json_obj['source']
self.type_ = local_type
self.source = local_source
# Read optional fields
self.dest = json_obj.get('dest')
self.data = json_obj.get('data')
except Exception as e:
_logger.error('Failed to deserialize JSON "{0}": {1}'.format(json_str, str(e)))
return False
return True
def from_json_bytes(self, json_bytes):
'''
Parse this message from its JSON representation as UTF-8 bytes.
Args:
json_bytes (bytes): The JSON representation of this message as UTF-8 bytes.
Returns:
bool: True if this message could be parsed, False otherwise.
'''
json_str = json_bytes.decode('utf-8')
return self.from_json(json_str)
def _time_now(now=None):
'''
Utility function to resolve a potentially cached time value.
Args:
now (float): The cached timestamp, or None to return the current time.
Returns:
float: The cached timestamp (if set), otherwise the current time.
'''
return _time.time() if now is None else now
# Log handling
_logger = _logging.getLogger(__name__)
_log_handler = _logging.StreamHandler()
_logger.addHandler(_log_handler)
def set_log_level(log_level):
_logger.setLevel(log_level)
_log_handler.setLevel(log_level)
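# Usage sketch (illustrative, not part of the original module): raise the module
# log verbosity when debugging a connection, e.g.
#   set_log_level(_logging.DEBUG)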
|
the-stack_106_28386 | """
File: 1642.py
Title: Furthest Building You Can Reach
Difficulty: Medium
URL: https://leetcode.com/problems/furthest-building-you-can-reach/
"""
import unittest
import heapq
from typing import List, Tuple
class Solution:
def furthestBuilding(self,
heights: List[int],
bricks: int,
ladders: int) -> int:
jumps = []
current = 0
for i, height in enumerate(heights):
if current > 0 and height > current:
jumps.append((height - current, i))
current = height
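        # jumps holds (climb height, index of the taller building) for every
        # ascent in heights, in left-to-right order.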
if ladders == 0:
if bricks == 0:
if len(jumps) == 0:
return len(heights) - 1
else:
return jumps[0][1] - 1
return self.furtest_building(heights, jumps, bricks)
latest_jump_index = 0
ladder_used = []
remaining_bricks = bricks
remaining_ladders = ladders
for i, (jump, _) in enumerate(jumps):
if remaining_ladders > 0:
heapq.heappush(ladder_used, jump)
remaining_ladders -= 1
latest_jump_index = i
continue
smaller = min(jump, ladder_used[0])
if remaining_bricks < smaller:
break
remaining_bricks -= smaller
if jump > ladder_used[0]:
heapq.heappop(ladder_used)
heapq.heappush(ladder_used, jump)
latest_jump_index = i
        if len(jumps) == 0 or latest_jump_index == (len(jumps) - 1):  # no ascents at all also means the last building is reachable
return len(heights) - 1
else:
return jumps[latest_jump_index + 1][1] - 1
def furtest_building(self,
heights: List[int],
jumps: List[Tuple[int, int]],
bricks: int) -> int:
latest_jump_index = 0
remaining_bricks = bricks
for i, (jump, _) in enumerate(jumps):
if remaining_bricks >= jump:
remaining_bricks -= jump
latest_jump_index = i
else:
break
        if len(jumps) == 0 or latest_jump_index == (len(jumps) - 1):  # no ascents at all also means the last building is reachable
return len(heights) - 1
else:
return jumps[latest_jump_index + 1][1] - 1
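    # Worked trace (added illustration, not part of the original submission) for
    # heights=[4, 2, 7, 6, 9, 14, 12], bricks=5, ladders=1:
    #   jumps = [(5, 2), (3, 4), (5, 5)]            (climb, index of taller building)
    #   jump (5, 2): a ladder is still free, so it goes on the heap
    #   jump (3, 4): min(3, heap top 5) = 3 <= 5 bricks -> spend 3, 2 bricks left
    #   jump (5, 5): min(5, heap top 5) = 5 > 2 bricks  -> stop
    #   latest_jump_index = 1, so the answer is jumps[2][1] - 1 = 4.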
class SolutionTestCase(unittest.TestCase):
def test_example1(self):
# Input
heights = [4, 2, 7, 6, 9, 14, 12]
bricks = 5
ladders = 1
# Output
output = 4
solution = Solution()
self.assertEqual(solution.furthestBuilding(heights, bricks, ladders),
output)
def test_example2(self):
# Input
heights = [4, 12, 2, 7, 3, 18, 20, 3, 19]
bricks = 10
ladders = 2
# Output
output = 7
solution = Solution()
self.assertEqual(solution.furthestBuilding(heights, bricks, ladders),
output)
def test_example3(self):
# Input
heights = [14, 3, 19, 3]
bricks = 17
ladders = 0
# Output
output = 3
solution = Solution()
self.assertEqual(solution.furthestBuilding(heights, bricks, ladders),
output)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_28387 | # -*- coding: UTF-8 -*-
import dash
from dash import Dash
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
import dash_html_components as html
import dash_core_components as dcc
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from .IntegrationTests import IntegrationTests
from .utils import wait_for
from multiprocessing import Value
import time
import json
TIMEOUT = 20
class Tests(IntegrationTests):
def setUp(self):
pass
def wait_for_element_by_css_selector(self, selector, timeout=TIMEOUT):
return WebDriverWait(self.driver, timeout).until(
EC.presence_of_element_located((By.CSS_SELECTOR, selector)),
'Could not find element with selector "{}"'.format(selector)
)
def wait_for_text_to_equal(self, selector, assertion_text, timeout=TIMEOUT):
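        # Passes once either the element's text or its 'value' attribute equals
        # assertion_text, so the same helper covers plain elements and inputs.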
self.wait_for_element_by_css_selector(selector)
WebDriverWait(self.driver, timeout).until(
lambda *args: (
(str(self.wait_for_element_by_css_selector(selector).text)
== assertion_text) or
(str(self.wait_for_element_by_css_selector(
selector).get_attribute('value')) == assertion_text)
),
"Element '{}' text expects to equal '{}' but it didn't".format(
selector,
assertion_text
)
)
def request_queue_assertions(
self, check_rejected=True, expected_length=None):
request_queue = self.driver.execute_script(
'return window.store.getState().requestQueue'
)
self.assertTrue(
all([
(r['status'] == 200)
for r in request_queue
])
)
if check_rejected:
self.assertTrue(
all([
(r['rejected'] is False)
for r in request_queue
])
)
if expected_length is not None:
self.assertEqual(len(request_queue), expected_length)
def click_undo(self):
undo_selector = '._dash-undo-redo span:first-child div:last-child'
undo = self.wait_for_element_by_css_selector(undo_selector)
self.wait_for_text_to_equal(undo_selector, 'undo')
undo.click()
def click_redo(self):
redo_selector = '._dash-undo-redo span:last-child div:last-child'
self.wait_for_text_to_equal(redo_selector, 'redo')
redo = self.wait_for_element_by_css_selector(redo_selector)
redo.click()
def check_undo_redo_exist(self, has_undo, has_redo):
selector = '._dash-undo-redo span div:last-child'
els = self.driver.find_elements_by_css_selector(selector)
texts = (['undo'] if has_undo else []) + (['redo'] if has_redo else [])
self.assertEqual(len(els), len(texts))
for el, text in zip(els, texts):
self.assertEqual(el.text, text)
def test_undo_redo(self):
app = Dash(__name__, show_undo_redo=True)
app.layout = html.Div([dcc.Input(id='a'), html.Div(id='b')])
@app.callback(Output('b', 'children'), [Input('a', 'value')])
def set_b(a):
return a
self.startServer(app)
a = self.wait_for_element_by_css_selector('#a')
a.send_keys('xyz')
self.wait_for_text_to_equal('#b', 'xyz')
self.check_undo_redo_exist(True, False)
self.click_undo()
self.wait_for_text_to_equal('#b', 'xy')
self.check_undo_redo_exist(True, True)
self.click_undo()
self.wait_for_text_to_equal('#b', 'x')
self.check_undo_redo_exist(True, True)
self.click_redo()
self.wait_for_text_to_equal('#b', 'xy')
self.check_undo_redo_exist(True, True)
self.percy_snapshot(name='undo-redo')
self.click_undo()
self.click_undo()
self.wait_for_text_to_equal('#b', '')
self.check_undo_redo_exist(False, True)
def test_no_undo_redo(self):
app = Dash(__name__)
app.layout = html.Div([dcc.Input(id='a'), html.Div(id='b')])
@app.callback(Output('b', 'children'), [Input('a', 'value')])
def set_b(a):
return a
self.startServer(app)
a = self.wait_for_element_by_css_selector('#a')
a.send_keys('xyz')
self.wait_for_text_to_equal('#b', 'xyz')
toolbar = self.driver.find_elements_by_css_selector('._dash-undo-redo')
self.assertEqual(len(toolbar), 0)
def test_array_of_falsy_child(self):
app = Dash(__name__)
app.layout = html.Div(id='nully-wrapper', children=[0])
self.startServer(app)
self.wait_for_text_to_equal('#nully-wrapper', '0')
self.assertTrue(self.is_console_clean())
def test_of_falsy_child(self):
app = Dash(__name__)
app.layout = html.Div(id='nully-wrapper', children=0)
self.startServer(app)
self.wait_for_text_to_equal('#nully-wrapper', '0')
self.assertTrue(self.is_console_clean())
def test_event_properties(self):
app = Dash(__name__)
app.layout = html.Div([
html.Button('Click Me', id='button'),
html.Div(id='output')
])
call_count = Value('i', 0)
@app.callback(Output('output', 'children'),
[Input('button', 'n_clicks')])
def update_output(n_clicks):
if not n_clicks:
raise PreventUpdate
call_count.value += 1
return 'Click'
self.startServer(app)
btn = self.driver.find_element_by_id('button')
output = lambda: self.driver.find_element_by_id('output')
self.assertEqual(call_count.value, 0)
self.assertEqual(output().text, '')
btn.click()
wait_for(lambda: output().text == 'Click')
self.assertEqual(call_count.value, 1)
def test_chained_dependencies_direct_lineage(self):
app = Dash(__name__)
app.layout = html.Div([
dcc.Input(id='input-1', value='input 1'),
dcc.Input(id='input-2'),
html.Div('test', id='output')
])
input1 = lambda: self.driver.find_element_by_id('input-1')
input2 = lambda: self.driver.find_element_by_id('input-2')
output = lambda: self.driver.find_element_by_id('output')
call_counts = {
'output': Value('i', 0),
'input-2': Value('i', 0)
}
@app.callback(Output('input-2', 'value'), [Input('input-1', 'value')])
def update_input(input1):
call_counts['input-2'].value += 1
return '<<{}>>'.format(input1)
@app.callback(Output('output', 'children'), [
Input('input-1', 'value'),
Input('input-2', 'value')
])
def update_output(input1, input2):
call_counts['output'].value += 1
return '{} + {}'.format(input1, input2)
self.startServer(app)
wait_for(lambda: call_counts['output'].value == 1)
wait_for(lambda: call_counts['input-2'].value == 1)
self.assertEqual(input1().get_attribute('value'), 'input 1')
self.assertEqual(input2().get_attribute('value'), '<<input 1>>')
self.assertEqual(output().text, 'input 1 + <<input 1>>')
input1().send_keys('x')
wait_for(lambda: call_counts['output'].value == 2)
wait_for(lambda: call_counts['input-2'].value == 2)
self.assertEqual(input1().get_attribute('value'), 'input 1x')
self.assertEqual(input2().get_attribute('value'), '<<input 1x>>')
self.assertEqual(output().text, 'input 1x + <<input 1x>>')
input2().send_keys('y')
wait_for(lambda: call_counts['output'].value == 3)
wait_for(lambda: call_counts['input-2'].value == 2)
self.assertEqual(input1().get_attribute('value'), 'input 1x')
self.assertEqual(input2().get_attribute('value'), '<<input 1x>>y')
self.assertEqual(output().text, 'input 1x + <<input 1x>>y')
def test_chained_dependencies_branched_lineage(self):
app = Dash(__name__)
app.layout = html.Div([
dcc.Input(id='grandparent', value='input 1'),
dcc.Input(id='parent-a'),
dcc.Input(id='parent-b'),
html.Div(id='child-a'),
html.Div(id='child-b')
])
parenta = lambda: self.driver.find_element_by_id('parent-a')
parentb = lambda: self.driver.find_element_by_id('parent-b')
childa = lambda: self.driver.find_element_by_id('child-a')
childb = lambda: self.driver.find_element_by_id('child-b')
call_counts = {
'parent-a': Value('i', 0),
'parent-b': Value('i', 0),
'child-a': Value('i', 0),
'child-b': Value('i', 0)
}
@app.callback(Output('parent-a', 'value'),
[Input('grandparent', 'value')])
def update_parenta(value):
call_counts['parent-a'].value += 1
return 'a: {}'.format(value)
@app.callback(Output('parent-b', 'value'),
[Input('grandparent', 'value')])
def update_parentb(value):
time.sleep(0.5)
call_counts['parent-b'].value += 1
return 'b: {}'.format(value)
@app.callback(Output('child-a', 'children'),
[Input('parent-a', 'value'),
Input('parent-b', 'value')])
def update_childa(parenta_value, parentb_value):
time.sleep(1)
call_counts['child-a'].value += 1
return '{} + {}'.format(parenta_value, parentb_value)
@app.callback(Output('child-b', 'children'),
[Input('parent-a', 'value'),
Input('parent-b', 'value'),
Input('grandparent', 'value')])
def update_childb(parenta_value, parentb_value, grandparent_value):
call_counts['child-b'].value += 1
return '{} + {} + {}'.format(
parenta_value,
parentb_value,
grandparent_value
)
self.startServer(app)
wait_for(lambda: childa().text == 'a: input 1 + b: input 1')
wait_for(lambda: childb().text == 'a: input 1 + b: input 1 + input 1')
time.sleep(1) # wait for potential requests of app to settle down
self.assertEqual(parenta().get_attribute('value'), 'a: input 1')
self.assertEqual(parentb().get_attribute('value'), 'b: input 1')
self.assertEqual(call_counts['parent-a'].value, 1)
self.assertEqual(call_counts['parent-b'].value, 1)
self.assertEqual(call_counts['child-a'].value, 1)
self.assertEqual(call_counts['child-b'].value, 1)
def test_removing_component_while_its_getting_updated(self):
app = Dash(__name__)
app.layout = html.Div([
dcc.RadioItems(
id='toc',
options=[
{'label': i, 'value': i} for i in ['1', '2']
],
value='1'
),
html.Div(id='body')
])
app.config.suppress_callback_exceptions = True
call_counts = {
'body': Value('i', 0),
'button-output': Value('i', 0)
}
@app.callback(Output('body', 'children'), [Input('toc', 'value')])
def update_body(chapter):
call_counts['body'].value += 1
if chapter == '1':
return [
html.Div('Chapter 1'),
html.Button(
'clicking this button takes forever',
id='button'
),
html.Div(id='button-output')
]
elif chapter == '2':
return 'Chapter 2'
else:
raise Exception('chapter is {}'.format(chapter))
@app.callback(
Output('button-output', 'children'),
[Input('button', 'n_clicks')])
def this_callback_takes_forever(n_clicks):
if not n_clicks:
# initial value is quick, only new value is slow
# also don't let the initial value increment call_counts
return 'Initial Value'
time.sleep(5)
call_counts['button-output'].value += 1
return 'New value!'
body = lambda: self.driver.find_element_by_id('body')
self.startServer(app)
wait_for(lambda: call_counts['body'].value == 1)
time.sleep(0.5)
self.driver.find_element_by_id('button').click()
# while that callback is resolving, switch the chapter,
# hiding the `button-output` tag
def chapter2_assertions():
wait_for(lambda: body().text == 'Chapter 2')
layout = self.driver.execute_script(
'return JSON.parse(JSON.stringify('
'window.store.getState().layout'
'))'
)
dcc_radio = layout['props']['children'][0]
html_body = layout['props']['children'][1]
self.assertEqual(dcc_radio['props']['id'], 'toc')
self.assertEqual(dcc_radio['props']['value'], '2')
self.assertEqual(html_body['props']['id'], 'body')
self.assertEqual(html_body['props']['children'], 'Chapter 2')
(self.driver.find_elements_by_css_selector(
'input[type="radio"]'
)[1]).click()
chapter2_assertions()
self.assertEqual(call_counts['button-output'].value, 0)
time.sleep(5)
wait_for(lambda: call_counts['button-output'].value == 1)
time.sleep(2) # liberally wait for the front-end to process request
chapter2_assertions()
self.assertTrue(self.is_console_clean())
def test_rendering_layout_calls_callback_once_per_output(self):
app = Dash(__name__)
call_count = Value('i', 0)
app.config['suppress_callback_exceptions'] = True
app.layout = html.Div([
html.Div([
dcc.Input(
value='Input {}'.format(i),
id='input-{}'.format(i)
)
for i in range(10)
]),
html.Div(id='container'),
dcc.RadioItems()
])
@app.callback(
Output('container', 'children'),
[Input('input-{}'.format(i), 'value') for i in range(10)])
def dynamic_output(*args):
call_count.value += 1
return json.dumps(args, indent=2)
self.startServer(app)
time.sleep(5)
self.percy_snapshot(
name='test_rendering_layout_calls_callback_once_per_output'
)
self.assertEqual(call_count.value, 1)
def test_rendering_new_content_calls_callback_once_per_output(self):
app = Dash(__name__)
call_count = Value('i', 0)
app.config['suppress_callback_exceptions'] = True
app.layout = html.Div([
html.Button(
id='display-content',
children='Display Content',
n_clicks=0
),
html.Div(id='container'),
dcc.RadioItems()
])
@app.callback(
Output('container', 'children'),
[Input('display-content', 'n_clicks')])
def display_output(n_clicks):
if n_clicks == 0:
return ''
return html.Div([
html.Div([
dcc.Input(
value='Input {}'.format(i),
id='input-{}'.format(i)
)
for i in range(10)
]),
html.Div(id='dynamic-output')
])
@app.callback(
Output('dynamic-output', 'children'),
[Input('input-{}'.format(i), 'value') for i in range(10)])
def dynamic_output(*args):
call_count.value += 1
return json.dumps(args, indent=2)
self.startServer(app)
self.wait_for_element_by_css_selector('#display-content').click()
time.sleep(5)
self.percy_snapshot(
name='test_rendering_new_content_calls_callback_once_per_output'
)
self.assertEqual(call_count.value, 1)
def test_callbacks_called_multiple_times_and_out_of_order_multi_output(self):
app = Dash(__name__)
app.layout = html.Div([
html.Button(id='input', n_clicks=0),
html.Div(id='output1'),
html.Div(id='output2')
])
call_count = Value('i', 0)
@app.callback(
[Output('output1', 'children'),
Output('output2', 'children')],
[Input('input', 'n_clicks')]
)
def update_output(n_clicks):
call_count.value = call_count.value + 1
if n_clicks == 1:
time.sleep(4)
return n_clicks, n_clicks + 1
self.startServer(app)
button = self.wait_for_element_by_css_selector('#input')
button.click()
button.click()
time.sleep(8)
self.percy_snapshot(
name='test_callbacks_called_multiple_times'
'_and_out_of_order_multi_output'
)
self.assertEqual(call_count.value, 3)
self.wait_for_text_to_equal('#output1', '2')
self.wait_for_text_to_equal('#output2', '3')
request_queue = self.driver.execute_script(
'return window.store.getState().requestQueue'
)
self.assertFalse(request_queue[0]['rejected'])
self.assertEqual(len(request_queue), 1)
def test_callbacks_with_shared_grandparent(self):
app = dash.Dash()
app.layout = html.Div([
html.Div(id='session-id', children='id'),
dcc.Dropdown(id='dropdown-1'),
dcc.Dropdown(id='dropdown-2'),
])
options = [{'value': 'a', 'label': 'a'}]
call_counts = {
'dropdown_1': Value('i', 0),
'dropdown_2': Value('i', 0)
}
@app.callback(
Output('dropdown-1', 'options'),
[Input('dropdown-1', 'value'),
Input('session-id', 'children')])
def dropdown_1(value, session_id):
call_counts['dropdown_1'].value += 1
return options
@app.callback(
Output('dropdown-2', 'options'),
[Input('dropdown-2', 'value'),
Input('session-id', 'children')])
def dropdown_2(value, session_id):
call_counts['dropdown_2'].value += 1
return options
self.startServer(app)
self.wait_for_element_by_css_selector('#session-id')
time.sleep(2)
self.assertEqual(call_counts['dropdown_1'].value, 1)
self.assertEqual(call_counts['dropdown_2'].value, 1)
self.assertTrue(self.is_console_clean())
def test_callbacks_triggered_on_generated_output(self):
app = dash.Dash()
app.config['suppress_callback_exceptions'] = True
call_counts = {
'tab1': Value('i', 0),
'tab2': Value('i', 0)
}
app.layout = html.Div([
dcc.Dropdown(
id='outer-controls',
options=[{'label': i, 'value': i} for i in ['a', 'b']],
value='a'
),
dcc.RadioItems(
options=[
{'label': 'Tab 1', 'value': 1},
{'label': 'Tab 2', 'value': 2}
],
value=1,
id='tabs',
),
html.Div(id='tab-output')
])
@app.callback(Output('tab-output', 'children'),
[Input('tabs', 'value')])
def display_content(value):
return html.Div([
html.Div(id='tab-{}-output'.format(value))
])
@app.callback(Output('tab-1-output', 'children'),
[Input('outer-controls', 'value')])
def display_tab1_output(value):
call_counts['tab1'].value += 1
return 'Selected "{}" in tab 1'.format(value)
@app.callback(Output('tab-2-output', 'children'),
[Input('outer-controls', 'value')])
def display_tab2_output(value):
call_counts['tab2'].value += 1
return 'Selected "{}" in tab 2'.format(value)
self.startServer(app)
self.wait_for_element_by_css_selector('#tab-output')
time.sleep(2)
self.assertEqual(call_counts['tab1'].value, 1)
self.assertEqual(call_counts['tab2'].value, 0)
self.wait_for_text_to_equal('#tab-output', 'Selected "a" in tab 1')
self.wait_for_text_to_equal('#tab-1-output', 'Selected "a" in tab 1')
(self.driver.find_elements_by_css_selector(
'input[type="radio"]'
)[1]).click()
time.sleep(2)
self.wait_for_text_to_equal('#tab-output', 'Selected "a" in tab 2')
self.wait_for_text_to_equal('#tab-2-output', 'Selected "a" in tab 2')
self.assertEqual(call_counts['tab1'].value, 1)
self.assertEqual(call_counts['tab2'].value, 1)
self.assertTrue(self.is_console_clean())
def test_initialization_with_overlapping_outputs(self):
app = dash.Dash()
app.layout = html.Div([
html.Div(id='input-1', children='input-1'),
html.Div(id='input-2', children='input-2'),
html.Div(id='input-3', children='input-3'),
html.Div(id='input-4', children='input-4'),
html.Div(id='input-5', children='input-5'),
html.Div(id='output-1'),
html.Div(id='output-2'),
html.Div(id='output-3'),
html.Div(id='output-4'),
])
call_counts = {
'output-1': Value('i', 0),
'output-2': Value('i', 0),
'output-3': Value('i', 0),
'output-4': Value('i', 0),
}
def generate_callback(outputid):
def callback(*args):
call_counts[outputid].value += 1
return '{}, {}'.format(*args)
return callback
for i in range(1, 5):
outputid = 'output-{}'.format(i)
app.callback(
Output(outputid, 'children'),
[
Input('input-{}'.format(i), 'children'),
Input('input-{}'.format(i + 1), 'children')
]
)(generate_callback(outputid))
self.startServer(app)
self.wait_for_element_by_css_selector('#output-1')
time.sleep(5)
for i in range(1, 5):
outputid = 'output-{}'.format(i)
self.assertEqual(call_counts[outputid].value, 1)
self.wait_for_text_to_equal(
'#{}'.format(outputid),
"input-{}, input-{}".format(i, i + 1)
)
def test_generate_overlapping_outputs(self):
app = dash.Dash()
app.config['suppress_callback_exceptions'] = True
block = html.Div([
html.Div(id='input-1', children='input-1'),
html.Div(id='input-2', children='input-2'),
html.Div(id='input-3', children='input-3'),
html.Div(id='input-4', children='input-4'),
html.Div(id='input-5', children='input-5'),
html.Div(id='output-1'),
html.Div(id='output-2'),
html.Div(id='output-3'),
html.Div(id='output-4'),
])
app.layout = html.Div([
html.Div(id='input'),
html.Div(id='container')
])
call_counts = {
'container': Value('i', 0),
'output-1': Value('i', 0),
'output-2': Value('i', 0),
'output-3': Value('i', 0),
'output-4': Value('i', 0),
}
@app.callback(Output('container', 'children'),
[Input('input', 'children')])
def display_output(*args):
call_counts['container'].value += 1
return block
def generate_callback(outputid):
def callback(*args):
call_counts[outputid].value += 1
return '{}, {}'.format(*args)
return callback
for i in range(1, 5):
outputid = 'output-{}'.format(i)
app.callback(
Output(outputid, 'children'),
[Input('input-{}'.format(i), 'children'),
Input('input-{}'.format(i + 1), 'children')]
)(generate_callback(outputid))
self.startServer(app)
wait_for(lambda: call_counts['container'].value == 1)
self.wait_for_element_by_css_selector('#output-1')
time.sleep(5)
for i in range(1, 5):
outputid = 'output-{}'.format(i)
self.assertEqual(call_counts[outputid].value, 1)
self.wait_for_text_to_equal(
'#{}'.format(outputid),
"input-{}, input-{}".format(i, i + 1)
)
self.assertEqual(call_counts['container'].value, 1)
def test_multiple_properties_update_at_same_time_on_same_component(self):
call_count = Value('i', 0)
timestamp_1 = Value('d', -5)
timestamp_2 = Value('d', -5)
app = dash.Dash()
app.layout = html.Div([
html.Div(id='container'),
html.Button('Click', id='button-1', n_clicks=0, n_clicks_timestamp=-1),
html.Button('Click', id='button-2', n_clicks=0, n_clicks_timestamp=-1)
])
@app.callback(
Output('container', 'children'),
[Input('button-1', 'n_clicks'),
Input('button-1', 'n_clicks_timestamp'),
Input('button-2', 'n_clicks'),
Input('button-2', 'n_clicks_timestamp')])
def update_output(*args):
call_count.value += 1
timestamp_1.value = args[1]
timestamp_2.value = args[3]
return '{}, {}'.format(args[0], args[2])
self.startServer(app)
self.wait_for_element_by_css_selector('#container')
time.sleep(2)
self.wait_for_text_to_equal('#container', '0, 0')
self.assertEqual(timestamp_1.value, -1)
self.assertEqual(timestamp_2.value, -1)
self.assertEqual(call_count.value, 1)
self.percy_snapshot('button initialization 1')
self.driver.find_element_by_css_selector('#button-1').click()
time.sleep(2)
self.wait_for_text_to_equal('#container', '1, 0')
self.assertTrue(
timestamp_1.value >
((time.time() - (24 * 60 * 60)) * 1000))
self.assertEqual(timestamp_2.value, -1)
self.assertEqual(call_count.value, 2)
self.percy_snapshot('button-1 click')
prev_timestamp_1 = timestamp_1.value
self.driver.find_element_by_css_selector('#button-2').click()
time.sleep(2)
self.wait_for_text_to_equal('#container', '1, 1')
self.assertEqual(timestamp_1.value, prev_timestamp_1)
self.assertTrue(
timestamp_2.value >
((time.time() - 24 * 60 * 60) * 1000))
self.assertEqual(call_count.value, 3)
self.percy_snapshot('button-2 click')
prev_timestamp_2 = timestamp_2.value
self.driver.find_element_by_css_selector('#button-2').click()
time.sleep(2)
self.wait_for_text_to_equal('#container', '1, 2')
self.assertEqual(timestamp_1.value, prev_timestamp_1)
self.assertTrue(
timestamp_2.value >
prev_timestamp_2)
self.assertTrue(timestamp_2.value > timestamp_1.value)
self.assertEqual(call_count.value, 4)
self.percy_snapshot('button-2 click again')
def test_request_hooks(self):
app = Dash(__name__)
app.index_string = '''<!DOCTYPE html>
<html>
<head>
{%metas%}
<title>{%title%}</title>
{%favicon%}
{%css%}
</head>
<body>
<div>Testing custom DashRenderer</div>
{%app_entry%}
<footer>
{%config%}
{%scripts%}
                <script id="_dash-renderer" type="application/javascript">
const renderer = new DashRenderer({
request_pre: (payload) => {
var output = document.getElementById('output-pre')
var outputPayload = document.getElementById('output-pre-payload')
if(output) {
output.innerHTML = 'request_pre changed this text!';
}
if(outputPayload) {
outputPayload.innerHTML = JSON.stringify(payload);
}
},
request_post: (payload, response) => {
var output = document.getElementById('output-post')
var outputPayload = document.getElementById('output-post-payload')
var outputResponse = document.getElementById('output-post-response')
if(output) {
output.innerHTML = 'request_post changed this text!';
}
if(outputPayload) {
outputPayload.innerHTML = JSON.stringify(payload);
}
if(outputResponse) {
outputResponse.innerHTML = JSON.stringify(response);
}
}
})
</script>
</footer>
<div>With request hooks</div>
</body>
</html>'''
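        # The custom index above wires request_pre/request_post hooks into the
        # DashRenderer; they write into the output-pre*/output-post* divs defined
        # below so the assertions can verify both hooks fired with the expected
        # payload and response.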
app.layout = html.Div([
dcc.Input(
id='input',
value='initial value'
),
html.Div(
html.Div([
html.Div(id='output-1'),
html.Div(id='output-pre'),
html.Div(id='output-pre-payload'),
html.Div(id='output-post'),
html.Div(id='output-post-payload'),
html.Div(id='output-post-response')
])
)
])
@app.callback(Output('output-1', 'children'), [Input('input', 'value')])
def update_output(value):
return value
self.startServer(app)
input1 = self.wait_for_element_by_css_selector('#input')
initialValue = input1.get_attribute('value')
action = ActionChains(self.driver)
action.click(input1)
action = action.send_keys(Keys.BACKSPACE * len(initialValue))
action.send_keys('fire request hooks').perform()
self.wait_for_text_to_equal('#output-1', 'fire request hooks')
self.wait_for_text_to_equal('#output-pre', 'request_pre changed this text!')
self.wait_for_text_to_equal('#output-pre-payload', '{"output":"output-1.children","changedPropIds":["input.value"],"inputs":[{"id":"input","property":"value","value":"fire request hooks"}]}')
self.wait_for_text_to_equal('#output-post', 'request_post changed this text!')
self.wait_for_text_to_equal('#output-post-payload', '{"output":"output-1.children","changedPropIds":["input.value"],"inputs":[{"id":"input","property":"value","value":"fire request hooks"}]}')
self.wait_for_text_to_equal('#output-post-response', '{"props":{"children":"fire request hooks"}}')
self.percy_snapshot(name='request-hooks render')
def test_graphs_in_tabs_do_not_share_state(self):
app = dash.Dash()
app.config.suppress_callback_exceptions = True
app.layout = html.Div([
dcc.Tabs(
id="tabs",
children=[
dcc.Tab(label="Tab 1", value="tab1", id="tab1"),
dcc.Tab(label="Tab 2", value="tab2", id="tab2"),
],
value="tab1",
),
# Tab content
html.Div(id="tab_content"),
])
tab1_layout = [
html.Div([dcc.Graph(id='graph1',
figure={
'data': [{
'x': [1, 2, 3],
'y': [5, 10, 6],
'type': 'bar'
}]
})]),
html.Pre(id='graph1_info'),
]
tab2_layout = [
html.Div([dcc.Graph(id='graph2',
figure={
'data': [{
'x': [4, 3, 2],
'y': [5, 10, 6],
'type': 'bar'
}]
})]),
html.Pre(id='graph2_info'),
]
@app.callback(
Output(component_id='graph1_info', component_property='children'),
[Input(component_id='graph1', component_property='clickData')])
def display_hover_data(hover_data):
return json.dumps(hover_data)
@app.callback(
Output(component_id='graph2_info', component_property='children'),
[Input(component_id='graph2', component_property='clickData')])
def display_hover_data_2(hover_data):
return json.dumps(hover_data)
@app.callback(Output("tab_content", "children"), [Input("tabs", "value")])
def render_content(tab):
if tab == "tab1":
return tab1_layout
elif tab == "tab2":
return tab2_layout
else:
return tab1_layout
self.startServer(app)
self.wait_for_element_by_css_selector('#graph1')
self.driver.find_elements_by_css_selector(
'#graph1'
)[0].click()
graph_1_expected_clickdata = {
"points": [{"curveNumber": 0, "pointNumber": 1, "pointIndex": 1, "x": 2, "y": 10, "label": 2, "value": 10}]
}
graph_2_expected_clickdata = {
"points": [{"curveNumber": 0, "pointNumber": 1, "pointIndex": 1, "x": 3, "y": 10, "label": 3, "value": 10}]
}
self.wait_for_text_to_equal('#graph1_info', json.dumps(graph_1_expected_clickdata))
self.driver.find_elements_by_css_selector(
'#tab2'
)[0].click()
self.wait_for_element_by_css_selector('#graph2')
self.driver.find_elements_by_css_selector(
'#graph2'
)[0].click()
self.wait_for_text_to_equal('#graph2_info', json.dumps(graph_2_expected_clickdata))
|
the-stack_106_28388 | # -*- coding: utf-8 -*-
# @Author : LG
"""
Runtime: 40 ms, faster than 80.24% of Python3 submissions.
Memory usage: 13.5 MB, less than 97.09% of Python3 submissions.
Approach:
Use two pointers, one walking the haystack and one walking the needle.
See the inline comments for the implementation details.
"""
class Solution:
def strStr(self, haystack: str, needle: str) -> int:
if needle == '':
return 0
len_i, len_j = len(haystack), len(needle)
        record = -1  # initialize to -1; records the index where characters match
        i, j = 0, 0  # two pointers
        while i < len_i:  # walk through the haystack string
            if haystack[i] == needle[j]:  # haystack char at i equals needle char at j
                record = i  # remember the current index i
                j += 1  # advance the needle index
            else:
                i = i-j  # otherwise move i back so the i += 1 below restarts one past this attempt
                j = 0  # point j back at the first character of needle
                record = -1  # reset record to -1
            if j > len_j-1:  # the whole needle has been matched, so break out of the loop
                break
            else:
                record = -1
                i += 1
        if record > 0:  # on success record points at the end of the match; move it to the start
record -= len_j-1
return record |
the-stack_106_28389 | # @ ConfigEditor.py
#
# Copyright(c) 2018 - 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import sys
import marshal
import tkinter
import tkinter.ttk as ttk
import tkinter.messagebox as messagebox
import tkinter.filedialog as filedialog
from pathlib import Path
from GenYamlCfg import CGenYamlCfg, bytes_to_value, \
bytes_to_bracket_str, value_to_bytes, array_str_to_value
from ctypes import sizeof, Structure, ARRAY, c_uint8, c_uint64, c_char, \
c_uint32, c_uint16
from functools import reduce
sys.path.insert(0, '..')
from FspDscBsf2Yaml import bsf_to_dsc, dsc_to_yaml # noqa
sys.dont_write_bytecode = True
class create_tool_tip(object):
'''
create a tooltip for a given widget
'''
in_progress = False
def __init__(self, widget, text=''):
self.top_win = None
self.widget = widget
self.text = text
self.widget.bind("<Enter>", self.enter)
self.widget.bind("<Leave>", self.leave)
def enter(self, event=None):
if self.in_progress:
return
if self.widget.winfo_class() == 'Treeview':
# Only show help when cursor is on row header.
rowid = self.widget.identify_row(event.y)
if rowid != '':
return
else:
x, y, cx, cy = self.widget.bbox("insert")
cursor = self.widget.winfo_pointerxy()
x = self.widget.winfo_rootx() + 35
y = self.widget.winfo_rooty() + 20
if cursor[1] > y and cursor[1] < y + 20:
y += 20
# creates a toplevel window
self.top_win = tkinter.Toplevel(self.widget)
# Leaves only the label and removes the app window
self.top_win.wm_overrideredirect(True)
self.top_win.wm_geometry("+%d+%d" % (x, y))
label = tkinter.Message(self.top_win,
text=self.text,
justify='left',
background='bisque',
relief='solid',
borderwidth=1,
font=("times", "10", "normal"))
label.pack(ipadx=1)
self.in_progress = True
def leave(self, event=None):
if self.top_win:
self.top_win.destroy()
self.in_progress = False
class validating_entry(tkinter.Entry):
def __init__(self, master, **kw):
tkinter.Entry.__init__(*(self, master), **kw)
self.parent = master
self.old_value = ''
self.last_value = ''
self.variable = tkinter.StringVar()
self.variable.trace("w", self.callback)
self.config(textvariable=self.variable)
self.config({"background": "#c0c0c0"})
self.bind("<Return>", self.move_next)
self.bind("<Tab>", self.move_next)
self.bind("<Escape>", self.cancel)
for each in ['BackSpace', 'Delete']:
self.bind("<%s>" % each, self.ignore)
self.display(None)
    def ignore(self, event):
return "break"
def move_next(self, event):
if self.row < 0:
return
row, col = self.row, self.col
txt, row_id, col_id = self.parent.get_next_cell(row, col)
self.display(txt, row_id, col_id)
return "break"
def cancel(self, event):
self.variable.set(self.old_value)
self.display(None)
def display(self, txt, row_id='', col_id=''):
if txt is None:
self.row = -1
self.col = -1
self.place_forget()
else:
row = int('0x' + row_id[1:], 0) - 1
col = int(col_id[1:]) - 1
self.row = row
self.col = col
self.old_value = txt
self.last_value = txt
x, y, width, height = self.parent.bbox(row_id, col)
self.place(x=x, y=y, w=width)
self.variable.set(txt)
self.focus_set()
self.icursor(0)
def callback(self, *Args):
cur_val = self.variable.get()
new_val = self.validate(cur_val)
if new_val is not None and self.row >= 0:
self.last_value = new_val
self.parent.set_cell(self.row, self.col, new_val)
self.variable.set(self.last_value)
def validate(self, value):
if len(value) > 0:
try:
int(value, 16)
except Exception:
return None
# Normalize the cell format
self.update()
cell_width = self.winfo_width()
max_len = custom_table.to_byte_length(cell_width) * 2
cur_pos = self.index("insert")
if cur_pos == max_len + 1:
value = value[-max_len:]
else:
value = value[:max_len]
if value == '':
value = '0'
fmt = '%%0%dX' % max_len
return fmt % int(value, 16)
class custom_table(ttk.Treeview):
_Padding = 20
_Char_width = 6
def __init__(self, parent, col_hdr, bins):
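        # Each col_hdr entry is a 'Label:ByteLen' string; the byte length after the
        # colon drives both the rendered cell width and how many bytes of `bins`
        # each cell displays.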
cols = len(col_hdr)
col_byte_len = []
for col in range(cols): # Columns
col_byte_len.append(int(col_hdr[col].split(':')[1]))
byte_len = sum(col_byte_len)
rows = (len(bins) + byte_len - 1) // byte_len
self.rows = rows
self.cols = cols
self.col_byte_len = col_byte_len
self.col_hdr = col_hdr
self.size = len(bins)
self.last_dir = ''
style = ttk.Style()
style.configure("Custom.Treeview.Heading",
font=('calibri', 10, 'bold'),
foreground="blue")
ttk.Treeview.__init__(self, parent, height=rows,
columns=[''] + col_hdr, show='headings',
style="Custom.Treeview",
selectmode='none')
self.bind("<Button-1>", self.click)
self.bind("<FocusOut>", self.focus_out)
self.entry = validating_entry(self, width=4, justify=tkinter.CENTER)
self.heading(0, text='LOAD')
self.column(0, width=60, stretch=0, anchor=tkinter.CENTER)
for col in range(cols): # Columns
text = col_hdr[col].split(':')[0]
byte_len = int(col_hdr[col].split(':')[1])
self.heading(col+1, text=text)
self.column(col+1, width=self.to_cell_width(byte_len),
stretch=0, anchor=tkinter.CENTER)
idx = 0
for row in range(rows): # Rows
text = '%04X' % (row * len(col_hdr))
vals = ['%04X:' % (cols * row)]
for col in range(cols): # Columns
if idx >= len(bins):
break
byte_len = int(col_hdr[col].split(':')[1])
value = bytes_to_value(bins[idx:idx+byte_len])
hex = ("%%0%dX" % (byte_len * 2)) % value
vals.append(hex)
idx += byte_len
self.insert('', 'end', values=tuple(vals))
if idx >= len(bins):
break
@staticmethod
def to_cell_width(byte_len):
return byte_len * 2 * custom_table._Char_width + custom_table._Padding
@staticmethod
def to_byte_length(cell_width):
return(cell_width - custom_table._Padding) \
// (2 * custom_table._Char_width)
def focus_out(self, event):
self.entry.display(None)
def refresh_bin(self, bins):
if not bins:
return
# Reload binary into widget
bin_len = len(bins)
for row in range(self.rows):
iid = self.get_children()[row]
for col in range(self.cols):
idx = row * sum(self.col_byte_len) + \
sum(self.col_byte_len[:col])
byte_len = self.col_byte_len[col]
if idx + byte_len <= self.size:
byte_len = int(self.col_hdr[col].split(':')[1])
if idx + byte_len > bin_len:
val = 0
else:
val = bytes_to_value(bins[idx:idx+byte_len])
hex_val = ("%%0%dX" % (byte_len * 2)) % val
self.set(iid, col + 1, hex_val)
def get_cell(self, row, col):
iid = self.get_children()[row]
txt = self.item(iid, 'values')[col]
return txt
def get_next_cell(self, row, col):
rows = self.get_children()
col += 1
if col > self.cols:
col = 1
row += 1
cnt = row * sum(self.col_byte_len) + sum(self.col_byte_len[:col])
if cnt > self.size:
# Reached the last cell, so roll back to beginning
row = 0
col = 1
txt = self.get_cell(row, col)
row_id = rows[row]
col_id = '#%d' % (col + 1)
return(txt, row_id, col_id)
def set_cell(self, row, col, val):
iid = self.get_children()[row]
self.set(iid, col, val)
def load_bin(self):
# Load binary from file
path = filedialog.askopenfilename(
initialdir=self.last_dir,
title="Load binary file",
filetypes=(("Binary files", "*.bin"), (
"binary files", "*.bin")))
if path:
self.last_dir = os.path.dirname(path)
fd = open(path, 'rb')
bins = bytearray(fd.read())[:self.size]
fd.close()
bins.extend(b'\x00' * (self.size - len(bins)))
return bins
return None
def click(self, event):
row_id = self.identify_row(event.y)
col_id = self.identify_column(event.x)
if row_id == '' and col_id == '#1':
# Clicked on "LOAD" cell
bins = self.load_bin()
self.refresh_bin(bins)
return
if col_id == '#1':
# Clicked on column 1(Offset column)
return
item = self.identify('item', event.x, event.y)
if not item or not col_id:
# Not clicked on valid cell
return
# Clicked cell
row = int('0x' + row_id[1:], 0) - 1
col = int(col_id[1:]) - 1
if row * self.cols + col > self.size:
return
vals = self.item(item, 'values')
if col < len(vals):
txt = self.item(item, 'values')[col]
self.entry.display(txt, row_id, col_id)
def get(self):
bins = bytearray()
row_ids = self.get_children()
for row_id in row_ids:
row = int('0x' + row_id[1:], 0) - 1
for col in range(self.cols):
idx = row * sum(self.col_byte_len) + \
sum(self.col_byte_len[:col])
byte_len = self.col_byte_len[col]
if idx + byte_len > self.size:
break
hex = self.item(row_id, 'values')[col + 1]
values = value_to_bytes(int(hex, 16)
& ((1 << byte_len * 8) - 1), byte_len)
bins.extend(values)
return bins
class c_uint24(Structure):
"""Little-Endian 24-bit Unsigned Integer"""
_pack_ = 1
_fields_ = [('Data', (c_uint8 * 3))]
def __init__(self, val=0):
self.set_value(val)
def __str__(self, indent=0):
return '0x%.6x' % self.value
def __int__(self):
return self.get_value()
def set_value(self, val):
self.Data[0:3] = Val2Bytes(val, 3)
def get_value(self):
return Bytes2Val(self.Data[0:3])
value = property(get_value, set_value)
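# Quick sanity sketch for c_uint24 (illustrative, not part of the original source):
#   v = c_uint24(0x123456)
#   assert sizeof(v) == 3 and v.value == 0x123456 and str(v) == '0x123456'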
class EFI_FIRMWARE_VOLUME_HEADER(Structure):
_fields_ = [
('ZeroVector', ARRAY(c_uint8, 16)),
('FileSystemGuid', ARRAY(c_uint8, 16)),
('FvLength', c_uint64),
('Signature', ARRAY(c_char, 4)),
('Attributes', c_uint32),
('HeaderLength', c_uint16),
('Checksum', c_uint16),
('ExtHeaderOffset', c_uint16),
('Reserved', c_uint8),
('Revision', c_uint8)
]
class EFI_FIRMWARE_VOLUME_EXT_HEADER(Structure):
_fields_ = [
('FvName', ARRAY(c_uint8, 16)),
('ExtHeaderSize', c_uint32)
]
class EFI_FFS_INTEGRITY_CHECK(Structure):
_fields_ = [
('Header', c_uint8),
('File', c_uint8)
]
class EFI_FFS_FILE_HEADER(Structure):
_fields_ = [
('Name', ARRAY(c_uint8, 16)),
('IntegrityCheck', EFI_FFS_INTEGRITY_CHECK),
('Type', c_uint8),
('Attributes', c_uint8),
('Size', c_uint24),
('State', c_uint8)
]
class EFI_COMMON_SECTION_HEADER(Structure):
_fields_ = [
('Size', c_uint24),
('Type', c_uint8)
]
class EFI_SECTION_TYPE:
"""Enumeration of all valid firmware file section types."""
ALL = 0x00
COMPRESSION = 0x01
GUID_DEFINED = 0x02
DISPOSABLE = 0x03
PE32 = 0x10
PIC = 0x11
TE = 0x12
DXE_DEPEX = 0x13
VERSION = 0x14
USER_INTERFACE = 0x15
COMPATIBILITY16 = 0x16
FIRMWARE_VOLUME_IMAGE = 0x17
FREEFORM_SUBTYPE_GUID = 0x18
RAW = 0x19
PEI_DEPEX = 0x1b
SMM_DEPEX = 0x1c
class FSP_COMMON_HEADER(Structure):
_fields_ = [
('Signature', ARRAY(c_char, 4)),
('HeaderLength', c_uint32)
]
class FSP_INFORMATION_HEADER(Structure):
_fields_ = [
('Signature', ARRAY(c_char, 4)),
('HeaderLength', c_uint32),
('Reserved1', c_uint16),
('SpecVersion', c_uint8),
('HeaderRevision', c_uint8),
('ImageRevision', c_uint32),
('ImageId', ARRAY(c_char, 8)),
('ImageSize', c_uint32),
('ImageBase', c_uint32),
('ImageAttribute', c_uint16),
('ComponentAttribute', c_uint16),
('CfgRegionOffset', c_uint32),
('CfgRegionSize', c_uint32),
('Reserved2', c_uint32),
('TempRamInitEntryOffset', c_uint32),
('Reserved3', c_uint32),
('NotifyPhaseEntryOffset', c_uint32),
('FspMemoryInitEntryOffset', c_uint32),
('TempRamExitEntryOffset', c_uint32),
('FspSiliconInitEntryOffset', c_uint32)
]
class FSP_EXTENDED_HEADER(Structure):
_fields_ = [
('Signature', ARRAY(c_char, 4)),
('HeaderLength', c_uint32),
('Revision', c_uint8),
('Reserved', c_uint8),
('FspProducerId', ARRAY(c_char, 6)),
('FspProducerRevision', c_uint32),
('FspProducerDataSize', c_uint32)
]
class FSP_PATCH_TABLE(Structure):
_fields_ = [
('Signature', ARRAY(c_char, 4)),
('HeaderLength', c_uint16),
('HeaderRevision', c_uint8),
('Reserved', c_uint8),
('PatchEntryNum', c_uint32)
]
class Section:
def __init__(self, offset, secdata):
self.SecHdr = EFI_COMMON_SECTION_HEADER.from_buffer(secdata, 0)
self.SecData = secdata[0:int(self.SecHdr.Size)]
self.Offset = offset
def AlignPtr(offset, alignment=8):
return (offset + alignment - 1) & ~(alignment - 1)
def Bytes2Val(bytes):
return reduce(lambda x, y: (x << 8) | y, bytes[:: -1])
def Val2Bytes(value, blen):
return [(value >> (i*8) & 0xff) for i in range(blen)]
class FirmwareFile:
def __init__(self, offset, filedata):
self.FfsHdr = EFI_FFS_FILE_HEADER.from_buffer(filedata, 0)
self.FfsData = filedata[0:int(self.FfsHdr.Size)]
self.Offset = offset
self.SecList = []
def ParseFfs(self):
ffssize = len(self.FfsData)
offset = sizeof(self.FfsHdr)
if self.FfsHdr.Name != '\xff' * 16:
while offset < (ffssize - sizeof(EFI_COMMON_SECTION_HEADER)):
sechdr = EFI_COMMON_SECTION_HEADER.from_buffer(
self.FfsData, offset)
sec = Section(
offset, self.FfsData[offset:offset + int(sechdr.Size)])
self.SecList.append(sec)
offset += int(sechdr.Size)
offset = AlignPtr(offset, 4)
class FirmwareVolume:
def __init__(self, offset, fvdata):
self.FvHdr = EFI_FIRMWARE_VOLUME_HEADER.from_buffer(fvdata, 0)
self.FvData = fvdata[0: self.FvHdr.FvLength]
self.Offset = offset
if self.FvHdr.ExtHeaderOffset > 0:
self.FvExtHdr = EFI_FIRMWARE_VOLUME_EXT_HEADER.from_buffer(
self.FvData, self.FvHdr.ExtHeaderOffset)
else:
self.FvExtHdr = None
self.FfsList = []
def ParseFv(self):
fvsize = len(self.FvData)
if self.FvExtHdr:
offset = self.FvHdr.ExtHeaderOffset + self.FvExtHdr.ExtHeaderSize
else:
offset = self.FvHdr.HeaderLength
offset = AlignPtr(offset)
while offset < (fvsize - sizeof(EFI_FFS_FILE_HEADER)):
ffshdr = EFI_FFS_FILE_HEADER.from_buffer(self.FvData, offset)
if (ffshdr.Name == '\xff' * 16) and \
(int(ffshdr.Size) == 0xFFFFFF):
offset = fvsize
else:
ffs = FirmwareFile(
offset, self.FvData[offset:offset + int(ffshdr.Size)])
ffs.ParseFfs()
self.FfsList.append(ffs)
offset += int(ffshdr.Size)
offset = AlignPtr(offset)
class FspImage:
def __init__(self, offset, fih, fihoff, patch):
self.Fih = fih
self.FihOffset = fihoff
self.Offset = offset
self.FvIdxList = []
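        # The high nibble of ComponentAttribute selects the FSP component type via
        # the lookup string below: 1 -> 'T', 2 -> 'M', 3 -> 'S', 8 -> 'O', and 'X'
        # for anything unrecognized.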
self.Type = "XTMSXXXXOXXXXXXX"[(fih.ComponentAttribute >> 12) & 0x0F]
self.PatchList = patch
self.PatchList.append(fihoff + 0x1C)
def AppendFv(self, FvIdx):
self.FvIdxList.append(FvIdx)
def Patch(self, delta, fdbin):
count = 0
applied = 0
for idx, patch in enumerate(self.PatchList):
ptype = (patch >> 24) & 0x0F
if ptype not in [0x00, 0x0F]:
raise Exception('ERROR: Invalid patch type %d !' % ptype)
if patch & 0x80000000:
patch = self.Fih.ImageSize - (0x1000000 - (patch & 0xFFFFFF))
else:
patch = patch & 0xFFFFFF
if (patch < self.Fih.ImageSize) and \
(patch + sizeof(c_uint32) <= self.Fih.ImageSize):
offset = patch + self.Offset
value = Bytes2Val(fdbin[offset:offset+sizeof(c_uint32)])
value += delta
fdbin[offset:offset+sizeof(c_uint32)] = Val2Bytes(
value, sizeof(c_uint32))
applied += 1
count += 1
# Don't count the FSP base address patch entry appended at the end
if count != 0:
count -= 1
applied -= 1
return (count, applied)
class FirmwareDevice:
def __init__(self, offset, FdData):
self.FvList = []
self.FspList = []
self.FspExtList = []
self.FihList = []
self.BuildList = []
self.OutputText = ""
self.Offset = 0
self.FdData = FdData
def ParseFd(self):
offset = 0
fdsize = len(self.FdData)
self.FvList = []
while offset < (fdsize - sizeof(EFI_FIRMWARE_VOLUME_HEADER)):
fvh = EFI_FIRMWARE_VOLUME_HEADER.from_buffer(self.FdData, offset)
if b'_FVH' != fvh.Signature:
raise Exception("ERROR: Invalid FV header !")
fv = FirmwareVolume(
offset, self.FdData[offset:offset + fvh.FvLength])
fv.ParseFv()
self.FvList.append(fv)
offset += fv.FvHdr.FvLength
def CheckFsp(self):
if len(self.FspList) == 0:
return
fih = None
for fsp in self.FspList:
if not fih:
fih = fsp.Fih
else:
newfih = fsp.Fih
if (newfih.ImageId != fih.ImageId) or \
(newfih.ImageRevision != fih.ImageRevision):
raise Exception(
"ERROR: Inconsistent FSP ImageId or "
"ImageRevision detected !")
def ParseFsp(self):
flen = 0
for idx, fv in enumerate(self.FvList):
# Check if this FV contains FSP header
if flen == 0:
if len(fv.FfsList) == 0:
continue
ffs = fv.FfsList[0]
if len(ffs.SecList) == 0:
continue
sec = ffs.SecList[0]
if sec.SecHdr.Type != EFI_SECTION_TYPE.RAW:
continue
fihoffset = ffs.Offset + sec.Offset + sizeof(sec.SecHdr)
fspoffset = fv.Offset
offset = fspoffset + fihoffset
fih = FSP_INFORMATION_HEADER.from_buffer(self.FdData, offset)
self.FihList.append(fih)
if b'FSPH' != fih.Signature:
continue
offset += fih.HeaderLength
offset = AlignPtr(offset, 2)
Extfih = FSP_EXTENDED_HEADER.from_buffer(self.FdData, offset)
self.FspExtList.append(Extfih)
offset = AlignPtr(offset, 4)
plist = []
while True:
fch = FSP_COMMON_HEADER.from_buffer(self.FdData, offset)
if b'FSPP' != fch.Signature:
offset += fch.HeaderLength
offset = AlignPtr(offset, 4)
else:
fspp = FSP_PATCH_TABLE.from_buffer(
self.FdData, offset)
offset += sizeof(fspp)
start_offset = offset + 32
end_offset = offset + 32
while True:
end_offset += 1
if(self.FdData[
end_offset: end_offset + 1] == b'\xff'):
break
self.BuildList.append(
self.FdData[start_offset:end_offset])
pdata = (c_uint32 * fspp.PatchEntryNum).from_buffer(
self.FdData, offset)
plist = list(pdata)
break
fsp = FspImage(fspoffset, fih, fihoffset, plist)
fsp.AppendFv(idx)
self.FspList.append(fsp)
flen = fsp.Fih.ImageSize - fv.FvHdr.FvLength
else:
fsp.AppendFv(idx)
flen -= fv.FvHdr.FvLength
if flen < 0:
raise Exception("ERROR: Incorrect FV size in image !")
self.CheckFsp()
def OutputFsp(self):
def copy_text_to_clipboard():
window.clipboard_clear()
window.clipboard_append(self.OutputText)
window = tkinter.Tk()
window.title("Fsp Headers")
window.resizable(0, 0)
# Window Size
window.geometry("300x400+350+150")
frame = tkinter.Frame(window)
frame.pack(side=tkinter.BOTTOM)
# Vertical (y) Scroll Bar
scroll = tkinter.Scrollbar(window)
scroll.pack(side=tkinter.RIGHT, fill=tkinter.Y)
text = tkinter.Text(window,
wrap=tkinter.NONE, yscrollcommand=scroll.set)
i = 0
self.OutputText = self.OutputText + "Fsp Header Details \n\n"
while i < len(self.FihList):
try:
self.OutputText += str(self.BuildList[i].decode()) + "\n"
except Exception:
self.OutputText += "No description found\n"
self.OutputText += "FSP Header :\n "
self.OutputText += "Signature : " + \
str(self.FihList[i].Signature.decode('utf-8')) + "\n "
self.OutputText += "Header Length : " + \
str(hex(self.FihList[i].HeaderLength)) + "\n "
self.OutputText += "Header Revision : " + \
str(hex(self.FihList[i].HeaderRevision)) + "\n "
self.OutputText += "Spec Version : " + \
str(hex(self.FihList[i].SpecVersion)) + "\n "
self.OutputText += "Image Revision : " + \
str(hex(self.FihList[i].ImageRevision)) + "\n "
self.OutputText += "Image Id : " + \
str(self.FihList[i].ImageId.decode('utf-8')) + "\n "
self.OutputText += "Image Size : " + \
str(hex(self.FihList[i].ImageSize)) + "\n "
self.OutputText += "Image Base : " + \
str(hex(self.FihList[i].ImageBase)) + "\n "
self.OutputText += "Image Attribute : " + \
str(hex(self.FihList[i].ImageAttribute)) + "\n "
self.OutputText += "Cfg Region Offset : " + \
str(hex(self.FihList[i].CfgRegionOffset)) + "\n "
self.OutputText += "Cfg Region Size : " + \
str(hex(self.FihList[i].CfgRegionSize)) + "\n "
self.OutputText += "API Entry Num : " + \
str(hex(self.FihList[i].Reserved2)) + "\n "
self.OutputText += "Temp Ram Init Entry : " + \
str(hex(self.FihList[i].TempRamInitEntryOffset)) + "\n "
self.OutputText += "FSP Init Entry : " + \
str(hex(self.FihList[i].Reserved3)) + "\n "
self.OutputText += "Notify Phase Entry : " + \
str(hex(self.FihList[i].NotifyPhaseEntryOffset)) + "\n "
self.OutputText += "Fsp Memory Init Entry : " + \
str(hex(self.FihList[i].FspMemoryInitEntryOffset)) + "\n "
self.OutputText += "Temp Ram Exit Entry : " + \
str(hex(self.FihList[i].TempRamExitEntryOffset)) + "\n "
self.OutputText += "Fsp Silicon Init Entry : " + \
str(hex(self.FihList[i].FspSiliconInitEntryOffset)) + "\n\n"
self.OutputText += "FSP Extended Header:\n "
self.OutputText += "Signature : " + \
str(self.FspExtList[i].Signature.decode('utf-8')) + "\n "
self.OutputText += "Header Length : " + \
str(hex(self.FspExtList[i].HeaderLength)) + "\n "
self.OutputText += "Header Revision : " + \
str(hex(self.FspExtList[i].Revision)) + "\n "
self.OutputText += "Fsp Producer Id : " + \
str(self.FspExtList[i].FspProducerId.decode('utf-8')) + "\n "
self.OutputText += "FspProducerRevision : " + \
str(hex(self.FspExtList[i].FspProducerRevision)) + "\n\n"
i += 1
text.insert(tkinter.INSERT, self.OutputText)
text.pack()
# Configure the scrollbars
scroll.config(command=text.yview)
copy_button = tkinter.Button(
window, text="Copy to Clipboard", command=copy_text_to_clipboard)
copy_button.pack(in_=frame, side=tkinter.LEFT, padx=20, pady=10)
exit_button = tkinter.Button(
window, text="Close", command=window.destroy)
exit_button.pack(in_=frame, side=tkinter.RIGHT, padx=20, pady=10)
window.mainloop()
class state:
def __init__(self):
self.state = False
def set(self, value):
self.state = value
def get(self):
return self.state
class application(tkinter.Frame):
def __init__(self, master=None):
root = master
self.debug = True
self.mode = 'FSP'
self.last_dir = '.'
self.page_id = ''
self.page_list = {}
self.conf_list = {}
self.cfg_page_dict = {}
self.cfg_data_obj = None
self.org_cfg_data_bin = None
self.in_left = state()
self.in_right = state()
self.search_text = ''
# Check if current directory contains a file with a .yaml extension
# if not default self.last_dir to a Platform directory where it is
# easier to locate *BoardPkg\CfgData\*Def.yaml files
self.last_dir = '.'
if not any(fname.endswith('.yaml') for fname in os.listdir('.')):
platform_path = Path(os.path.realpath(__file__)).parents[2].\
joinpath('Platform')
if platform_path.exists():
self.last_dir = platform_path
tkinter.Frame.__init__(self, master, borderwidth=2)
self.menu_string = [
'Save Config Data to Binary', 'Load Config Data from Binary',
'Show Binary Information',
'Load Config Changes from Delta File',
'Save Config Changes to Delta File',
'Save Full Config Data to Delta File',
'Open Config BSF file'
]
root.geometry("1200x800")
# Search string
fram = tkinter.Frame(root)
# adding label to search box
tkinter.Label(fram, text='Text to find:').pack(side=tkinter.LEFT)
# adding of single line text box
self.edit = tkinter.Entry(fram, width=30)
# positioning of text box
self.edit.pack(
side=tkinter.LEFT, fill=tkinter.BOTH, expand=1, padx=(4, 4))
# setting focus
self.edit.focus_set()
# adding of search button
butt = tkinter.Button(fram, text='Search', relief=tkinter.GROOVE,
command=self.search_bar)
butt.pack(side=tkinter.RIGHT, padx=(4, 4))
fram.pack(side=tkinter.TOP, anchor=tkinter.SE)
paned = ttk.Panedwindow(root, orient=tkinter.HORIZONTAL)
paned.pack(fill=tkinter.BOTH, expand=True, padx=(4, 4))
status = tkinter.Label(master, text="", bd=1, relief=tkinter.SUNKEN,
anchor=tkinter.W)
status.pack(side=tkinter.BOTTOM, fill=tkinter.X)
frame_left = ttk.Frame(paned, height=800, relief="groove")
self.left = ttk.Treeview(frame_left, show="tree")
# Set up tree HScroller
pady = (10, 10)
self.tree_scroll = ttk.Scrollbar(frame_left,
orient="vertical",
command=self.left.yview)
self.left.configure(yscrollcommand=self.tree_scroll.set)
self.left.bind("<<TreeviewSelect>>", self.on_config_page_select_change)
self.left.bind("<Enter>", lambda e: self.in_left.set(True))
self.left.bind("<Leave>", lambda e: self.in_left.set(False))
self.left.bind("<MouseWheel>", self.on_tree_scroll)
self.left.pack(side='left',
fill=tkinter.BOTH,
expand=True,
padx=(5, 0),
pady=pady)
self.tree_scroll.pack(side='right', fill=tkinter.Y,
pady=pady, padx=(0, 5))
frame_right = ttk.Frame(paned, relief="groove")
self.frame_right = frame_right
self.conf_canvas = tkinter.Canvas(frame_right, highlightthickness=0)
self.page_scroll = ttk.Scrollbar(frame_right,
orient="vertical",
command=self.conf_canvas.yview)
self.right_grid = ttk.Frame(self.conf_canvas)
self.conf_canvas.configure(yscrollcommand=self.page_scroll.set)
self.conf_canvas.pack(side='left',
fill=tkinter.BOTH,
expand=True,
pady=pady,
padx=(5, 0))
self.page_scroll.pack(side='right', fill=tkinter.Y,
pady=pady, padx=(0, 5))
self.conf_canvas.create_window(0, 0, window=self.right_grid,
anchor='nw')
self.conf_canvas.bind('<Enter>', lambda e: self.in_right.set(True))
self.conf_canvas.bind('<Leave>', lambda e: self.in_right.set(False))
self.conf_canvas.bind("<Configure>", self.on_canvas_configure)
self.conf_canvas.bind_all("<MouseWheel>", self.on_page_scroll)
paned.add(frame_left, weight=2)
paned.add(frame_right, weight=10)
style = ttk.Style()
style.layout("Treeview", [('Treeview.treearea', {'sticky': 'nswe'})])
menubar = tkinter.Menu(root)
file_menu = tkinter.Menu(menubar, tearoff=0)
file_menu.add_command(label="Open Config YAML file",
command=self.load_from_yaml)
file_menu.add_command(label=self.menu_string[6],
command=self.load_from_bsf_file)
file_menu.add_command(label=self.menu_string[2],
command=self.load_from_fd)
file_menu.add_command(label=self.menu_string[0],
command=self.save_to_bin,
state='disabled')
file_menu.add_command(label=self.menu_string[1],
command=self.load_from_bin,
state='disabled')
file_menu.add_command(label=self.menu_string[3],
command=self.load_from_delta,
state='disabled')
file_menu.add_command(label=self.menu_string[4],
command=self.save_to_delta,
state='disabled')
file_menu.add_command(label=self.menu_string[5],
command=self.save_full_to_delta,
state='disabled')
file_menu.add_command(label="About", command=self.about)
menubar.add_cascade(label="File", menu=file_menu)
self.file_menu = file_menu
root.config(menu=menubar)
if len(sys.argv) > 1:
path = sys.argv[1]
if not path.endswith('.yaml') and not path.endswith('.pkl'):
messagebox.showerror('LOADING ERROR',
"Unsupported file '%s' !" % path)
return
else:
self.load_cfg_file(path)
if len(sys.argv) > 2:
path = sys.argv[2]
if path.endswith('.dlt'):
self.load_delta_file(path)
elif path.endswith('.bin'):
self.load_bin_file(path)
else:
messagebox.showerror('LOADING ERROR',
"Unsupported file '%s' !" % path)
return
def search_bar(self):
# get data from text box
self.search_text = self.edit.get()
# Clear the page and update it according to search value
self.refresh_config_data_page()
def set_object_name(self, widget, name):
self.conf_list[id(widget)] = name
def get_object_name(self, widget):
if id(widget) in self.conf_list:
return self.conf_list[id(widget)]
else:
return None
def limit_entry_size(self, variable, limit):
value = variable.get()
if len(value) > limit:
variable.set(value[:limit])
def on_canvas_configure(self, event):
self.right_grid.grid_columnconfigure(0, minsize=event.width)
def on_tree_scroll(self, event):
if not self.in_left.get() and self.in_right.get():
# This prevents scroll event from being handled by both left and
# right frame at the same time.
self.on_page_scroll(event)
return 'break'
def on_page_scroll(self, event):
if self.in_right.get():
# Only scroll when it is in active area
            first, last = self.page_scroll.get()
            if not ((first == 0.0) and (last == 1.0)):
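                # event.delta is a multiple of 120 per wheel notch on Windows,
                # so this scrolls one unit per notch.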
self.conf_canvas.yview_scroll(-1 * int(event.delta / 120),
'units')
def update_visibility_for_widget(self, widget, args):
visible = True
item = self.get_config_data_item_from_widget(widget, True)
if item is None:
return visible
elif not item:
return visible
if self.cfg_data_obj.binseg_dict:
str_split = item['path'].split('.')
if str_split[-2] not in CGenYamlCfg.available_fv and \
str_split[-2] not in CGenYamlCfg.missing_fv:
if self.cfg_data_obj.binseg_dict[str_split[-3]] == -1:
visible = False
widget.grid_remove()
return visible
else:
if self.cfg_data_obj.binseg_dict[str_split[-2]] == -1:
visible = False
widget.grid_remove()
return visible
result = 1
if item['condition']:
result = self.evaluate_condition(item)
if result == 2:
# Gray
widget.configure(state='disabled')
elif result == 0:
# Hide
visible = False
widget.grid_remove()
else:
# Show
widget.grid()
widget.configure(state='normal')
if visible and self.search_text != '':
name = item['name']
if name.lower().find(self.search_text.lower()) == -1:
visible = False
widget.grid_remove()
return visible
def update_widgets_visibility_on_page(self):
self.walk_widgets_in_layout(self.right_grid,
self.update_visibility_for_widget)
def combo_select_changed(self, event):
self.update_config_data_from_widget(event.widget, None)
self.update_widgets_visibility_on_page()
def edit_num_finished(self, event):
widget = event.widget
item = self.get_config_data_item_from_widget(widget)
if not item:
return
parts = item['type'].split(',')
if len(parts) > 3:
min = parts[2].lstrip()[1:]
max = parts[3].rstrip()[:-1]
min_val = array_str_to_value(min)
max_val = array_str_to_value(max)
text = widget.get()
if ',' in text:
text = '{ %s }' % text
try:
value = array_str_to_value(text)
if value < min_val or value > max_val:
raise Exception('Invalid input!')
self.set_config_item_value(item, text)
except Exception:
pass
text = item['value'].strip('{').strip('}').strip()
widget.delete(0, tkinter.END)
widget.insert(0, text)
self.update_widgets_visibility_on_page()
def update_page_scroll_bar(self):
# Update scrollbar
self.frame_right.update()
self.conf_canvas.config(scrollregion=self.conf_canvas.bbox("all"))
def on_config_page_select_change(self, event):
self.update_config_data_on_page()
sel = self.left.selection()
if len(sel) > 0:
page_id = sel[0]
self.build_config_data_page(page_id)
self.update_widgets_visibility_on_page()
self.update_page_scroll_bar()
def walk_widgets_in_layout(self, parent, callback_function, args=None):
for widget in parent.winfo_children():
callback_function(widget, args)
def clear_widgets_inLayout(self, parent=None):
if parent is None:
parent = self.right_grid
for widget in parent.winfo_children():
widget.destroy()
parent.grid_forget()
self.conf_list.clear()
def build_config_page_tree(self, cfg_page, parent):
for page in cfg_page['child']:
page_id = next(iter(page))
# Put CFG items into related page list
self.page_list[page_id] = self.cfg_data_obj.get_cfg_list(page_id)
self.page_list[page_id].sort(key=lambda x: x['order'])
page_name = self.cfg_data_obj.get_page_title(page_id)
child = self.left.insert(
parent, 'end',
iid=page_id, text=page_name,
value=0)
if len(page[page_id]) > 0:
self.build_config_page_tree(page[page_id], child)
def is_config_data_loaded(self):
return True if len(self.page_list) else False
def set_current_config_page(self, page_id):
self.page_id = page_id
def get_current_config_page(self):
return self.page_id
def get_current_config_data(self):
page_id = self.get_current_config_page()
if page_id in self.page_list:
return self.page_list[page_id]
else:
return []
invalid_values = {}
def build_config_data_page(self, page_id):
self.clear_widgets_inLayout()
self.set_current_config_page(page_id)
disp_list = []
for item in self.get_current_config_data():
disp_list.append(item)
row = 0
disp_list.sort(key=lambda x: x['order'])
for item in disp_list:
self.add_config_item(item, row)
row += 2
if self.invalid_values:
            string = 'The following contains invalid options/values \n\n'
for i in self.invalid_values:
string += i + ": " + str(self.invalid_values[i]) + "\n"
reply = messagebox.showwarning('Warning!', string)
if reply == 'ok':
self.invalid_values.clear()
fsp_version = ''
def load_config_data(self, file_name):
gen_cfg_data = CGenYamlCfg()
if file_name.endswith('.pkl'):
with open(file_name, "rb") as pkl_file:
gen_cfg_data.__dict__ = marshal.load(pkl_file)
gen_cfg_data.prepare_marshal(False)
elif file_name.endswith('.yaml'):
if gen_cfg_data.load_yaml(file_name) != 0:
raise Exception(gen_cfg_data.get_last_error())
else:
raise Exception('Unsupported file "%s" !' % file_name)
# checking fsp version
if gen_cfg_data.detect_fsp():
self.fsp_version = '2.X'
else:
self.fsp_version = '1.X'
return gen_cfg_data
def about(self):
msg = 'Configuration Editor\n--------------------------------\n \
Version 0.8\n2021'
lines = msg.split('\n')
width = 30
text = []
for line in lines:
text.append(line.center(width, ' '))
messagebox.showinfo('Config Editor', '\n'.join(text))
def update_last_dir(self, path):
self.last_dir = os.path.dirname(path)
def get_open_file_name(self, ftype):
if self.is_config_data_loaded():
if ftype == 'dlt':
question = ''
elif ftype == 'bin':
question = 'All configuration will be reloaded from BIN file, \
continue ?'
elif ftype == 'yaml':
question = ''
elif ftype == 'bsf':
question = ''
else:
raise Exception('Unsupported file type !')
if question:
reply = messagebox.askquestion('', question, icon='warning')
if reply == 'no':
return None
if ftype == 'yaml':
if self.mode == 'FSP':
file_type = 'YAML'
file_ext = 'yaml'
else:
file_type = 'YAML or PKL'
file_ext = 'pkl *.yaml'
else:
file_type = ftype.upper()
file_ext = ftype
path = filedialog.askopenfilename(
initialdir=self.last_dir,
title="Load file",
filetypes=(("%s files" % file_type, "*.%s" % file_ext), (
"all files", "*.*")))
if path:
self.update_last_dir(path)
return path
else:
return None
def load_from_delta(self):
path = self.get_open_file_name('dlt')
if not path:
return
self.load_delta_file(path)
def load_delta_file(self, path):
self.reload_config_data_from_bin(self.org_cfg_data_bin)
try:
self.cfg_data_obj.override_default_value(path)
except Exception as e:
messagebox.showerror('LOADING ERROR', str(e))
return
self.update_last_dir(path)
self.refresh_config_data_page()
def load_from_bin(self):
path = filedialog.askopenfilename(
initialdir=self.last_dir,
title="Load file",
filetypes={("Binaries", "*.fv *.fd *.bin *.rom")})
if not path:
return
self.load_bin_file(path)
def load_bin_file(self, path):
with open(path, 'rb') as fd:
bin_data = bytearray(fd.read())
if len(bin_data) < len(self.org_cfg_data_bin):
            messagebox.showerror('LOADING ERROR',
                                 'Binary file size is smaller than what '
                                 'YAML requires !')
return
try:
self.reload_config_data_from_bin(bin_data)
except Exception as e:
messagebox.showerror('LOADING ERROR', str(e))
return
def load_from_bsf_file(self):
path = self.get_open_file_name('bsf')
if not path:
return
self.load_bsf_file(path)
def load_bsf_file(self, path):
bsf_file = path
dsc_file = os.path.splitext(bsf_file)[0] + '.dsc'
yaml_file = os.path.splitext(bsf_file)[0] + '.yaml'
bsf_to_dsc(bsf_file, dsc_file)
dsc_to_yaml(dsc_file, yaml_file)
self.load_cfg_file(yaml_file)
return
def load_from_fd(self):
path = filedialog.askopenfilename(
initialdir=self.last_dir,
title="Load file",
filetypes={("Binaries", "*.fv *.fd *.bin *.rom")})
if not path:
return
self.load_fd_file(path)
def load_fd_file(self, path):
with open(path, 'rb') as fd:
bin_data = bytearray(fd.read())
fd = FirmwareDevice(0, bin_data)
fd.ParseFd()
fd.ParseFsp()
fd.OutputFsp()
def load_cfg_file(self, path):
# Save current values in widget and clear database
self.clear_widgets_inLayout()
self.left.delete(*self.left.get_children())
self.cfg_data_obj = self.load_config_data(path)
self.update_last_dir(path)
self.org_cfg_data_bin = self.cfg_data_obj.generate_binary_array()
self.build_config_page_tree(self.cfg_data_obj.get_cfg_page()['root'],
'')
msg_string = 'Click YES if it is FULL FSP '\
+ self.fsp_version + ' Binary'
reply = messagebox.askquestion('Form', msg_string)
if reply == 'yes':
self.load_from_bin()
for menu in self.menu_string:
self.file_menu.entryconfig(menu, state="normal")
return 0
def load_from_yaml(self):
path = self.get_open_file_name('yaml')
if not path:
return
self.load_cfg_file(path)
def get_save_file_name(self, extension):
path = filedialog.asksaveasfilename(
initialdir=self.last_dir,
title="Save file",
defaultextension=extension)
if path:
self.last_dir = os.path.dirname(path)
return path
else:
return None
def save_delta_file(self, full=False):
path = self.get_save_file_name(".dlt")
if not path:
return
self.update_config_data_on_page()
new_data = self.cfg_data_obj.generate_binary_array()
self.cfg_data_obj.generate_delta_file_from_bin(path,
self.org_cfg_data_bin,
new_data, full)
def save_to_delta(self):
self.save_delta_file()
def save_full_to_delta(self):
self.save_delta_file(True)
def save_to_bin(self):
path = self.get_save_file_name(".bin")
if not path:
return
self.update_config_data_on_page()
bins = self.cfg_data_obj.save_current_to_bin()
with open(path, 'wb') as fd:
fd.write(bins)
def refresh_config_data_page(self):
self.clear_widgets_inLayout()
self.on_config_page_select_change(None)
def set_config_data_page(self):
page_id_list = []
for idx, page in enumerate(
self.cfg_data_obj._cfg_page['root']['child']):
page_id_list.append(list(page.keys())[0])
page_list = self.cfg_data_obj.get_cfg_list(page_id_list[idx])
self.cfg_page_dict[page_id_list[idx]] = 0
for item in page_list:
str_split = item['path'].split('.')
if str_split[-2] not in CGenYamlCfg.available_fv and \
str_split[-2] not in CGenYamlCfg.missing_fv:
if self.cfg_data_obj.binseg_dict[str_split[-3]] != -1:
self.cfg_page_dict[page_id_list[idx]] += 1
else:
if self.cfg_data_obj.binseg_dict[str_split[-2]] != -1:
self.cfg_page_dict[page_id_list[idx]] += 1
removed_page = 0
for idx, id in enumerate(page_id_list):
if self.cfg_page_dict[id] == 0:
del self.cfg_data_obj._cfg_page['root']['child'][idx-removed_page] # noqa: E501
removed_page += 1
def reload_config_data_from_bin(self, bin_dat):
self.cfg_data_obj.load_default_from_bin(bin_dat)
self.set_config_data_page()
self.left.delete(*self.left.get_children())
self.build_config_page_tree(self.cfg_data_obj.get_cfg_page()['root'],
'')
self.refresh_config_data_page()
def set_config_item_value(self, item, value_str):
itype = item['type'].split(',')[0]
if itype == "Table":
new_value = value_str
elif itype == "EditText":
length = (self.cfg_data_obj.get_cfg_item_length(item) + 7) // 8
new_value = value_str[:length]
if item['value'].startswith("'"):
new_value = "'%s'" % new_value
else:
try:
new_value = self.cfg_data_obj.reformat_value_str(
value_str,
self.cfg_data_obj.get_cfg_item_length(item),
item['value'])
except Exception:
print("WARNING: Failed to format value string '%s' for '%s' !"
% (value_str, item['path']))
new_value = item['value']
if item['value'] != new_value:
if self.debug:
print('Update %s from %s to %s !'
% (item['cname'], item['value'], new_value))
item['value'] = new_value
def get_config_data_item_from_widget(self, widget, label=False):
name = self.get_object_name(widget)
if not name or not len(self.page_list):
return None
if name.startswith('LABEL_'):
if label:
path = name[6:]
else:
return None
else:
path = name
item = self.cfg_data_obj.get_item_by_path(path)
return item
def update_config_data_from_widget(self, widget, args):
item = self.get_config_data_item_from_widget(widget)
if item is None:
return
elif not item:
if isinstance(widget, tkinter.Label):
return
raise Exception('Failed to find "%s" !' %
self.get_object_name(widget))
itype = item['type'].split(',')[0]
if itype == "Combo":
opt_list = self.cfg_data_obj.get_cfg_item_options(item)
tmp_list = [opt[0] for opt in opt_list]
idx = widget.current()
if idx != -1:
self.set_config_item_value(item, tmp_list[idx])
elif itype in ["EditNum", "EditText"]:
self.set_config_item_value(item, widget.get())
elif itype in ["Table"]:
new_value = bytes_to_bracket_str(widget.get())
self.set_config_item_value(item, new_value)
def evaluate_condition(self, item):
try:
result = self.cfg_data_obj.evaluate_condition(item)
except Exception:
print("WARNING: Condition '%s' is invalid for '%s' !"
% (item['condition'], item['path']))
result = 1
return result
def add_config_item(self, item, row):
parent = self.right_grid
name = tkinter.Label(parent, text=item['name'], anchor="w")
parts = item['type'].split(',')
itype = parts[0]
widget = None
if itype == "Combo":
# Build
opt_list = self.cfg_data_obj.get_cfg_item_options(item)
current_value = self.cfg_data_obj.get_cfg_item_value(item, False)
option_list = []
current = None
for idx, option in enumerate(opt_list):
option_str = option[0]
try:
option_value = self.cfg_data_obj.get_value(
option_str,
len(option_str), False)
except Exception:
option_value = 0
print('WARNING: Option "%s" has invalid format for "%s" !'
% (option_str, item['path']))
if option_value == current_value:
current = idx
option_list.append(option[1])
widget = ttk.Combobox(parent, value=option_list, state="readonly")
widget.bind("<<ComboboxSelected>>", self.combo_select_changed)
widget.unbind_class("TCombobox", "<MouseWheel>")
if current is None:
print('WARNING: Value "%s" is an invalid option for "%s" !' %
(current_value, item['path']))
self.invalid_values[item['path']] = current_value
else:
widget.current(current)
elif itype in ["EditNum", "EditText"]:
txt_val = tkinter.StringVar()
widget = tkinter.Entry(parent, textvariable=txt_val)
value = item['value'].strip("'")
if itype in ["EditText"]:
txt_val.trace(
'w',
lambda *args: self.limit_entry_size
(txt_val, (self.cfg_data_obj.get_cfg_item_length(item)
+ 7) // 8))
elif itype in ["EditNum"]:
value = item['value'].strip("{").strip("}").strip()
widget.bind("<FocusOut>", self.edit_num_finished)
txt_val.set(value)
elif itype in ["Table"]:
bins = self.cfg_data_obj.get_cfg_item_value(item, True)
col_hdr = item['option'].split(',')
widget = custom_table(parent, col_hdr, bins)
else:
if itype and itype not in ["Reserved"]:
print("WARNING: Type '%s' is invalid for '%s' !" %
(itype, item['path']))
self.invalid_values[item['path']] = itype
if widget:
create_tool_tip(widget, item['help'])
self.set_object_name(name, 'LABEL_' + item['path'])
self.set_object_name(widget, item['path'])
name.grid(row=row, column=0, padx=10, pady=5, sticky="nsew")
widget.grid(row=row + 1, rowspan=1, column=0,
padx=10, pady=5, sticky="nsew")
def update_config_data_on_page(self):
self.walk_widgets_in_layout(self.right_grid,
self.update_config_data_from_widget)
if __name__ == '__main__':
root = tkinter.Tk()
app = application(master=root)
root.title("Config Editor")
root.mainloop()
|
the-stack_106_28390 | #!/usr/bin/env python3
import sys
import re
import argparse
import subprocess
from pathlib import Path
from snakemake.utils import read_job_properties
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"--help", help="Display help message.", action="store_true")
parser.add_argument(
"positional", action="append",
nargs="?", metavar="POS",
help="additional arguments not in slurm parser group to pass to sbatch")
# A subset of SLURM-specific arguments
slurm_parser = parser.add_argument_group("slurm-specific arguments")
slurm_parser.add_argument(
"-a", "--array", help="job array index values")
slurm_parser.add_argument(
"-A", "--account", help="charge job to specified account")
slurm_parser.add_argument(
"--begin", help="defer job until HH:MM MM/DD/YY")
slurm_parser.add_argument(
"-c", "--cpus-per-task", help="number of cpus required per task")
slurm_parser.add_argument(
"-d", "--dependency",
help="defer job until condition on jobid is satisfied")
slurm_parser.add_argument(
"-D", "--workdir", help="set working directory for batch script")
slurm_parser.add_argument(
"-e", "--error", help="file for batch script's standard error")
slurm_parser.add_argument(
"-J", "--job-name", help="name of job")
slurm_parser.add_argument(
"--mail-type", help="notify on state change: BEGIN, END, FAIL or ALL")
slurm_parser.add_argument(
"--mail-user", help="who to send email notification for job state changes")
slurm_parser.add_argument(
"-n", "--ntasks", help="number of tasks to run")
slurm_parser.add_argument(
"-N", "--nodes", help="number of nodes on which to run (N = min[-max])")
slurm_parser.add_argument(
"-o", "--output", help="file for batch script's standard output")
slurm_parser.add_argument(
"-p", "--partition", help="partition requested")
slurm_parser.add_argument(
"-q", "--qos", help="quality of service")
slurm_parser.add_argument(
"-Q", "--quiet", help="quiet mode (suppress informational messages)")
slurm_parser.add_argument(
"-t", "--time", help="time limit")
slurm_parser.add_argument(
"--wrap", help="wrap command string in a sh script and submit")
slurm_parser.add_argument(
"-C", "--constraint", help="specify a list of constraints")
slurm_parser.add_argument(
"--mem", help="minimum amount of real memory")
args = parser.parse_args()
if args.help:
parser.print_help()
sys.exit(0)
jobscript = sys.argv[-1]
job_properties = read_job_properties(jobscript)
extras = ""
if args.positional:
for m in args.positional:
if m is not None:
extras = extras + " " + m
arg_dict = dict(args.__dict__)
# Process resources
if "resources" in job_properties:
resources = job_properties["resources"]
if arg_dict["time"] is None:
if "runtime" in resources:
arg_dict["time"] = resources["runtime"]
elif "walltime" in resources:
arg_dict["time"] = resources["walltime"]
if "mem" in resources and arg_dict["mem"] is None:
arg_dict["mem"] = resources["mem"]
# Threads
if "threads" in job_properties:
arg_dict["ntasks"] = job_properties["threads"]
opt_keys = {"array", "account", "begin", "cpus_per_task",
"depedency", "workdir", "error", "job_name", "mail_type",
"mail_user", "ntasks", "nodes", "output", "partition",
"quiet", "time", "wrap", "constraint", "mem"}
# Set default partition
DEFAULT_PARTITION = "core"
if arg_dict["partition"] is None:
    if not DEFAULT_PARTITION:
        # partitions and SLURM - If not specified, the default behavior is to
        # allow the slurm controller to select the default partition as
        # designated by the system administrator.
        opt_keys.remove("partition")
    else:
        arg_dict["partition"] = DEFAULT_PARTITION
# An account is required for submission
if arg_dict["account"] is None:
raise Exception("Cannot submit Slurm jobs without account!")
# Ensure output folder for Slurm log files exist
# This is a bit hacky; it will try to create the folder
# for every Slurm submission...
if "output" in arg_dict:
stdout_folder = Path(arg_dict["output"]).parent
stdout_folder.mkdir(exist_ok=True, parents=True)
if "error" in arg_dict:
stdout_folder = Path(arg_dict["error"]).parent
stdout_folder.mkdir(exist_ok=True, parents=True)
opts = ""
for k, v in arg_dict.items():
if k not in opt_keys:
continue
if v is not None:
opts += " --{} \"{}\" ".format(k.replace("_", "-"), v)
if arg_dict["wrap"] is not None:
cmd = "sbatch {opts}".format(opts=opts)
else:
cmd = "sbatch {opts} {extras}".format(opts=opts, extras=extras)
try:
res = subprocess.run(cmd, check=True, shell=True, stdout=subprocess.PIPE)
except subprocess.CalledProcessError as e:
raise e
# Get jobid
res = res.stdout.decode()
try:
m = re.search("Submitted batch job (\d+)", res)
jobid = m.group(1)
print(jobid)
except Exception as e:
print(e)
raise
|
the-stack_106_28393 | """
Django Settings with Environment Variables | Cannlytics Console
Author: Keegan Skeate <[email protected]>
Created: 6/5/2021
Updated: 6/8/2021
Description:
Django settings secured by Google Cloud Secret Manager.
References:
https://docs.djangoproject.com/en/3.1/topics/settings/
https://docs.djangoproject.com/en/3.1/ref/settings/
https://cloud.google.com/secret-manager/docs/overview
https://codelabs.developers.google.com/codelabs/cloud-run-django
"""
# Standard imports
import json
import io
import os
import re
# External imports
import environ
import google.auth
from google.cloud import secretmanager as sm
from django.template import base
# Optional: Caching for production.
# https://docs.djangoproject.com/en/3.2/ref/templates/api/#django.template.loaders.cached.Loader
# Optional: Hashing for production.
# https://docs.djangoproject.com/en/3.2/ref/contrib/staticfiles/#manifeststaticfilesstorage
# ------------------------------------------------------------#
# Project variables
# ------------------------------------------------------------#
PROJECT_NAME = 'console'
ROOT_URLCONF = 'console.urls'
SETTINGS_NAME = 'cannlytics_settings'
WSGI_APPLICATION = 'console.wsgi.application'
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Get the version number.
with open(os.path.join(BASE_DIR, 'package.json')) as v_file:
package = json.loads(v_file.read())
APP_VERSION_NUMBER = package['version']
# ------------------------------------------------------------#
# Environment variables.
# Pulling django-environ settings file, stored in Secret Manager.
# ------------------------------------------------------------#
# Set Google Cloud credentials. (Is this needed?)
# env = environ.Env()
# env.read_env(os.path.join(BASE_DIR, '.env'))
# credentials = env('GOOGLE_APPLICATION_CREDENTIALS')
# os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials
# Load secrets stored as environment variables.
try:
env_file = os.path.join(BASE_DIR, '.env')
    if not os.path.isfile(env_file):
import google.auth
from google.cloud import secretmanager as sm
_, project = google.auth.default()
if project:
client = sm.SecretManagerServiceClient()
# path = client.secret_version_path(project, SETTINGS_NAME, 'latest')
name = f"projects/{project}/secrets/{SETTINGS_NAME}/versions/latest"
payload = client.access_secret_version(name=name).payload.data.decode('UTF-8')
with open(env_file, 'w') as f:
f.write(payload)
env = environ.Env()
env.read_env(io.StringIO(payload))
SECRET_KEY = env('SECRET_KEY')
# DEBUG = env('DEBUG') # TODO: Set PRODUCTION in Secret Manager secret.
except Exception:
# Create a default secret key for development.
# https://stackoverflow.com/questions/4664724/distributing-django-projects-with-unique-secret-keys
env = environ.Env()
try:
from console.secret_key import SECRET_KEY
except ImportError:
from console.utils import generate_secret_key
SETTINGS_DIR = os.path.abspath(os.path.dirname(__file__))
SECRET_KEY = generate_secret_key(os.path.join(SETTINGS_DIR, 'secret_key.py'))
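# For local development the .env payload is expected to define the variables
# referenced further below; an illustrative example with placeholder values
# (not real settings) could look like:
#   SECRET_KEY=change-me
#   PRODUCTION=False
#   CUSTOM_DOMAIN=example.com
#   FIREBASE_HOSTING_URL=example.web.app
#   CLOUD_RUN_URL=example.a.run.app
#   EMAIL_HOST_USER=admin@example.com
#   EMAIL_HOST_PASSWORD=app-password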
# ------------------------------------------------------------#
# Ensure PRODUCTION is set to True in your .env when publishing!
# ------------------------------------------------------------#
PRODUCTION = env('PRODUCTION')
print('Production status:', PRODUCTION)
if PRODUCTION == 'True':
DEBUG = False
else:
DEBUG = True
# ------------------------------------------------------------#
# Apps
# https://docs.djangoproject.com/en/3.1/ref/applications/
# ------------------------------------------------------------#
INSTALLED_APPS = [
'api',
'cannlytics',
'console',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_feather',
'django_robohash',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# ------------------------------------------------------------#
# Middleware
# https://docs.djangoproject.com/en/3.1/topics/http/middleware/
# ------------------------------------------------------------#
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'console.core.middleware.AppendOrRemoveSlashMiddleware',
]
# ------------------------------------------------------------#
# Livereload
# https://github.com/tjwalch/django-livereload-server
# ------------------------------------------------------------#
if PRODUCTION == 'False':
INSTALLED_APPS.insert(0, 'livereload')
MIDDLEWARE.insert(0, 'livereload.middleware.LiveReloadScript')
MIDDLEWARE_CLASSES = 'livereload.middleware.LiveReloadScript'
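    # With django-livereload-server installed, the typical dev workflow is to
    # run `python manage.py livereload` alongside `python manage.py runserver`.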
# ------------------------------------------------------------#
# Templates
# https://docs.djangoproject.com/en/3.1/ref/templates/language/
# ------------------------------------------------------------#
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'console/templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'console.core.context_processors.selected_settings', # Adds select settings to the context.
],
},
},
]
# ------------------------------------------------------------#
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
# ------------------------------------------------------------#
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'
},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'},
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
]
# ------------------------------------------------------------#
# Authentication
# Optional: Setup custom authentication backend with Firebase.
# https://www.oscaralsing.com/firebase-authentication-in-django/
# ------------------------------------------------------------#
# AUTHENTICATION_BACKENDS = []
# REST_FRAMEWORK = {
# 'DEFAULT_AUTHENTICATION_CLASSES': (
# 'REST_framework.authentication.SessionAuthentication',
# 'cannlytics_auth.authentication.FirebaseAuthentication',
# ),
# }
# ------------------------------------------------------------#
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
# ------------------------------------------------------------#
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# ------------------------------------------------------------#
# Security
# https://developer.mozilla.org/en-US/docs/Learn/Server-side/Django/web_application_security
# ------------------------------------------------------------#
ALLOWED_HOSTS = [
env('CUSTOM_DOMAIN'),
env('FIREBASE_HOSTING_URL'),
env('CLOUD_RUN_URL')
]
if PRODUCTION == 'False':
ALLOWED_HOSTS.extend(['*', 'localhost:8000', '127.0.0.1'])
SECURE_SSL_REDIRECT = False
# ------------------------------------------------------------#
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# ------------------------------------------------------------#
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# ------------------------------------------------------------#
# Email
# https://docs.djangoproject.com/en/3.1/topics/email/
# ------------------------------------------------------------#
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = '587'
EMAIL_USE_TLS = True
EMAIL_HOST_USER = env('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD')
DEFAULT_FROM_EMAIL = env('EMAIL_HOST_USER')
LIST_OF_EMAIL_RECIPIENTS = [env('EMAIL_HOST_USER')]
# ------------------------------------------------------------#
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
# ------------------------------------------------------------#
# List of directories where Django will also look for static files
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'console/static'),)
# The directory from where files are served. (web accessible folder)
STATIC_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'public/static')
)
# The relative path to serve files.
STATIC_URL = '/static/'
# ------------------------------------------------------------#
# Google Cloud Storage alternative for serving static files
# Uncomment lines 237-267 to setup Firebase Storage.
# ------------------------------------------------------------#
# # Setup Google Cloud Storage for Django.
# # https://django-storages.readthedocs.io/en/latest/backends/gcloud.html
# INSTALLED_APPS += ['storages'] # for django-storages
# # Define static storage via django-storages[google]
# try:
# GOOGLE_APPLICATION_CREDENTIALS = env('GOOGLE_APPLICATION_CREDENTIALS')
# except:
# pass
# # Set the default storage and bucket name in your settings.py file:
# DEFAULT_FILE_STORAGE = 'storages.backends.gcloud.GoogleCloudStorage'
# try:
# GS_BUCKET_NAME = env('GS_BUCKET_NAME')
# except:
# pass
# # To allow django-admin collectstatic to automatically
# # put your static files in your bucket:
# STATICFILES_STORAGE = 'storages.backends.gcloud.GoogleCloudStorage'
# # Specify file permissions.
# GS_DEFAULT_ACL = 'publicRead'
# # Tell Django the base url to access the static files. Think of this as the 'prefix' of the URL
# # to where your static files are. Note that if you browse through your bucket and happen to see a
# # URL such as 'https://storage.cloud.google.com/<your_bucket_name>/someFileYouHaveUploaded', such
# # URL requires that whoever accesses it should be currently logged-in with their Google accounts. If
# # you want your static files to be publicly accessible by anyone whether they are logged-in or not,
# # use the link 'https://storage.googleapis.com/<your_bucket_name>/someFileYouHaveUploaded' instead.
# STATIC_URL = 'https://storage.googleapis.com/your-lims.appspot.com/'
# # If the command 'collectstatic' is invoked, tell Django where to place all the collected static
# # files from all the directories included in STATICFILES_DIRS. Be aware that configuring it with a
# # path outside your /home/me means that you need to have permissions to write to that folder later
# # on when you invoke 'collectstatic', so you might need to login as root first or run it as sudo.
# STATIC_ROOT = 'https://storage.googleapis.com/your-lims.appspot.com/public/static/'
# ------------------------------------------------------------#
# Sessions
# https://docs.djangoproject.com/en/3.1/topics/http/sessions/
# ------------------------------------------------------------#
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# ------------------------------------------------------------#
# Customization
# ------------------------------------------------------------#
# Remove trailing slash from URLs.
APPEND_SLASH = False
# Allow Django template tags to span multiple lines.
# https://stackoverflow.com/questions/49110044/django-template-tag-on-multiple-line
base.tag_re = re.compile(base.tag_re.pattern, re.DOTALL)
# Host static documentation.
DOCS_DIR = os.path.join(BASE_DIR, f'{PROJECT_NAME}/static/{PROJECT_NAME}/docs')
DOCS_STATIC_NAMESPACE = os.path.basename(DOCS_DIR)
# Optional: Re-write to read docs directory directly.
# MKDOCS_CONFIG = os.path.join(BASE_DIR, 'mkdocs.yml')
# DOCS_DIR = ''
# DOCS_STATIC_NAMESPACE = ''
# with open(MKDOCS_CONFIG, 'r') as f:
# DOCS_DIR = yaml.load(f, Loader=yaml.Loader)['site_dir']
# DOCS_STATIC_NAMESPACE = os.path.basename(DOCS_DIR)
|
the-stack_106_28396 | import numpy as np
class SumTreeV2(object):
"""
This SumTree code is modified version and the original code is from:
https://github.com/jaara/AI-blog/blob/master/SumTree.py
Story the data with it priority in tree and data frameworks.
"""
# TODO: Under construction
# TODO: STILL cannot figure out the fatal problem of deletion
# TODO: How to keep the structure of the tree after delection
data_pointer = 0
def __init__(self, capacity):
self.last_capacity = capacity
self.const_last_capacity = self.last_capacity
self.capacity = capacity # for all priority values
self.tree = np.zeros(2 * capacity - 1)
# [--------------Parent nodes-------------][-------leaves to recode priority-------]
# size: capacity - 1 size: capacity
self.data = np.zeros(capacity, dtype=object) # for all transitions
# [--------------data frame-------------]
# size: capacity
def add(self, p, data):
if self.data_pointer >= self.last_capacity and self.last_capacity < self.capacity:
tree_idx = self.data_pointer + self.last_capacity
self.data[self.data_pointer] = data # update data_frame
self.update(tree_idx, p) # update tree_frame
self.data_pointer += 1
self.last_capacity += 1
if self.data_pointer == self.capacity:
if self.capacity > self.const_last_capacity:
delta = self.capacity - self.const_last_capacity
self.synchronize(delta)
elif self.capacity < self.const_last_capacity:
delta = self.const_last_capacity - self.capacity
self.synchronize(delta)
            # when we enlarge the tree, the original order will be disorganized
# the index of data does not match the index of the tree
# thus, we need to update the original data frame with the new order stored in the new tree
# only cope the leaf; i.e. from N-1 to the last one
else:
"""
if self.data_pointer >= self.capacity: # replace when exceed the capacity
for i in range(len(self.data)):
if self.data[i] != 0:
continue
else:
self.data[i] = data
tree_idx = (self.capacity - 1) + i
self.update(tree_idx, p)
break
print("--> the memory is full")
"""
tree_idx = self.data_pointer + self.capacity - 1
self.data[self.data_pointer] = data # update data_frame
self.update(tree_idx, p) # update tree_frame
self.data_pointer += 1
def update(self, tree_idx, p):
change = p - self.tree[tree_idx]
self.tree[tree_idx] = p
# then propagate the change through tree
while tree_idx != 0: # this method is faster than the recursive loop in the reference code
            tree_idx = (tree_idx - 1) // 2  # floor division gives the parent index
self.tree[tree_idx] += change
def capacity_enlarge(self, k):
"""
Increase to N + k
:param k:
:return:
"""
count = 0
idx = self.capacity - 1
while count < k:
left = self.tree[idx]
right = 0
insert_pos = self.tree.shape[0]
self.tree = np.insert(self.tree, insert_pos, [left, right])
            # Data frame's order must be changed too
idx += 1
count += 1
self.last_capacity = self.capacity # mark down the last capacity for adding operation
self.const_last_capacity = self.last_capacity # unchange
self.capacity += k # Update the value of capacity
self.data = np.insert(self.data, self.data.shape[0], np.zeros(k)) # The data frame also need to be extended
# synchronize the data frame after enlarge because of the change of the tree structure
#self.synchronize(k)
    def synchronize(self, k):
        """
        Synchronize the data frame after enlarging, because the tree structure has changed.
        :param k: the capacity increment
:return:
"""
# synchronize the data frame after enlarge because of the change of the tree structure
tmp_data = np.delete(self.data, range(k)) # first delete first k elements in the data frame
original_tree_idx = self.const_last_capacity - 1 # get the old tree index of the first element in the data frame
for i in range(k):
changed_tree_idx = original_tree_idx * 2 + 1 # the current tree index of the first element of data frame
change_to_data_idx = changed_tree_idx - self.capacity + 1 # should be change the element to this data index
original_data_idx = i
tmp_data = np.insert(tmp_data, change_to_data_idx, self.data[original_data_idx])
original_tree_idx += 1
self.data = tmp_data
def delete(self, leaf_nodes):
num_delete = leaf_nodes.shape[0]
lp = self.get_leaves_part()
del_idx = leaf_nodes - (self.capacity - 1)
lp = np.delete(lp, del_idx)
self.last_capacity = self.capacity
self.const_last_capacity = self.last_capacity
self.capacity -= num_delete
self.data_pointer -= num_delete
self.reconstruct(self.capacity, lp)
def reconstruct(self, capacity, leave_part):
"""
reconstruct the tree
:return:
"""
self.tree = np.zeros(2 * capacity - 1)
for l in range(leave_part.shape[0]):
leaf = leave_part[l]
tp = l + capacity - 1
self.update(tp, leaf)
def delete_node(self, leaf_nodes):
# TODO: Under construction
# TODO: how to keep the structure of the tree after deletion
# TODO: How about reconstruct the tree? using the data frame?
"""
        Delete k specific nodes
        :param leaf_nodes: an array of leaf node indices to be deleted
:return:
"""
num_delete = leaf_nodes.shape[0]
del_data_idx = leaf_nodes - (self.capacity - 1)
all_delete_idx = np.copy(leaf_nodes) # save all nodes which need to be deleted
# Keep the tree structure as [ N -1 | N ]
# no matter how to delete
# Now, we have to create a list of deleting nodes
for idx in leaf_nodes:
if idx % 2 != 0:
parent_idx = (idx - 1) // 2
if (idx + 1) in leaf_nodes:
# idx is left child and (idx+1) is right child;
# if both are deleted, partent have to be delete as well
all_delete_idx = np.append(all_delete_idx, parent_idx)
# When parent is going to be deleted, their brother need to be deleted as well
if parent_idx % 2 != 0:
right_parent_idx = parent_idx + 1
all_delete_idx = np.append(all_delete_idx, right_parent_idx)
elif parent_idx % 2 == 0:
left_parent_idx = parent_idx - 1
all_delete_idx = np.append(all_delete_idx, left_parent_idx)
else:
right_idx = idx + 1
# If the left child is deleted, the right child should be deleted as well;
# Then the value of the right child will be assign to its parent
# The value of their parent is equal to the value of right child after the left being delete
# Thus, the right node is useless and should be deleted
all_delete_idx = np.append(all_delete_idx, right_idx)
# Update the value of parent; because the left node is deleted
# tree[parent_idx] = tree[parent_idx] - tree[idx]
while parent_idx != 0:
# propagate to the root
self.tree[parent_idx] -= self.tree[idx]
parent_idx = (idx - 1) // 2
elif idx % 2 == 0:
parent_idx = (idx - 1) // 2
left_idx = idx - 1
all_delete_idx = np.append(all_delete_idx, left_idx)
while parent_idx != 0:
# propagate to the root
self.tree[parent_idx] -= self.tree[idx]
parent_idx = (idx - 1) // 2
# Start to delete
self.tree = np.delete(self.tree, all_delete_idx)
self.data = np.delete(self.data, del_data_idx)
# Update parameters
self.last_capacity = self.capacity
self.const_last_capacity = self.last_capacity
self.capacity -= num_delete
self.data_pointer -= num_delete
def get_leaf(self, v):
"""
Tree structure and array storage:
Tree index:
0 -> storing priority sum
/ \
1 2
/ \ / \
3 4 5 6 -> storing priority for transitions
Array type for storing:
[0,1,2,3,4,5,6]
"""
def check():
if self.data_pointer != self.data.shape[0]:
raise Exception("Learning and memory adjustment MUST be synchronous. \
\ni.e.learn and adjust in every k step ")
parent_idx = 0
while True: # the while loop is faster than the method in the reference code
cl_idx = 2 * parent_idx + 1 # this leaf's left and right kids
cr_idx = cl_idx + 1
if cl_idx >= len(self.tree): # reach bottom, end search
leaf_idx = parent_idx
break
else: # downward search, always search for a higher priority node
if v <= self.tree[cl_idx]:
parent_idx = cl_idx
else:
v -= self.tree[cl_idx]
parent_idx = cr_idx
try:
# check whether training and memory adjustment
check()
except Exception as err:
print(1, err)
data_idx = leaf_idx - self.capacity + 1
return leaf_idx, self.tree[leaf_idx], self.data[data_idx]
def get_leaves_part(self):
lp = self.tree[self.capacity-1:]
return lp
@property
def total_p(self):
return self.tree[0] # the root
class TreeNode():
def __init__(self, priority, previous):
self.priority = priority
self.left = None
self.right = None
self.previous = previous # if previous is None, this node is root
if __name__ == '__main__':
"""
class Test_Memory(object):
epsilon = 0.01 # small amount to avoid zero priority
alpha = 0.6 # [0~1] convert the importance of TD error to priority
beta = 0.4 # importance-sampling, from initial value increasing to 1
beta_increment_per_sampling = 0.001
abs_err_upper = 1. # clipped abs error
def __init__(self, capacity):
self.tree = SumTreeV2(capacity)
# ========= Memory Operations ============
def store(self, transition):
max_p = np.max(self.tree.tree[-self.tree.capacity:])
if max_p == 0:
max_p = self.abs_err_upper
self.tree.add(max_p, transition) # set the max p for new p
def enlarge(self, k):
self.tree.capacity_enlarge(k)
def sample(self, n):
b_idx, b_memory, ISWeights = np.empty((n,), dtype=np.int32), np.empty(
(n, self.tree.data[0].size)), np.empty((n, 1))
pri_seg = self.tree.total_p / n # priority segment
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling]) # max = 1
min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p # for later calculate ISweight
for i in range(n):
a, b = pri_seg * i, pri_seg * (i + 1)
v = np.random.uniform(a, b)
idx, p, data = self.tree.get_leaf(v)
prob = p / self.tree.total_p
ISWeights[i, 0] = np.power(prob / min_prob, -self.beta)
b_idx[i], b_memory[i, :] = idx, data
return b_idx, b_memory, ISWeights
def batch_update(self, tree_idx, abs_errors):
abs_errors += self.epsilon # convert to abs and avoid 0
clipped_errors = np.minimum(abs_errors, self.abs_err_upper)
ps = np.power(clipped_errors, self.alpha)
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
def gen_t(k):
transitions = []
if k == 0:
transition = np.hstack((0, 0, 0, 0))
transitions.append(transition)
for i in range(k):
s = 1 + i
a = 2 + i
r = 3 + i
s_ = 4 + i
transition = np.hstack((s, a, r, s_))
transitions.append(transition)
return transitions
test_memory = Test_Memory(50)
for i in range(50):
s = 1 + i
a = 2 + i
r = 3 + i
s_ = 4 + i
ts = np.hstack((s, a, r, s_))
test_memory.store(ts)
#test_memory.batch_update()
test_memory.enlarge(5)
ts = np.hstack((100, 20, 30, 40))
for i in range(5):
ts = np.hstack((100+i, 20+i, 30+i, 40+i))
test_memory.store(ts)
b_idx, b_memory, ISWeights = test_memory.sample(10)
print()
"""
def test_1():
root = TreeNode(3, None)
root.left = TreeNode(1, root)
root.right = TreeNode(2, root)
print("Left child's priority: {0}".format(root.left.priority))
print("Right child's priority: {0}".format(root.right.priority))
print("Left child backtrack: {0}".format(root.left.previous.priority))
print("Right child backtrack: {0}".format(root.right.previous.priority))
def test_2():
tree = SumTreeV2(5)
for i in range(5):
i += 1
tree.add(p=i, data=i*100)
tree.capacity_enlarge(6)
for i in range(6):
tree.add(p=10 + i, data=20 + i)
return tree
tree = test_2()
#previous_tree = tree
delete_idx = np.array([7, 8, 10, 11])
tree.delete(delete_idx)
print(tree.get_leaves_part())
tree.capacity_enlarge(7)
for i in range(7):
tree.add(p=10 + i, data=20 + i)
l = tree.get_leaf(4)
print(l)
def test3():
tree = SumTreeV2(5)
for i in range(5):
i += 1
tree.add(p=i, data=i)
count = 0
while count < 20:
l, ll, lll = tree.get_leaf(4)
choice = np.random.uniform()
if count % 4 == 0:
tree.capacity_enlarge(5)
for i in range(5):
tree.add(p=10 + i, data=10 + i)
else:
capacity = tree.capacity
delete_idx = np.array([capacity + 1])
tree.delete(delete_idx)
print("{0} {1} {2}".format(l, ll, lll))
count += 1
#tree = test_2()
#tree.capacity_enlarge(5)
#print()
|
the-stack_106_28397 | import numpy as np
import pandas as pd
from .base_processing import path_data
"""
3081 Foot measured for bone density
19 Heel ultrasound method
3146 Speed of sound through heel
3143 Ankle spacing width
3144 Heel Broadband ultrasound attenuation, direct entry
3147 Heel quantitative ultrasound index (QUI), direct entry
3148 Heel bone mineral density (BMD)
78 Heel bone mineral density (BMD) T-score, automated
3086 Speed of sound through heel, manual entry
3085 Heel Broadband ultrasound attenuation (BUA), manual entry
3083 Heel quantitative ultrasound index (QUI), manual entry
3084 Heel bone mineral density (BMD), manual entry
77 Heel bone ultrasound T-score, manual entry
4092 Heel ultrasound method (left)
4095 Heel ultrasound method (right)
4100 Ankle spacing width (left)
4119 Ankle spacing width (right)
4103 Speed of sound through heel (left)
4122 Speed of sound through heel (right)
4142 Speed of sound through heel, manual entry (left)
4147 Speed of sound through heel, manual entry (right)
4141 Heel broadband ultrasound attenuation (BUA), manual entry (left)
4146 Heel broadband ultrasound attenuation (BUA), manual entry (right)
4101 Heel broadband ultrasound attenuation (left)
4120 Heel broadband ultrasound attenuation (right)
4139 Heel quantitative ultrasound index (QUI), manual entry (left)
4144 Heel quantitative ultrasound index (QUI), manual entry (right)
4104 Heel quantitative ultrasound index (QUI), direct entry (left)
4123 Heel quantitative ultrasound index (QUI), direct entry (right)
4140 Heel bone mineral density (BMD), manual entry (left)
4145 Heel bone mineral density (BMD), manual entry (right)
4105 Heel bone mineral density (BMD) (left)
4124 Heel bone mineral density (BMD) (right)
4138 Heel bone mineral density (BMD) T-score, manual entry (left)
4143 Heel bone mineral density (BMD) T-score, manual entry (right)
4106 Heel bone mineral density (BMD) T-score, automated (left)
4125 Heel bone mineral density (BMD) T-score, automated (right)
"""
def read_bone_densitometry_data(**kwargs):
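    # The first block handles the single-foot fields of instance 0, where field
    # 19 selects between direct and manual entry; the loop further below handles
    # the per-instance left/right fields and averages the two sides.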
## Read first half of the data for instance 0 :
instance = 0
age_col = '21003-' + str(instance) + '.0'
cols_age_eid_sex = ['eid', age_col, '31-0.0']
d = pd.read_csv(path_data, usecols = cols_age_eid_sex + ['19-0.0', '3146-0.0', '3143-0.0', '3144-0.0', '3147-0.0', '3148-0.0', '78-0.0',
'3086-0.0', '3085-0.0', '3083-0.0', '3084-0.0', '77-0.0'], **kwargs)
d = d[d['19-0.0'].isin([1, 2])]
def custom_apply(row):
method = row['19-0.0']
cols = ['eid', 'Age when attended assessment centre', 'Sex', 'Ankle spacing width', 'Speed of sound through heel', 'Heel Broadband ultrasound attenuation', 'Heel quantitative ultrasound index (QUI)',
'Heel bone mineral density (BMD)', 'Heel bone mineral density (BMD) T-score']
if method == 1:
values = str(int(row['eid'])), row[age_col], row['31-0.0'], row['3143-0.0'], row['3146-0.0'], row['3144-0.0'], row['3147-0.0'], row['3148-0.0'], row['78-0.0']
elif method == 2:
values = str(int(row['eid'])), row[age_col], row['31-0.0'], row['3143-0.0'], row['3086-0.0'], row['3085-0.0'], row['3083-0.0'], row['3084-0.0'], row['77-0.0']
return pd.Series(values, index = cols)
d = d.apply(custom_apply, axis = 1)
d['id'] = d['eid'] + '_' + str(0)
d['eid'] = d['eid'].astype('int')
d = d.set_index('id')
list_df = []
for instance in range(4):
age_col = '21003-' + str(instance) + '.0'
cols_age_eid_sex = ['eid', age_col, '31-0.0']
cols = [4092, 4095, 4100, 4119, 4103, 4122, 4142, 4147, 4141, 4146, 4101, 4120, 4139, 4144, 4104, 4123, 4140,
4145, 4105, 4124, 4138, 4143, 4106, 4125]
cols_instance = [str(elem) + '-%s.0' % instance for elem in cols]
map_col_to_col_instance = dict(zip(cols, cols_instance))
raw = pd.read_csv(path_data, usecols = cols_age_eid_sex + cols_instance, **kwargs)
raw = raw[raw[map_col_to_col_instance[4092]].isin([1, 2]) & raw[map_col_to_col_instance[4095]].isin([1, 2])]
def custom_apply2(row):
method_left = row[map_col_to_col_instance[4092]]
method_right = row[map_col_to_col_instance[4095]]
if method_left == 1:
values_left = row[map_col_to_col_instance[4100]], row[map_col_to_col_instance[4103]], row[map_col_to_col_instance[4101]], \
row[map_col_to_col_instance[4104]], row[map_col_to_col_instance[4105]], row[map_col_to_col_instance[4106]]
else : #method_left == 2:
values_left = row[map_col_to_col_instance[4100]], row[map_col_to_col_instance[4142]], row[map_col_to_col_instance[4141]], \
row[map_col_to_col_instance[4139]], row[map_col_to_col_instance[4140]], row[map_col_to_col_instance[4138]]
if method_right == 1:
values_right = row[map_col_to_col_instance[4119]], row[map_col_to_col_instance[4122]], row[map_col_to_col_instance[4120]], \
row[map_col_to_col_instance[4123]], row[map_col_to_col_instance[4124]], row[map_col_to_col_instance[4125]]
else:
values_right = row[map_col_to_col_instance[4119]], row[map_col_to_col_instance[4147]], row[map_col_to_col_instance[4146]], \
row[map_col_to_col_instance[4144]], row[map_col_to_col_instance[4145]], row[map_col_to_col_instance[4143]]
values = (np.array(values_left) + np.array(values_right))/2
cols = ['eid', 'Age when attended assessment centre', 'Sex', 'Ankle spacing width', 'Speed of sound through heel', 'Heel Broadband ultrasound attenuation', 'Heel quantitative ultrasound index (QUI)',
'Heel bone mineral density (BMD)', 'Heel bone mineral density (BMD) T-score']
values = [str(int(row['eid'])), row[age_col], row['31-0.0']] + list(values)
return pd.Series(values, index = cols)
df_instance = raw.apply(custom_apply2, axis = 1)
df_instance['id'] = df_instance['eid'] + '_' + str(instance)
df_instance = df_instance.set_index('id')
df_instance['eid'] = df_instance['eid'].astype('int')
list_df.append(df_instance)
return pd.concat([pd.concat(list_df), d])
|
the-stack_106_28398 | #!/usr/bin/env python3
#coding: utf-8
from ev3dev.ev3 import *
import time
m1 = LargeMotor('outC')  # Left motor
m2 = LargeMotor('outD')  # Right motor
gy = GyroSensor('in1')
gy.mode = 'GYRO-ANG'
def Modulo(x):
if x < 0:
return x * -1
return x
def Girar(ang):
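    # Spin in place (left motor forward, right motor backward) until the gyro
    # reading has changed by `ang` degrees from the starting angle.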
atual = gy.value()
while Modulo((gy.value() - atual)) < ang:
m1.run_forever(speed_sp=100)
m2.run_forever(speed_sp=-100)
m1.stop(stop_action="brake")
m2.stop(stop_action="brake")
while True:
Girar(90)
time.sleep(2)
|
the-stack_106_28399 | import time
import numpy as np
import tensorflow as tf
from gym.spaces import Discrete, Box
from stable_baselines import logger
from stable_baselines.a2c.utils import batch_to_seq, seq_to_batch, Scheduler, find_trainable_variables, EpisodeStats, \
get_by_index, check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger
from stable_baselines.acer.buffer import Buffer
from stable_baselines.common import ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.common.policies import LstmPolicy, ActorCriticPolicy
def strip(var, n_envs, n_steps, flat=False):
"""
Removes the last step in the batch
:param var: (TensorFlow Tensor) The input Tensor
:param n_envs: (int) The number of environments
:param n_steps: (int) The number of steps to run for each environment
:param flat: (bool) If the input Tensor is flat
:return: (TensorFlow Tensor) the input tensor, without the last step in the batch
"""
out_vars = batch_to_seq(var, n_envs, n_steps + 1, flat)
return seq_to_batch(out_vars[:-1], flat)
def q_retrace(rewards, dones, q_i, values, rho_i, n_envs, n_steps, gamma):
"""
Calculates the target Q-retrace
:param rewards: ([TensorFlow Tensor]) The rewards
:param dones: ([TensorFlow Tensor])
:param q_i: ([TensorFlow Tensor]) The Q values for actions taken
:param values: ([TensorFlow Tensor]) The output of the value functions
:param rho_i: ([TensorFlow Tensor]) The importance weight for each action
:param n_envs: (int) The number of environments
:param n_steps: (int) The number of steps to run for each environment
:param gamma: (float) The discount value
:return: ([TensorFlow Tensor]) the target Q-retrace
"""
rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), n_envs, n_steps, True) # list of len steps, shape [n_envs]
reward_seq = batch_to_seq(rewards, n_envs, n_steps, True) # list of len steps, shape [n_envs]
done_seq = batch_to_seq(dones, n_envs, n_steps, True) # list of len steps, shape [n_envs]
q_is = batch_to_seq(q_i, n_envs, n_steps, True)
value_sequence = batch_to_seq(values, n_envs, n_steps + 1, True)
final_value = value_sequence[-1]
qret = final_value
qrets = []
for i in range(n_steps - 1, -1, -1):
check_shape([qret, done_seq[i], reward_seq[i], rho_bar[i], q_is[i], value_sequence[i]], [[n_envs]] * 6)
qret = reward_seq[i] + gamma * qret * (1.0 - done_seq[i])
qrets.append(qret)
qret = (rho_bar[i] * (qret - q_is[i])) + value_sequence[i]
qrets = qrets[::-1]
qret = seq_to_batch(qrets, flat=True)
return qret
class ACER(ActorCriticRLModel):
"""
The ACER (Actor-Critic with Experience Replay) model class, https://arxiv.org/abs/1611.01224
:param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) The discount value
:param n_steps: (int) The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
:param num_procs: (int) The number of threads for TensorFlow operations
:param q_coef: (float) The weight for the loss on the Q value
:param ent_coef: (float) The weight for the entropic loss
:param max_grad_norm: (float) The clipping value for the maximum gradient
:param learning_rate: (float) The initial learning rate for the RMS prop optimizer
:param lr_schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',
'double_linear_con', 'middle_drop' or 'double_middle_drop')
:param rprop_epsilon: (float) RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update)
(default: 1e-5)
:param rprop_alpha: (float) RMSProp decay parameter (default: 0.99)
:param buffer_size: (int) The buffer size in number of steps
:param replay_ratio: (float) The number of replay learning per on policy learning on average,
using a poisson distribution
:param replay_start: (int) The minimum number of steps in the buffer, before learning replay
:param correction_term: (float) Importance weight clipping factor (default: 10)
    :param trust_region: (bool) Whether or not the algorithm estimates the gradient KL divergence
between the old and updated policy and uses it to determine step size (default: True)
:param alpha: (float) The decay rate for the Exponential moving average of the parameters
:param delta: (float) max KL divergence between the old policy and updated policy (default: 1)
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
"""
def __init__(self, policy, env, gamma=0.99, n_steps=20, num_procs=1, q_coef=0.5, ent_coef=0.01, max_grad_norm=10,
learning_rate=7e-4, lr_schedule='linear', rprop_alpha=0.99, rprop_epsilon=1e-5, buffer_size=5000,
replay_ratio=4, replay_start=1000, correction_term=10.0, trust_region=True, alpha=0.99, delta=1,
verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None):
super(ACER, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,
_init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs)
self.n_steps = n_steps
self.replay_ratio = replay_ratio
self.buffer_size = buffer_size
self.replay_start = replay_start
self.gamma = gamma
self.alpha = alpha
self.correction_term = correction_term
self.q_coef = q_coef
self.ent_coef = ent_coef
self.trust_region = trust_region
self.delta = delta
self.max_grad_norm = max_grad_norm
self.rprop_alpha = rprop_alpha
self.rprop_epsilon = rprop_epsilon
self.learning_rate = learning_rate
self.lr_schedule = lr_schedule
self.num_procs = num_procs
self.tensorboard_log = tensorboard_log
self.graph = None
self.sess = None
self.action_ph = None
self.done_ph = None
self.reward_ph = None
self.mu_ph = None
self.learning_rate_ph = None
self.params = None
self.polyak_model = None
self.learning_rate_schedule = None
self.run_ops = None
self.names_ops = None
self.train_model = None
self.step_model = None
self.step = None
self.proba_step = None
self.initial_state = None
self.n_act = None
self.n_batch = None
self.summary = None
self.episode_reward = None
if _init_setup_model:
self.setup_model()
def set_env(self, env):
if env is not None:
assert self.n_envs == env.num_envs, \
"Error: the environment passed must have the same number of environments as the model was trained on." \
"This is due to ACER not being capable of changing the number of environments."
super().set_env(env)
def setup_model(self):
with SetVerbosity(self.verbose):
assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the ACER model must be " \
"an instance of common.policies.ActorCriticPolicy."
if isinstance(self.action_space, Discrete):
self.n_act = self.action_space.n
continuous = False
elif isinstance(self.action_space, Box):
# self.n_act = self.action_space.shape[-1]
# continuous = True
raise NotImplementedError("WIP: Acer does not support Continuous actions yet.")
else:
                raise ValueError("Error: ACER does not work with the {} action space.".format(self.action_space))
self.n_batch = self.n_envs * self.n_steps
self.graph = tf.Graph()
with self.graph.as_default():
self.sess = tf_util.make_session(num_cpu=self.num_procs, graph=self.graph)
n_batch_step = None
if issubclass(self.policy, LstmPolicy):
n_batch_step = self.n_envs
n_batch_train = self.n_envs * (self.n_steps + 1)
step_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
n_batch_step, reuse=False, **self.policy_kwargs)
self.params = find_trainable_variables("model")
with tf.variable_scope("train_model", reuse=True,
custom_getter=tf_util.outer_scope_getter("train_model")):
train_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs,
self.n_steps + 1, n_batch_train, reuse=True, **self.policy_kwargs)
with tf.variable_scope("moving_average"):
# create averaged model
ema = tf.train.ExponentialMovingAverage(self.alpha)
ema_apply_op = ema.apply(self.params)
def custom_getter(getter, name, *args, **kwargs):
name = name.replace("polyak_model/", "")
val = ema.average(getter(name, *args, **kwargs))
return val
with tf.variable_scope("polyak_model", reuse=True, custom_getter=custom_getter):
self.polyak_model = polyak_model = self.policy(self.sess, self.observation_space, self.action_space,
self.n_envs, self.n_steps + 1,
self.n_envs * (self.n_steps + 1), reuse=True,
**self.policy_kwargs)
with tf.variable_scope("loss", reuse=False):
self.done_ph = tf.placeholder(tf.float32, [self.n_batch]) # dones
self.reward_ph = tf.placeholder(tf.float32, [self.n_batch]) # rewards, not returns
self.mu_ph = tf.placeholder(tf.float32, [self.n_batch, self.n_act]) # mu's
self.action_ph = train_model.pdtype.sample_placeholder([self.n_batch])
self.learning_rate_ph = tf.placeholder(tf.float32, [])
eps = 1e-6
# Notation: (var) = batch variable, (var)s = sequence variable,
# (var)_i = variable index by action at step i
# shape is [n_envs * (n_steps + 1)]
if continuous:
value = train_model.value_fn[:, 0]
else:
value = tf.reduce_sum(train_model.policy_proba * train_model.q_value, axis=-1)
rho, rho_i_ = None, None
if continuous:
action_ = strip(train_model.proba_distribution.sample(), self.n_envs, self.n_steps)
distribution_f = tf.contrib.distributions.MultivariateNormalDiag(
loc=strip(train_model.proba_distribution.mean, self.n_envs, self.n_steps),
scale_diag=strip(train_model.proba_distribution.logstd, self.n_envs, self.n_steps))
f_polyak = tf.contrib.distributions.MultivariateNormalDiag(
loc=strip(polyak_model.proba_distribution.mean, self.n_envs, self.n_steps),
scale_diag=strip(polyak_model.proba_distribution.logstd, self.n_envs, self.n_steps))
f_i = distribution_f.prob(self.action_ph)
f_i_ = distribution_f.prob(action_)
f_polyak_i = f_polyak.prob(self.action_ph)
phi_i = strip(train_model.proba_distribution.mean, self.n_envs, self.n_steps)
q_value = strip(train_model.value_fn, self.n_envs, self.n_steps)
q_i = q_value[:, 0]
rho_i = tf.reshape(f_i, [-1, 1]) / (self.mu_ph + eps)
rho_i_ = tf.reshape(f_i_, [-1, 1]) / (self.mu_ph + eps)
qret = q_retrace(self.reward_ph, self.done_ph, q_i, value, tf.pow(rho_i, 1/self.n_act),
self.n_envs, self.n_steps, self.gamma)
else:
# strip off last step
# f is a distribution, chosen to be Gaussian distributions
# with fixed diagonal covariance and mean \phi(x)
# in the paper
distribution_f, f_polyak, q_value = \
map(lambda variables: strip(variables, self.n_envs, self.n_steps),
[train_model.policy_proba, polyak_model.policy_proba, train_model.q_value])
# Get pi and q values for actions taken
f_i = get_by_index(distribution_f, self.action_ph)
f_i_ = distribution_f
phi_i = distribution_f
f_polyak_i = f_polyak
q_i = get_by_index(q_value, self.action_ph)
# Compute ratios for importance truncation
rho = distribution_f / (self.mu_ph + eps)
rho_i = get_by_index(rho, self.action_ph)
# Calculate Q_retrace targets
qret = q_retrace(self.reward_ph, self.done_ph, q_i, value, rho_i, self.n_envs, self.n_steps,
self.gamma)
# Calculate losses
# Entropy
entropy = tf.reduce_sum(train_model.proba_distribution.entropy())
# Policy Gradient loss, with truncated importance sampling & bias correction
value = strip(value, self.n_envs, self.n_steps, True)
# check_shape([qret, value, rho_i, f_i], [[self.n_envs * self.n_steps]] * 4)
# check_shape([rho, distribution_f, q_value], [[self.n_envs * self.n_steps, self.n_act]] * 2)
# Truncated importance sampling
adv = qret - value
log_f = tf.log(f_i + eps)
# [n_envs * n_steps]
gain_f = log_f * tf.stop_gradient(adv * tf.minimum(self.correction_term, rho_i))
loss_f = -tf.reduce_mean(gain_f)
# Bias correction for the truncation
adv_bc = (q_value - tf.reshape(value, [self.n_envs * self.n_steps, 1])) # [n_envs * n_steps, n_act]
# check_shape([adv_bc, log_f_bc], [[self.n_envs * self.n_steps, self.n_act]] * 2)
if continuous:
gain_bc = tf.stop_gradient(adv_bc *
tf.nn.relu(1.0 - (self.correction_term / (rho_i_ + eps))) *
f_i_)
else:
log_f_bc = tf.log(f_i_ + eps) # / (f_old + eps)
gain_bc = tf.reduce_sum(log_f_bc *
tf.stop_gradient(
adv_bc *
tf.nn.relu(1.0 - (self.correction_term / (rho + eps))) *
f_i_),
axis=1)
# IMP: This is sum, as expectation wrt f
loss_bc = -tf.reduce_mean(gain_bc)
loss_policy = loss_f + loss_bc
# Value/Q function loss, and explained variance
check_shape([qret, q_i], [[self.n_envs * self.n_steps]] * 2)
explained_variance = q_explained_variance(tf.reshape(q_i, [self.n_envs, self.n_steps]),
tf.reshape(qret, [self.n_envs, self.n_steps]))
loss_q = tf.reduce_mean(tf.square(tf.stop_gradient(qret) - q_i) * 0.5)
# Net loss
check_shape([loss_policy, loss_q, entropy], [[]] * 3)
loss = loss_policy + self.q_coef * loss_q - self.ent_coef * entropy
tf.summary.scalar('entropy_loss', entropy)
tf.summary.scalar('policy_gradient_loss', loss_policy)
tf.summary.scalar('value_function_loss', loss_q)
tf.summary.scalar('loss', loss)
norm_grads_q, norm_grads_policy, avg_norm_grads_f = None, None, None
avg_norm_k, avg_norm_g, avg_norm_k_dot_g, avg_norm_adj = None, None, None, None
if self.trust_region:
# [n_envs * n_steps, n_act]
grad = tf.gradients(- (loss_policy - self.ent_coef * entropy) * self.n_steps * self.n_envs,
phi_i)
# [n_envs * n_steps, n_act] # Directly computed gradient of KL divergence wrt f
kl_grad = - f_polyak_i / (f_i_ + eps)
k_dot_g = tf.reduce_sum(kl_grad * grad, axis=-1)
adj = tf.maximum(0.0, (tf.reduce_sum(kl_grad * grad, axis=-1) - self.delta) / (
tf.reduce_sum(tf.square(kl_grad), axis=-1) + eps)) # [n_envs * n_steps]
# Calculate stats (before doing adjustment) for logging.
avg_norm_k = avg_norm(kl_grad)
avg_norm_g = avg_norm(grad)
avg_norm_k_dot_g = tf.reduce_mean(tf.abs(k_dot_g))
avg_norm_adj = tf.reduce_mean(tf.abs(adj))
grad = grad - tf.reshape(adj, [self.n_envs * self.n_steps, 1]) * kl_grad
                    # These are trust region adjusted gradients wrt f, i.e. statistics of policy pi
grads_f = -grad / (self.n_envs * self.n_steps)
grads_policy = tf.gradients(f_i_, self.params, grads_f)
grads_q = tf.gradients(loss_q * self.q_coef, self.params)
grads = [gradient_add(g1, g2, param, verbose=self.verbose)
for (g1, g2, param) in zip(grads_policy, grads_q, self.params)]
avg_norm_grads_f = avg_norm(grads_f) * (self.n_steps * self.n_envs)
norm_grads_q = tf.global_norm(grads_q)
norm_grads_policy = tf.global_norm(grads_policy)
else:
grads = tf.gradients(loss, self.params)
norm_grads = None
if self.max_grad_norm is not None:
grads, norm_grads = tf.clip_by_global_norm(grads, self.max_grad_norm)
grads = list(zip(grads, self.params))
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('rewards', tf.reduce_mean(self.reward_ph))
tf.summary.histogram('rewards', self.reward_ph)
tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate))
tf.summary.histogram('learning_rate', self.learning_rate)
tf.summary.scalar('advantage', tf.reduce_mean(adv))
tf.summary.histogram('advantage', adv)
                    tf.summary.scalar('action_probability', tf.reduce_mean(self.mu_ph))
                    tf.summary.histogram('action_probability', self.mu_ph)
if len(self.observation_space.shape) == 3:
tf.summary.image('observation', train_model.obs_ph)
else:
tf.summary.histogram('observation', train_model.obs_ph)
trainer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.rprop_alpha,
epsilon=self.rprop_epsilon)
_opt_op = trainer.apply_gradients(grads)
# so when you call _train, you first do the gradient step, then you apply ema
with tf.control_dependencies([_opt_op]):
_train = tf.group(ema_apply_op)
# Ops/Summaries to run, and their names for logging
assert norm_grads is not None
run_ops = [_train, loss, loss_q, entropy, loss_policy, loss_f, loss_bc, explained_variance, norm_grads]
names_ops = ['loss', 'loss_q', 'entropy', 'loss_policy', 'loss_f', 'loss_bc', 'explained_variance',
'norm_grads']
                self.run_ops = run_ops
                self.names_ops = names_ops
                if self.trust_region:
                    self.run_ops = run_ops + [norm_grads_q, norm_grads_policy, avg_norm_grads_f, avg_norm_k, avg_norm_g,
                                              avg_norm_k_dot_g, avg_norm_adj]
                    self.names_ops = names_ops + ['norm_grads_q', 'norm_grads_policy', 'avg_norm_grads_f', 'avg_norm_k',
                                                  'avg_norm_g', 'avg_norm_k_dot_g', 'avg_norm_adj']
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.proba_step = step_model.proba_step
self.initial_state = step_model.initial_state
tf.global_variables_initializer().run(session=self.sess)
self.summary = tf.summary.merge_all()
def _train_step(self, obs, actions, rewards, dones, mus, states, masks, steps, writer=None):
"""
applies a training step to the model
:param obs: ([float]) The input observations
:param actions: ([float]) The actions taken
:param rewards: ([float]) The rewards from the environment
:param dones: ([bool]) Whether or not the episode is over (aligned with reward, used for reward calculation)
:param mus: ([float]) The logits values
:param states: ([float]) The states (used for recurrent policies)
:param masks: ([bool]) Whether or not the episode is over (used for recurrent policies)
:param steps: (int) the number of steps done so far (can be None)
:param writer: (TensorFlow Summary.writer) the writer for tensorboard
:return: ([str], [float]) the list of update operation name, and the list of the results of the operations
"""
cur_lr = self.learning_rate_schedule.value_steps(steps)
td_map = {self.train_model.obs_ph: obs, self.polyak_model.obs_ph: obs, self.action_ph: actions,
self.reward_ph: rewards, self.done_ph: dones, self.mu_ph: mus, self.learning_rate_ph: cur_lr}
if states is not None:
td_map[self.train_model.states_ph] = states
td_map[self.train_model.masks_ph] = masks
td_map[self.polyak_model.states_ph] = states
td_map[self.polyak_model.masks_ph] = masks
if writer is not None:
# run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)
if (1 + (steps / self.n_batch)) % 10 == 0:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
step_return = self.sess.run([self.summary] + self.run_ops, td_map, options=run_options,
run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step%d' % steps)
else:
step_return = self.sess.run([self.summary] + self.run_ops, td_map)
writer.add_summary(step_return[0], steps)
step_return = step_return[1:]
else:
step_return = self.sess.run(self.run_ops, td_map)
return self.names_ops, step_return[1:] # strip off _train
def learn(self, total_timesteps, callback=None, seed=None, log_interval=100, tb_log_name="ACER"):
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name) as writer:
self._setup_learn(seed)
self.learning_rate_schedule = Scheduler(initial_value=self.learning_rate, n_values=total_timesteps,
schedule=self.lr_schedule)
episode_stats = EpisodeStats(self.n_steps, self.n_envs)
runner = _Runner(env=self.env, model=self, n_steps=self.n_steps)
self.episode_reward = np.zeros((self.n_envs,))
if self.replay_ratio > 0:
buffer = Buffer(env=self.env, n_steps=self.n_steps, size=self.buffer_size)
else:
buffer = None
t_start = time.time()
# n_batch samples, 1 on_policy call and multiple off-policy calls
for steps in range(0, total_timesteps, self.n_batch):
enc_obs, obs, actions, rewards, mus, dones, masks = runner.run()
episode_stats.feed(rewards, dones)
if buffer is not None:
buffer.put(enc_obs, actions, rewards, mus, dones, masks)
if writer is not None:
self.episode_reward = total_episode_reward_logger(self.episode_reward,
rewards.reshape((self.n_envs, self.n_steps)),
dones.reshape((self.n_envs, self.n_steps)),
writer, steps)
# reshape stuff correctly
obs = obs.reshape(runner.batch_ob_shape)
actions = actions.reshape([runner.n_batch])
rewards = rewards.reshape([runner.n_batch])
mus = mus.reshape([runner.n_batch, runner.n_act])
dones = dones.reshape([runner.n_batch])
masks = masks.reshape([runner.batch_ob_shape[0]])
names_ops, values_ops = self._train_step(obs, actions, rewards, dones, mus, self.initial_state, masks,
steps, writer)
if callback is not None:
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
                    if callback(locals(), globals()) is False:
break
if self.verbose >= 1 and (int(steps / runner.n_batch) % log_interval == 0):
logger.record_tabular("total_timesteps", steps)
logger.record_tabular("fps", int(steps / (time.time() - t_start)))
# IMP: In EpisodicLife env, during training, we get done=True at each loss of life,
# not just at the terminal state. Thus, this is mean until end of life, not end of episode.
# For true episode rewards, see the monitor files in the log folder.
logger.record_tabular("mean_episode_length", episode_stats.mean_length())
logger.record_tabular("mean_episode_reward", episode_stats.mean_reward())
for name, val in zip(names_ops, values_ops):
logger.record_tabular(name, float(val))
logger.dump_tabular()
if self.replay_ratio > 0 and buffer.has_atleast(self.replay_start):
samples_number = np.random.poisson(self.replay_ratio)
for _ in range(samples_number):
# get obs, actions, rewards, mus, dones from buffer.
obs, actions, rewards, mus, dones, masks = buffer.get()
# reshape stuff correctly
obs = obs.reshape(runner.batch_ob_shape)
actions = actions.reshape([runner.n_batch])
rewards = rewards.reshape([runner.n_batch])
mus = mus.reshape([runner.n_batch, runner.n_act])
dones = dones.reshape([runner.n_batch])
masks = masks.reshape([runner.batch_ob_shape[0]])
self._train_step(obs, actions, rewards, dones, mus, self.initial_state, masks, steps)
return self
def save(self, save_path):
data = {
"gamma": self.gamma,
"n_steps": self.n_steps,
"q_coef": self.q_coef,
"ent_coef": self.ent_coef,
"max_grad_norm": self.max_grad_norm,
"learning_rate": self.learning_rate,
"lr_schedule": self.lr_schedule,
"rprop_alpha": self.rprop_alpha,
"rprop_epsilon": self.rprop_epsilon,
"replay_ratio": self.replay_ratio,
"replay_start": self.replay_start,
"verbose": self.verbose,
"policy": self.policy,
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_envs": self.n_envs,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params = self.sess.run(self.params)
self._save_to_file(save_path, data=data, params=params)
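# Hedged usage sketch (illustrative only, not part of the original file): a
# typical training run with this class would look roughly like the commented
# code below; the policy class, environment factory and file name are
# placeholders, and the imports assume the stable-baselines package layout.
#
#   from stable_baselines.common.policies import CnnPolicy
#   from stable_baselines.common.vec_env import SubprocVecEnv
#
#   env = SubprocVecEnv([make_env(i) for i in range(4)])  # make_env is user-defined
#   model = ACER(CnnPolicy, env, n_steps=20, replay_ratio=4, verbose=1)
#   model.learn(total_timesteps=100000)
#   model.save("acer_model")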
class _Runner(AbstractEnvRunner):
def __init__(self, env, model, n_steps):
"""
A runner to learn the policy of an environment for a model
:param env: (Gym environment) The environment to learn from
:param model: (Model) The model to learn
:param n_steps: (int) The number of steps to run for each environment
"""
super(_Runner, self).__init__(env=env, model=model, n_steps=n_steps)
self.env = env
self.model = model
self.n_env = n_env = env.num_envs
if isinstance(env.action_space, Discrete):
self.n_act = env.action_space.n
else:
self.n_act = env.action_space.shape[-1]
self.n_batch = n_env * n_steps
if len(env.observation_space.shape) > 1:
self.raw_pixels = True
obs_height, obs_width, obs_num_channels = env.observation_space.shape
self.batch_ob_shape = (n_env * (n_steps + 1), obs_height, obs_width, obs_num_channels)
self.obs_dtype = np.uint8
self.obs = np.zeros((n_env, obs_height, obs_width, obs_num_channels), dtype=self.obs_dtype)
self.num_channels = obs_num_channels
else:
if len(env.observation_space.shape) == 1:
self.obs_dim = env.observation_space.shape[0]
else:
self.obs_dim = 1
self.raw_pixels = False
if isinstance(self.env.observation_space, Discrete):
self.batch_ob_shape = (n_env * (n_steps + 1),)
else:
self.batch_ob_shape = (n_env * (n_steps + 1), self.obs_dim)
self.obs_dtype = np.float32
self.n_steps = n_steps
self.states = model.initial_state
self.dones = [False for _ in range(n_env)]
def run(self):
"""
        Run a learning step of the model
:return: ([float], [float], [float], [float], [float], [bool], [float])
encoded observation, observations, actions, rewards, mus, dones, masks
"""
enc_obs = [self.obs]
mb_obs, mb_actions, mb_mus, mb_dones, mb_rewards = [], [], [], [], []
for _ in range(self.n_steps):
actions, _, states, _ = self.model.step(self.obs, self.states, self.dones)
mus = self.model.proba_step(self.obs, self.states, self.dones)
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_mus.append(mus)
mb_dones.append(self.dones)
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.env.action_space, Box):
clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
obs, rewards, dones, _ = self.env.step(clipped_actions)
            # states information for stateful models like LSTM
self.states = states
self.dones = dones
self.obs = obs
mb_rewards.append(rewards)
enc_obs.append(obs)
mb_obs.append(np.copy(self.obs))
mb_dones.append(self.dones)
enc_obs = np.asarray(enc_obs, dtype=self.obs_dtype).swapaxes(1, 0)
mb_obs = np.asarray(mb_obs, dtype=self.obs_dtype).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_mus = np.asarray(mb_mus, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
        mb_masks = mb_dones  # Used for stateful models like LSTMs to mask state when done
mb_dones = mb_dones[:, 1:] # Used for calculating returns. The dones array is now aligned with rewards
# shapes are now [nenv, nsteps, []]
# When pulling from buffer, arrays will now be reshaped in place, preventing a deep copy.
return enc_obs, mb_obs, mb_actions, mb_rewards, mb_mus, mb_dones, mb_masks
|
the-stack_106_28404 | """Support for interfacing to iTunes API."""
import logging
import requests
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET, SUPPORT_SHUFFLE_SET)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, CONF_SSL, STATE_IDLE, STATE_OFF, STATE_ON,
STATE_PAUSED, STATE_PLAYING)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'iTunes'
DEFAULT_PORT = 8181
DEFAULT_SSL = False
DEFAULT_TIMEOUT = 10
DOMAIN = 'itunes'
SUPPORT_ITUNES = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK | \
SUPPORT_PLAY_MEDIA | SUPPORT_PLAY | SUPPORT_TURN_OFF | SUPPORT_SHUFFLE_SET
SUPPORT_AIRPLAY = SUPPORT_VOLUME_SET | SUPPORT_TURN_ON | SUPPORT_TURN_OFF
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
})
class Itunes:
"""The iTunes API client."""
def __init__(self, host, port, use_ssl):
"""Initialize the iTunes device."""
self.host = host
self.port = port
self.use_ssl = use_ssl
@property
def _base_url(self):
"""Return the base URL for endpoints."""
if self.use_ssl:
uri_scheme = 'https://'
else:
uri_scheme = 'http://'
if self.port:
return '{}{}:{}'.format(uri_scheme, self.host, self.port)
return '{}{}'.format(uri_scheme, self.host)
def _request(self, method, path, params=None):
"""Make the actual request and return the parsed response."""
url = '{}{}'.format(self._base_url, path)
try:
if method == 'GET':
response = requests.get(url, timeout=DEFAULT_TIMEOUT)
elif method == 'POST':
                response = requests.post(url, params, timeout=DEFAULT_TIMEOUT)
elif method == 'PUT':
response = requests.put(url, params, timeout=DEFAULT_TIMEOUT)
elif method == 'DELETE':
response = requests.delete(url, timeout=DEFAULT_TIMEOUT)
return response.json()
except requests.exceptions.HTTPError:
return {'player_state': 'error'}
except requests.exceptions.RequestException:
return {'player_state': 'offline'}
def _command(self, named_command):
"""Make a request for a controlling command."""
return self._request('PUT', '/' + named_command)
def now_playing(self):
"""Return the current state."""
return self._request('GET', '/now_playing')
def set_volume(self, level):
"""Set the volume and returns the current state, level 0-100."""
return self._request('PUT', '/volume', {'level': level})
def set_muted(self, muted):
"""Mute and returns the current state, muted True or False."""
return self._request('PUT', '/mute', {'muted': muted})
def set_shuffle(self, shuffle):
"""Set the shuffle mode, shuffle True or False."""
return self._request('PUT', '/shuffle',
{'mode': ('songs' if shuffle else 'off')})
def play(self):
"""Set playback to play and returns the current state."""
return self._command('play')
def pause(self):
"""Set playback to paused and returns the current state."""
return self._command('pause')
def next(self):
"""Skip to the next track and returns the current state."""
return self._command('next')
def previous(self):
"""Skip back and returns the current state."""
return self._command('previous')
def stop(self):
"""Stop playback and return the current state."""
return self._command('stop')
def play_playlist(self, playlist_id_or_name):
"""Set a playlist to be current and returns the current state."""
response = self._request('GET', '/playlists')
playlists = response.get('playlists', [])
found_playlists = \
[playlist for playlist in playlists if
(playlist_id_or_name in [playlist["name"], playlist["id"]])]
if found_playlists:
playlist = found_playlists[0]
path = '/playlists/' + playlist['id'] + '/play'
return self._request('PUT', path)
def artwork_url(self):
"""Return a URL of the current track's album art."""
return self._base_url + '/artwork'
def airplay_devices(self):
"""Return a list of AirPlay devices."""
return self._request('GET', '/airplay_devices')
def airplay_device(self, device_id):
"""Return an AirPlay device."""
return self._request('GET', '/airplay_devices/' + device_id)
def toggle_airplay_device(self, device_id, toggle):
"""Toggle airplay device on or off, id, toggle True or False."""
command = 'on' if toggle else 'off'
path = '/airplay_devices/' + device_id + '/' + command
return self._request('PUT', path)
def set_volume_airplay_device(self, device_id, level):
"""Set volume, returns current state of device, id,level 0-100."""
path = '/airplay_devices/' + device_id + '/volume'
return self._request('PUT', path, {'level': level})
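# Hedged usage sketch (illustrative only, not part of the integration): the
# Itunes client above is a thin wrapper over the REST endpoints of an
# itunes-api server, so a manual round-trip could look roughly like the
# commented code below; the host and port are placeholders.
#
#   client = Itunes('192.168.1.50', 8181, use_ssl=False)
#   state = client.now_playing()       # GET /now_playing
#   client.set_volume(40)              # PUT /volume with {'level': 40}
#   client.play_playlist('Party Mix')  # resolved by playlist name or id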
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the iTunes platform."""
add_entities([
ItunesDevice(
config.get(CONF_NAME),
config.get(CONF_HOST),
config.get(CONF_PORT),
config.get(CONF_SSL),
add_entities
)
])
class ItunesDevice(MediaPlayerDevice):
"""Representation of an iTunes API instance."""
def __init__(self, name, host, port, use_ssl, add_entities):
"""Initialize the iTunes device."""
self._name = name
self._host = host
self._port = port
self._use_ssl = use_ssl
self._add_entities = add_entities
self.client = Itunes(self._host, self._port, self._use_ssl)
self.current_volume = None
self.muted = None
self.shuffled = None
self.current_title = None
self.current_album = None
self.current_artist = None
self.current_playlist = None
self.content_id = None
self.player_state = None
self.airplay_devices = {}
self.update()
def update_state(self, state_hash):
"""Update all the state properties with the passed in dictionary."""
self.player_state = state_hash.get('player_state', None)
self.current_volume = state_hash.get('volume', 0)
self.muted = state_hash.get('muted', None)
self.current_title = state_hash.get('name', None)
self.current_album = state_hash.get('album', None)
self.current_artist = state_hash.get('artist', None)
self.current_playlist = state_hash.get('playlist', None)
self.content_id = state_hash.get('id', None)
_shuffle = state_hash.get('shuffle', None)
self.shuffled = (_shuffle == 'songs')
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self.player_state == 'offline' or self.player_state is None:
return 'offline'
if self.player_state == 'error':
return 'error'
if self.player_state == 'stopped':
return STATE_IDLE
if self.player_state == 'paused':
return STATE_PAUSED
return STATE_PLAYING
def update(self):
"""Retrieve latest state."""
now_playing = self.client.now_playing()
self.update_state(now_playing)
found_devices = self.client.airplay_devices()
found_devices = found_devices.get('airplay_devices', [])
new_devices = []
for device_data in found_devices:
device_id = device_data.get('id')
if self.airplay_devices.get(device_id):
# update it
airplay_device = self.airplay_devices.get(device_id)
airplay_device.update_state(device_data)
else:
# add it
airplay_device = AirPlayDevice(device_id, self.client)
airplay_device.update_state(device_data)
self.airplay_devices[device_id] = airplay_device
new_devices.append(airplay_device)
if new_devices:
self._add_entities(new_devices)
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self.muted
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self.current_volume/100.0
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self.content_id
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_image_url(self):
"""Image url of current playing media."""
if self.player_state in (STATE_PLAYING, STATE_IDLE, STATE_PAUSED) and \
self.current_title is not None:
return self.client.artwork_url() + '?id=' + self.content_id
return 'https://cloud.githubusercontent.com/assets/260/9829355' \
'/33fab972-58cf-11e5-8ea2-2ca74bdaae40.png'
@property
def media_title(self):
"""Title of current playing media."""
return self.current_title
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
return self.current_artist
@property
def media_album_name(self):
"""Album of current playing media (Music track only)."""
return self.current_album
@property
def media_playlist(self):
"""Title of the currently playing playlist."""
return self.current_playlist
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return self.shuffled
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_ITUNES
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
response = self.client.set_volume(int(volume * 100))
self.update_state(response)
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
response = self.client.set_muted(mute)
self.update_state(response)
def set_shuffle(self, shuffle):
"""Shuffle (true) or no shuffle (false) media player."""
response = self.client.set_shuffle(shuffle)
self.update_state(response)
def media_play(self):
"""Send media_play command to media player."""
response = self.client.play()
self.update_state(response)
def media_pause(self):
"""Send media_pause command to media player."""
response = self.client.pause()
self.update_state(response)
def media_next_track(self):
"""Send media_next command to media player."""
response = self.client.next()
self.update_state(response)
def media_previous_track(self):
"""Send media_previous command media player."""
response = self.client.previous()
self.update_state(response)
def play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player."""
if media_type == MEDIA_TYPE_PLAYLIST:
response = self.client.play_playlist(media_id)
self.update_state(response)
def turn_off(self):
"""Turn the media player off."""
response = self.client.stop()
self.update_state(response)
class AirPlayDevice(MediaPlayerDevice):
"""Representation an AirPlay device via an iTunes API instance."""
def __init__(self, device_id, client):
"""Initialize the AirPlay device."""
self._id = device_id
self.client = client
self.device_name = "AirPlay"
self.kind = None
self.active = False
self.selected = False
self.volume = 0
self.supports_audio = False
self.supports_video = False
self.player_state = None
def update_state(self, state_hash):
"""Update all the state properties with the passed in dictionary."""
if 'player_state' in state_hash:
self.player_state = state_hash.get('player_state', None)
if 'name' in state_hash:
name = state_hash.get('name', '')
self.device_name = (name + ' AirTunes Speaker').strip()
if 'kind' in state_hash:
self.kind = state_hash.get('kind', None)
if 'active' in state_hash:
self.active = state_hash.get('active', None)
if 'selected' in state_hash:
self.selected = state_hash.get('selected', None)
if 'sound_volume' in state_hash:
self.volume = state_hash.get('sound_volume', 0)
if 'supports_audio' in state_hash:
self.supports_audio = state_hash.get('supports_audio', None)
if 'supports_video' in state_hash:
self.supports_video = state_hash.get('supports_video', None)
@property
def name(self):
"""Return the name of the device."""
return self.device_name
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
if self.selected is True:
return 'mdi:volume-high'
return 'mdi:volume-off'
@property
def state(self):
"""Return the state of the device."""
if self.selected is True:
return STATE_ON
return STATE_OFF
def update(self):
"""Retrieve latest state."""
@property
def volume_level(self):
"""Return the volume."""
return float(self.volume)/100.0
@property
def media_content_type(self):
"""Flag of media content that is supported."""
return MEDIA_TYPE_MUSIC
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_AIRPLAY
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
volume = int(volume * 100)
response = self.client.set_volume_airplay_device(self._id, volume)
self.update_state(response)
def turn_on(self):
"""Select AirPlay."""
self.update_state({"selected": True})
self.schedule_update_ha_state()
response = self.client.toggle_airplay_device(self._id, True)
self.update_state(response)
def turn_off(self):
"""Deselect AirPlay."""
self.update_state({"selected": False})
self.schedule_update_ha_state()
response = self.client.toggle_airplay_device(self._id, False)
self.update_state(response)
|
the-stack_106_28405 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from telemetry.internal.app import android_process
from telemetry.internal.backends import android_browser_backend_settings
from telemetry.internal.backends import android_command_line_backend
from telemetry.internal.backends import app_backend
from devil.android import app_ui
from devil.android.sdk import intent
import py_utils
class AndroidAppBackend(app_backend.AppBackend):
def __init__(self, android_platform_backend, start_intent,
is_app_ready_predicate=None, app_has_webviews=True):
super(AndroidAppBackend, self).__init__(
start_intent.package, android_platform_backend)
self._default_process_name = start_intent.package
self._start_intent = start_intent
self._is_app_ready_predicate = is_app_ready_predicate
self._is_running = False
self._app_has_webviews = app_has_webviews
self._existing_processes_by_pid = {}
self._app_ui = None
@property
def device(self):
return self.platform_backend.device
def GetAppUi(self):
if self._app_ui is None:
self._app_ui = app_ui.AppUi(self.device, self._start_intent.package)
return self._app_ui
def _LaunchAndWaitForApplication(self):
"""Launch the app and wait for it to be ready."""
def is_app_ready():
return self._is_app_ready_predicate(self.app)
# When "is_app_ready_predicate" is provided, we use it to wait for the
# app to become ready, otherwise "blocking=True" is used as a fall back.
# TODO(slamm): check if the wait for "ps" check is really needed, or
# whether the "blocking=True" fall back is sufficient.
has_ready_predicate = self._is_app_ready_predicate is not None
self.device.StartActivity(
self._start_intent,
blocking=not has_ready_predicate,
force_stop=True, # Ensure app was not running.
)
if has_ready_predicate:
py_utils.WaitFor(is_app_ready, timeout=60)
def Start(self):
"""Start an Android app and wait for it to finish launching.
If the app has webviews, the app is launched with the suitable
command line arguments.
AppStory derivations can customize the wait-for-ready-state to wait
for a more specific event if needed.
"""
if self._app_has_webviews:
webview_startup_args = self.GetWebviewStartupArgs()
backend_settings = (
android_browser_backend_settings.WebviewBackendSettings(
'android-webview'))
with android_command_line_backend.SetUpCommandLineFlags(
self.device, backend_settings, webview_startup_args):
self._LaunchAndWaitForApplication()
else:
self._LaunchAndWaitForApplication()
self._is_running = True
def Foreground(self):
self.device.StartActivity(
intent.Intent(package=self._start_intent.package,
activity=self._start_intent.activity,
action=None,
flags=[intent.FLAG_ACTIVITY_RESET_TASK_IF_NEEDED]),
blocking=True)
def Background(self):
package = 'org.chromium.push_apps_to_background'
activity = package + '.PushAppsToBackgroundActivity'
self.device.StartActivity(
intent.Intent(
package=package,
activity=activity,
action=None,
flags=[intent.FLAG_ACTIVITY_RESET_TASK_IF_NEEDED]),
blocking=True)
def Close(self):
self._is_running = False
self.platform_backend.KillApplication(self._start_intent.package)
def IsAppRunning(self):
return self._is_running
def GetStandardOutput(self):
raise NotImplementedError
def GetStackTrace(self):
raise NotImplementedError
def GetProcesses(self, process_filter=None):
if process_filter is None:
# Match process names of the form: 'process_name[:subprocess]'.
process_filter = re.compile(
'^%s(:|$)' % re.escape(self._default_process_name)).match
processes = set()
ps_output = self.platform_backend.GetPsOutput(['pid', 'name'])
for pid, name in ps_output:
if not process_filter(name):
continue
if pid not in self._existing_processes_by_pid:
self._existing_processes_by_pid[pid] = android_process.AndroidProcess(
self, pid, name)
processes.add(self._existing_processes_by_pid[pid])
return processes
def GetProcess(self, subprocess_name):
assert subprocess_name.startswith(':')
process_name = self._default_process_name + subprocess_name
return self.GetProcesses(lambda n: n == process_name).pop()
def GetWebViews(self):
assert self._app_has_webviews
webviews = set()
for process in self.GetProcesses():
webviews.update(process.GetWebViews())
return webviews
def GetWebviewStartupArgs(self):
assert self._app_has_webviews
args = []
# Turn on GPU benchmarking extension for all runs. The only side effect of
# the extension being on is that render stats are tracked. This is believed
# to be effectively free. And, by doing so here, it avoids us having to
# programmatically inspect a pageset's actions in order to determine if it
# might eventually scroll.
args.append('--enable-gpu-benchmarking')
return args
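# Hedged construction sketch (illustrative only, not part of the original
# module): an AndroidAppBackend is normally built around a devil start
# intent; the package and activity names below are placeholders.
#
#   start_intent = intent.Intent(package='com.example.app',
#                                activity='com.example.app.MainActivity')
#   backend = AndroidAppBackend(android_platform_backend, start_intent,
#                               app_has_webviews=False)
#   backend.Start()
#   processes = backend.GetProcesses()
#   backend.Close()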
|
the-stack_106_28406 | #! /usr/bin/env python3
import argparse
import sys
import os
import csv
def get_arguments(argv):
parser = argparse.ArgumentParser(description="list headers for all given csv files")
parser.add_argument("files", nargs="+", help="a csv file to read")
return parser.parse_args(argv[1:])
def get_header(filename):
with open(filename, "r") as f:
reader = csv.DictReader(f)
return reader.fieldnames
def main(argv):
args = get_arguments(argv)
for filename in args.files:
fieldnames = get_header(filename)
print(f"{filename}:")
for name in fieldnames:
print(f'\t"{name}"')
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
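# Hedged example invocation (illustrative only, not part of the script); the
# script and CSV file names below are placeholders:
#
#   $ python list_headers.py orders.csv customers.csv
#   orders.csv:
#           "order_id"
#           "total"
#   customers.csv:
#           "customer_id"
#           "name"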
|
the-stack_106_28409 | import turtle
x = 600
y = 600
turtle.setup(x, y)
wn = turtle.Screen()
wn.bgcolor("black")
wn.title("Hey!")
turtle.hideturtle()
turtle.penup()
turtle.pencolor("white")
turtle.pensize(3)
turtle.left(90)
turtle.forward(150)
turtle.pendown()
turtle.goto(150.00, 0.00)
turtle.goto(0.00, -150.00)
turtle.goto(-150.00, 0.00)
turtle.goto(0.00, 150.00)
print(turtle.pos())
input("Exit this shit up")
|
the-stack_106_28410 | import collections
import six
from . import logical
from .dict_wrapper import DictWrapper
from avro import schema
from avro import io
if six.PY3:
io_validate = io.Validate
else:
io_validate = io.validate
_PRIMITIVE_TYPES = set(schema.PRIMITIVE_TYPES)
class AvroJsonConverter(object):
def __init__(
self,
use_logical_types=False,
logical_types=logical.DEFAULT_LOGICAL_TYPES,
schema_types=None,
):
self.use_logical_types = use_logical_types
self.logical_types = logical_types or {}
self.schema_types = schema_types or {}
self.fastavro = False
# Register self with all the schema objects.
for klass in self.schema_types.values():
klass._json_converter = self
def with_tuple_union(self, enable=True) -> "AvroJsonConverter":
ret = AvroJsonConverter(
self.use_logical_types, self.logical_types, self.schema_types
)
ret.fastavro = enable
return ret
def validate(self, expected_schema, datum, skip_logical_types=False):
if (
self.use_logical_types
and expected_schema.props.get("logicalType")
and not skip_logical_types
and expected_schema.props.get("logicalType") in self.logical_types
):
return self.logical_types[
expected_schema.props.get("logicalType")
].can_convert(expected_schema) and self.logical_types[
expected_schema.props.get("logicalType")
].validate(
expected_schema, datum
)
schema_type = expected_schema.type
if schema_type == "array":
return isinstance(datum, list) and all(
self.validate(expected_schema.items, d, skip_logical_types)
for d in datum
)
elif schema_type == "map":
return (
isinstance(datum, dict)
and all(isinstance(k, six.string_types) for k in datum.keys())
and all(
self.validate(expected_schema.values, v, skip_logical_types)
for v in datum.values()
)
)
elif schema_type in ["union", "error_union"]:
if isinstance(datum, DictWrapper):
# Match the type based on the declared schema.
data_schema = self._get_record_schema_if_available(datum)
for i, candidate_schema in enumerate(expected_schema.schemas):
if (
candidate_schema.namespace == data_schema.namespace
and candidate_schema.name == data_schema.name
):
return self.validate(candidate_schema, datum)
# If the union type is using a "name" to distinguish the type, we
# must handle this specially during validation.
value_type = None
if not self.fastavro and isinstance(datum, dict):
if len(datum) == 1:
items = list(six.iteritems(datum))
if not items:
return None
value_type = items[0][0]
value = items[0][1]
elif self.fastavro and (
isinstance(datum, list) or isinstance(datum, tuple)
):
if len(datum) == 2:
value_type = datum[0]
value = datum[1]
if value_type is not None:
for s in expected_schema.schemas:
name = self._fullname(s)
if name == value_type:
if self.validate(s, value, skip_logical_types):
return True
# If the specialized validation fails, we still attempt normal validation.
return True in [
self.validate(s, datum, skip_logical_types)
for s in expected_schema.schemas
]
elif schema_type in ["record", "error", "request"]:
return (
isinstance(datum, dict) or isinstance(datum, DictWrapper)
) and all(
self.validate(f.type, datum.get(f.name), skip_logical_types)
for f in expected_schema.fields
)
return io_validate(expected_schema, datum)
def from_json_object(self, json_obj, writers_schema=None, readers_schema=None):
if readers_schema is None:
readers_schema = writers_schema
if writers_schema is None:
writers_schema = readers_schema
if writers_schema is None:
raise Exception("At least one schema must be specified")
if not io.DatumReader.match_schemas(writers_schema, readers_schema):
raise io.SchemaResolutionException(
"Could not match schemas", writers_schema, readers_schema
)
return self._generic_from_json(json_obj, writers_schema, readers_schema)
def to_json_object(self, data_obj, writers_schema=None):
if writers_schema is None:
writers_schema = self._get_record_schema_if_available(data_obj)
if writers_schema is None:
raise Exception(
"Could not determine writer's schema from the object type and schema was not passed"
)
assert isinstance(writers_schema, schema.Schema)
if not self.validate(writers_schema, data_obj):
raise io.AvroTypeException(writers_schema, data_obj)
return self._generic_to_json(data_obj, writers_schema)
def _fullname(self, schema_):
if isinstance(schema_, schema.NamedSchema):
return schema_.fullname if six.PY2 else schema_.fullname.lstrip(".")
return schema_.type
def _get_record_schema_if_available(self, data_obj):
if hasattr(type(data_obj), "RECORD_SCHEMA"):
return type(data_obj).RECORD_SCHEMA
return None
def _generic_to_json(self, data_obj, writers_schema, was_within_array=False):
if self.use_logical_types and writers_schema.props.get("logicalType"):
lt = self.logical_types.get(
writers_schema.props.get("logicalType")
) # type: logical.LogicalTypeProcessor
if lt.can_convert(writers_schema):
if lt.validate(writers_schema, data_obj):
data_obj = lt.convert(writers_schema, data_obj)
else:
raise schema.AvroException(
"Wrong object for %s logical type"
% writers_schema.props.get("logicalType")
)
if writers_schema.type in _PRIMITIVE_TYPES:
result = self._primitive_to_json(data_obj, writers_schema)
elif writers_schema.type == "fixed":
result = self._fixed_to_json(data_obj, writers_schema)
elif writers_schema.type == "enum":
result = self._enum_to_json(data_obj, writers_schema)
elif writers_schema.type == "array":
result = self._array_to_json(data_obj, writers_schema)
elif writers_schema.type == "map":
result = self._map_to_json(data_obj, writers_schema)
elif writers_schema.type in ["record", "error", "request"]:
result = self._record_to_json(data_obj, writers_schema)
elif writers_schema.type in ["union", "error_union"]:
result = self._union_to_json(data_obj, writers_schema, was_within_array)
else:
raise schema.AvroException("Invalid schema type: %s" % writers_schema.type)
return result
def _primitive_to_json(self, data_obj, writers_schema):
return data_obj
def _fixed_to_json(self, data_obj, writers_schema):
return data_obj
def _enum_to_json(self, data_obj, writers_schema):
return data_obj
def _array_to_json(self, data_obj, writers_schema):
return [self._generic_to_json(x, writers_schema.items, True) for x in data_obj]
def _map_to_json(self, data_obj, writers_schema):
return {
name: self._generic_to_json(x, writers_schema.values)
for name, x in six.iteritems(data_obj)
}
def _record_to_json(self, data_obj, writers_schema):
result = collections.OrderedDict()
for field in writers_schema.fields:
result[field.name] = self._generic_to_json(
data_obj.get(
field.name,
self.from_json_object(field.default, field.type)
if field.has_default
else None,
),
field.type,
)
return result
def _is_unambiguous_union(self, writers_schema) -> bool:
if any(
isinstance(candidate_schema, schema.EnumSchema)
for candidate_schema in writers_schema.schemas
):
if len(writers_schema.schemas) == 2 and any(
candidate_schema.type == "null"
for candidate_schema in writers_schema.schemas
):
# Enums and null do not conflict, so this is fine.
return True
else:
# Enum and string conflict, so this case is ambiguous.
return False
advanced_count = 0
for candidate_schema in writers_schema.schemas:
if not isinstance(candidate_schema, schema.PrimitiveSchema):
advanced_count += 1
if advanced_count <= 1:
return True
return False
def _union_to_json(self, data_obj, writers_schema, was_within_array=False):
index_of_schema = -1
data_schema = self._get_record_schema_if_available(data_obj)
for i, candidate_schema in enumerate(writers_schema.schemas):
# Check for exact matches first.
if (
data_schema
and candidate_schema.namespace == data_schema.namespace
and candidate_schema.name == data_schema.name
):
index_of_schema = i
break
# Fallback to schema guessing based on validation.
if self.validate(candidate_schema, data_obj):
index_of_schema = i
if candidate_schema.type == "boolean":
break
if index_of_schema < 0:
raise io.AvroTypeException(writers_schema, data_obj)
candidate_schema = writers_schema.schemas[index_of_schema]
if candidate_schema.type == "null":
return None
output_obj = self._generic_to_json(data_obj, candidate_schema)
if (
not self.fastavro
and not was_within_array
and self._is_unambiguous_union(writers_schema)
):
# If the union is unambiguous, we can avoid wrapping it in
# an extra layer of tuples or dicts. Fastavro doesn't like this though.
# Arrays with unions inside must specify the type.
return output_obj
if self.fastavro:
# Fastavro likes tuples instead of dicts for union types.
return (self._fullname(candidate_schema), output_obj)
return {self._fullname(candidate_schema): output_obj}
def _generic_from_json(self, json_obj, writers_schema, readers_schema):
if (
writers_schema.type
not in [
"union",
"error_union",
]
and readers_schema.type in ["union", "error_union"]
):
for s in readers_schema.schemas:
if io.DatumReader.match_schemas(writers_schema, s):
return self._generic_from_json(json_obj, writers_schema, s)
raise io.SchemaResolutionException(
"Schemas do not match", writers_schema, readers_schema
)
result = None
if writers_schema.type == "null":
result = None
elif writers_schema.type in _PRIMITIVE_TYPES:
result = self._primitive_from_json(json_obj, writers_schema, readers_schema)
elif writers_schema.type == "fixed":
result = self._fixed_from_json(json_obj, writers_schema, readers_schema)
elif writers_schema.type == "enum":
result = self._enum_from_json(json_obj, writers_schema, readers_schema)
elif writers_schema.type == "array":
result = self._array_from_json(json_obj, writers_schema, readers_schema)
elif writers_schema.type == "map":
result = self._map_from_json(json_obj, writers_schema, readers_schema)
elif writers_schema.type in ("union", "error_union"):
result = self._union_from_json(json_obj, writers_schema, readers_schema)
elif writers_schema.type in ("record", "error", "request"):
result = self._record_from_json(json_obj, writers_schema, readers_schema)
result = self._logical_type_from_json(writers_schema, readers_schema, result)
return result
def _logical_type_from_json(self, writers_schema, readers_schema, result):
if self.use_logical_types and readers_schema.props.get("logicalType"):
lt = self.logical_types.get(
readers_schema.props.get("logicalType")
) # type: logical.LogicalTypeProcessor
if lt and lt.does_match(writers_schema, readers_schema):
result = lt.convert_back(writers_schema, readers_schema, result)
return result
def _primitive_from_json(self, json_obj, writers_schema, readers_schema):
return json_obj
def _fixed_from_json(self, json_obj, writers_schema, readers_schema):
return json_obj
def _enum_from_json(self, json_obj, writers_schema, readers_schema):
return json_obj
def _array_from_json(self, json_obj, writers_schema, readers_schema):
return [
self._generic_from_json(x, writers_schema.items, readers_schema.items)
for x in json_obj
]
def _map_from_json(self, json_obj, writers_schema, readers_schema):
return {
name: self._generic_from_json(
value, writers_schema.values, readers_schema.values
)
for name, value in six.iteritems(json_obj)
}
def _union_from_json(self, json_obj, writers_schema, readers_schema):
if json_obj is None:
return None
value_type = None
value = None
if not self.fastavro and isinstance(json_obj, dict):
items = list(six.iteritems(json_obj))
if not items:
return None
value_type = items[0][0]
value = items[0][1]
if self.fastavro and (
isinstance(json_obj, list) or isinstance(json_obj, tuple)
):
if len(json_obj) == 2:
value_type = json_obj[0]
value = json_obj[1]
if value_type is not None:
for s in writers_schema.schemas:
name = self._fullname(s)
if name == value_type:
return self._generic_from_json(value, s, readers_schema)
for s in writers_schema.schemas:
if self.validate(s, json_obj, skip_logical_types=True):
return self._generic_from_json(json_obj, s, readers_schema)
raise schema.AvroException("Datum union type not in schema: %s", value_type)
def _make_type(self, tp, record):
if issubclass(tp, DictWrapper):
return tp.construct(record)
return tp(record)
def _instantiate_record(self, decoded_record, writers_schema, readers_schema):
# First try the fullname, which includes namespaces.
readers_name = self._fullname(readers_schema)
if readers_name in self.schema_types:
return self._make_type(self.schema_types[readers_name], decoded_record)
# Fallback to the bare name, without namespace.
readers_name = readers_schema.name
if readers_name in self.schema_types:
return self._make_type(self.schema_types[readers_name], decoded_record)
return decoded_record
def _record_from_json(self, json_obj, writers_schema, readers_schema):
writer_fields = (
writers_schema.fields_dict if six.PY2 else writers_schema.field_map
)
result = {}
for field in readers_schema.fields:
writers_field = writer_fields.get(field.name)
if writers_field is None:
field_value = (
self._generic_from_json(field.default, field.type, field.type)
if field.has_default
else None
)
else:
if field.name in json_obj:
field_value = self._generic_from_json(
json_obj[field.name], writers_field.type, field.type
)
else:
field_value = (
self._generic_from_json(
writers_field.default, writers_field.type, field.type
)
if writers_field.has_default
else None
)
result[field.name] = field_value
return self._instantiate_record(result, writers_schema, readers_schema)
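# Hedged usage sketch (illustrative only, not part of this module): assuming
# the standard avro package (schema.Parse on Python 3, schema.parse on
# Python 2), a round-trip through the converter could look roughly like the
# commented code below; the schema and record are made up.
#
#   user_schema = schema.Parse('''{"type": "record", "name": "User",
#       "fields": [{"name": "name", "type": "string"},
#                  {"name": "age", "type": ["null", "int"]}]}''')
#   converter = AvroJsonConverter()
#   json_obj = converter.to_json_object({"name": "Ada", "age": 36}, user_schema)
#   record = converter.from_json_object(json_obj, writers_schema=user_schema)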
|
the-stack_106_28411 | import bpy
import os
import sys
import argparse
'''
Simplifies mesh to target number of faces
Requires Blender 2.8
Author: Rana Hanocka
@input:
<obj_file>
<target_faces> number of target faces
<outfile> name of simplified .obj file
@output:
simplified mesh .obj
to run it from cmd line:
/opt/blender/blender --background --python blender_process.py /home/rana/koala.obj 1000 /home/rana/koala_1000.obj
'''
class Process:
def __init__(self, obj_file, target_faces, export_name):
mesh = self.load_obj(obj_file)
self.simplify(mesh, target_faces)
self.export_obj(mesh, export_name)
def load_obj(self, obj_file):
bpy.ops.import_scene.obj(filepath=obj_file, axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl", use_edges=True,
use_smooth_groups=True, use_split_objects=False, use_split_groups=False,
use_groups_as_vgroups=False, use_image_search=True, split_mode='ON')
ob = bpy.context.selected_objects[0]
return ob
def subsurf(self, mesh):
# subdivide mesh
bpy.context.view_layer.objects.active = mesh
mod = mesh.modifiers.new(name='Subsurf', type='SUBSURF')
mod.subdivision_type = 'SIMPLE'
bpy.ops.object.modifier_apply(modifier=mod.name)
# now triangulate
mod = mesh.modifiers.new(name='Triangluate', type='TRIANGULATE')
bpy.ops.object.modifier_apply(modifier=mod.name)
def simplify(self, mesh, target_faces):
bpy.context.view_layer.objects.active = mesh
mod = mesh.modifiers.new(name='Decimate', type='DECIMATE')
bpy.context.object.modifiers['Decimate'].use_collapse_triangulate = True
#
nfaces = len(mesh.data.polygons)
if nfaces < target_faces:
self.subsurf(mesh)
nfaces = len(mesh.data.polygons)
ratio = target_faces / float(nfaces)
mod.ratio = float('%s' % ('%.6g' % (ratio)))
print('faces: ', mod.face_count, mod.ratio)
bpy.ops.object.modifier_apply(modifier=mod.name)
def export_obj(self, mesh, export_name):
outpath = os.path.dirname(export_name)
if not os.path.isdir(outpath): os.makedirs(outpath)
print('EXPORTING', export_name)
bpy.ops.object.select_all(action='DESELECT')
mesh.select_set(state=True)
bpy.ops.export_scene.obj(filepath=export_name, check_existing=False, filter_glob="*.obj;*.mtl",
use_selection=True, use_animation=False, use_mesh_modifiers=True, use_edges=True,
use_smooth_groups=False, use_smooth_groups_bitflags=False, use_normals=True,
use_uvs=False, use_materials=False, use_triangles=True, use_nurbs=False,
use_vertex_groups=False, use_blen_objects=True, group_by_object=False,
group_by_material=False, keep_vertex_order=True, global_scale=1, path_mode='AUTO',
axis_forward='-Z', axis_up='Y')
if __name__ == "__main__":
obj_file = sys.argv[-3]
target_faces = int(sys.argv[-2])
export_name = sys.argv[-1]
blender = Process(obj_file, target_faces, export_name)
|
the-stack_106_28413 | from firedrake import *
import matplotlib.pyplot as plt
parameters["pyop2_options"]["lazy_evaluation"] = False
# Defining the mesh
N = 15
use_quads = True
mesh = UnitSquareMesh(N, N, quadrilateral=use_quads)
# Function space declaration
degree = 1
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
solution = Function(W)
# Mesh entities
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_velocity = -grad(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(exact_velocity)
f = Function(V).interpolate(f_expression)
# Boundary Conditions
bcs = DirichletBC(W[0], exact_velocity, "on_boundary")
# Least-squares terms
a = inner(u + grad(p), v + grad(q)) * dx
a += div(u) * div(v) * dx
a += inner(curl(u), curl(v)) * dx
L = f * div(v) * dx
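# The forms above correspond (with the strongly imposed velocity boundary
# condition below) to minimising the least-squares functional
#   J(u, p) = ||u + grad(p)||^2 + ||div(u) - f||^2 + ||curl(u)||^2
# over W. One practical consequence, noted here as an assumption rather than
# proved, is that equal-order continuous spaces such as the CG1/CG1 pair
# defined above can be used without an inf-sup-compatible pairing.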
# Solving the system
solver_parameters = {
    # The solver is configured so that the extreme singular values of the FEM
    # operator can be estimated from the GMRES (Arnoldi) iterations; these
    # estimates are then used to approximate the condition number of the
    # system. "ksp_gmres_restart" must be large enough for the maximum and
    # minimum singular value approximations to be tracked across iterations.
'ksp_type': 'gmres',
'pc_type': 'none',
'mat_type': 'aij',
'ksp_max_it': 2000,
'ksp_monitor_singular_value': None,
'ksp_gmres_restart': 1000
}
problem = LinearVariationalProblem(a, L, solution, bcs=bcs)
solver = LinearVariationalSolver(problem, solver_parameters=solver_parameters)
solver.snes.ksp.setConvergenceHistory()
solver.solve()
sigma_h, u_h = solution.split()
sigma_h.rename('Velocity', 'label')
u_h.rename('Pressure', 'label')
# Retrieving solver information
max_singular_value, min_singular_value = solver.snes.ksp.computeExtremeSingularValues()
condition_number = max_singular_value / min_singular_value
print(f"\n*** Condition number estimate = {condition_number}")
# Plotting solution field
tripcolor(u_h)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
|
the-stack_106_28414 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from monai.handlers import ROCAUC
from monai.transforms import Activations, AsDiscrete
class TestHandlerROCAUC(unittest.TestCase):
def test_compute(self):
auc_metric = ROCAUC()
act = Activations(softmax=True)
to_onehot = AsDiscrete(to_onehot=True, n_classes=2)
y_pred = torch.Tensor([[0.1, 0.9], [0.3, 1.4]])
y = torch.Tensor([[0], [1]])
y_pred = act(y_pred)
y = to_onehot(y)
auc_metric.update([y_pred, y])
y_pred = torch.Tensor([[0.2, 0.1], [0.1, 0.5]])
y = torch.Tensor([[0], [1]])
y_pred = act(y_pred)
y = to_onehot(y)
auc_metric.update([y_pred, y])
auc = auc_metric.compute()
np.testing.assert_allclose(0.75, auc)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_28415 | import numpy as np
import torch
import torch.distributed as dist
import json
class Merger():
def __init__(self, meta):
if isinstance(meta, str):
with open(meta) as f:
meta = json.load(f)
self.meta = meta
def merge(self, multi_labels):
"""
args:
multi_labels: [[str]]
returns:
labels
"""
num_item = len(multi_labels)
inter_mat = np.zeros((num_item, num_item))
for h in range(num_item):
for w in range(num_item):
inter = list(set(multi_labels[h]).intersection(set(multi_labels[w])))
inter_mat[h, w] = len(inter)
labels = [0 for i in range(num_item)]
next_label = 1
for h in range(num_item):
if labels[h] != 0: continue
tmp_list = [h]
res_list = []
visited = [False if labels[i] == 0 else True for i in range(num_item)]
while len(tmp_list) != 0:
anchor = tmp_list[0]
tmp_list.pop(0)
visited[anchor] = True
pos_indexes = np.where(inter_mat[anchor] >= 1)[0]
for pos in pos_indexes:
if not visited[pos]:
tmp_list.append(pos)
visited[pos] = True
res_list.append(anchor)
for res in res_list:
labels[res] = next_label
next_label += 1
return labels
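    # Illustrative example: for multi_labels [['a', 'b'], ['b'], ['c'], ['c', 'd'], ['e']],
    # items 0 and 1 share 'b' and items 2 and 3 share 'c', so merge() returns
    # [1, 1, 2, 2, 3] (each transitively-overlapping group gets one label).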
def __call__(self, rank, world_size, x):
bsize = x.size()[0]
x_list = [torch.ones_like(x) for _ in range(world_size)]
dist.all_gather(x_list, x.contiguous())
indexes = torch.cat(x_list, dim=0)
indexes = indexes.detach().cpu().numpy().tolist()
multi_labels = []
for index in indexes:
idx = str(index)
multi_labels.append(self.meta[idx]['label'])
labels = self.merge(multi_labels)
labels = torch.tensor(labels[rank*bsize:(rank+1)*bsize], dtype=torch.int32)
        return labels
 |
the-stack_106_28416 | #!/usr/bin/env python3
#
# Advent of Code 2020 - Day N
#
from pathlib import Path
INPUTFILE = "input.txt"
SAMPLE_INPUT = """
"""
SAMPLE_CASES = [
(arg1, expected1),
(arg2, expected2),
]
# Utility functions
## Use these if blank lines should be discarded.
def sample_input():
return filter_blank_lines(SAMPLE_INPUT.split("\n"))
def load_input(infile):
return filter_blank_lines(Path(infile).open())
def filter_blank_lines(lines):
return [line.strip() for line in lines if line.strip()]
## Use these if blank lines in input are meaningful.
def sample_input():
return SAMPLE_INPUT.strip("\n").split("\n")
def load_input(infile):
return [line.strip() for line in Path(infile).open()]
def parse_sections(lines):
result = []
sect = []
for line in lines:
line = line.strip()
if not line:
if sect:
result.append(sect)
sect = []
else:
sect.append(line)
if sect:
result.append(sect)
return result
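# Example (illustrative): parse_sections(["a", "b", "", "c"]) returns
# [["a", "b"], ["c"]] -- blank lines separate the sections.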
# Solution
def solve(lines):
"""Solve the problem."""
pass
# PART 1
#!! DELETE THE example1 FUNCTION YOU'RE NOT GOING TO USE
def example1():
"""Run example for problem with input arguments."""
print("EXAMPLE 1:")
for arg, expected in SAMPLE_CASES:
result = solve(arg)
print(f"'{arg}' -> {result} (expected {expected})")
assert result == expected
print("= " * 32)
def example1():
"""Run example for problem with input lines."""
print("EXAMPLE 1:")
lines = filter_blank_lines(SAMPLE_INPUT.split("\n"))
result = solve(lines)
expected = 0
print(f"'sample-input' -> {result} (expected {expected})")
assert result == expected
print("= " * 32)
def part1(lines):
print("PART 1:")
result = solve(lines)
print(f"result is {result}")
print("= " * 32)
# PART 2
#!! Placeholder mirroring part 1 -- fill in the part 2 expectations.
def example2():
    """Run example for problem part 2 with input lines."""
    print("EXAMPLE 2:")
    lines = sample_input()
    result = solve(lines)
    print(f"'sample-input' -> {result}")
    print("= " * 32)
def part2(lines):
    print("PART 2:")
    result = solve(lines)
    print(f"result is {result}")
    print("= " * 32)
if __name__ == "__main__":
    example1()
    lines = load_input(INPUTFILE)
    part1(lines)
    example2()
    part2(lines)
|
the-stack_106_28417 | from os import path
from setuptools import setup
import sys
class IncompatiblePackageError(Exception):
pass
# Make sure that the unrelated package by the name 'suitcase' is *not*
# installed because if it is installed it will break this suitcase's namespace
# package scheme.
try:
import suitcase
except ImportError:
pass
else:
if hasattr(suitcase, '__file__'):
raise IncompatiblePackageError(
"The package 'suitcase' must be uninstalled before "
"'suitcase-core' can be installed. The package distributed under "
"the name 'suitcase' is an unrelated project, and it creates "
"conflicts with suitcase-core's namespace packages.")
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = (3, 6)
if sys.version_info < min_version:
error = """
suitcase does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*sys.version_info[:2], *min_version)
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
setup(
name='suitcase-core',
version='0.8.0',
description="Exporters / serializers for bluesky documents.",
long_description=readme,
author="Brookhaven National Lab",
author_email='[email protected]',
url='https://github.com/NSLS-II/suitcase',
packages=[], # This is a namespace package with dependencies and docs.
entry_points={
'console_scripts': [
# 'some.module:some_function',
],
},
include_package_data=True,
package_data={
'suitcase': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
|
the-stack_106_28418 | #coding:utf-8
import os
import json
from django.core.urlresolvers import reverse
from django.test.client import encode_multipart
from seahub.test_utils import BaseTestCase
from seaserv import seafile_api
class FileTagTest(BaseTestCase):
def setUp(self):
self.login_as(self.user)
self.test_filepath ='/test_file.txt'
self.test_folderpath = '/test_folder'
self.test_parentpath = '/'
self.test_filename = 'test_file.txt'
self.test_folder_name = 'test_folder'
self.new_repo = seafile_api.get_repo(self.create_repo(
name='test-repo', desc='', username=self.user.username,
passwd=None))
self.endpoint = reverse('api-v2.1-filetags-view', args=[self.new_repo.id])
self._endpoint = reverse('api-v2.1-filetag-view', args=[self.new_repo.id, 'test_tagname'])
self.test_file = self.create_file( repo_id=self.new_repo.id,
parent_dir=self.test_parentpath, filename=self.test_filename,
username=self.user.username
)
self.test_folder = self.create_folder(repo_id = self.new_repo.id,
parent_dir=self.test_parentpath, dirname=self.test_folder_name,
username=self.user.username)
def test_default(self):
#test for create file
response = self.client.post(self.endpoint, { 'path': self.test_filepath,
'names': 'test_tagname', 'is_dir': False,
})
assert response.status_code == 200
self.filetag_id = response.data['tags'][0]['id']
self.filetag_name = response.data['tags'][0]['name']
self.filetag_username = response.data['tags'][0]['creator']
assert self.filetag_id
assert self.filetag_name
assert self.filetag_username
#test for create folder
folder_response = self.client.post(self.endpoint, {
'path': self.test_folderpath, 'names': 'test_tagname',
'is_dir': True,
})
assert folder_response.status_code == 200
self.foldertag_id = folder_response.data["tags"][0]['id']
self.foldertag_name = folder_response.data["tags"][0]['name']
self.foldertag_username = folder_response.data["tags"][0]['creator']
assert self.foldertag_id
assert self.foldertag_name
assert self.foldertag_username
#test for get file tag
response = self.client.get(self.endpoint, {
'path': self.test_filepath,
'is_dir': False,
})
assert response.status_code == 200
assert response.data['tags'][0]['id'] == self.filetag_id
assert response.data['tags'][0]['name'] == self.filetag_name
assert response.data['tags'][0]['creator'] == self.filetag_username
#test for get folder tag
response = self.client.get(self.endpoint, {
'path': self.test_folderpath,
'is_dir': True,
})
assert response.status_code == 200
assert response.data['tags'][0]['id'] == self.foldertag_id
assert response.data['tags'][0]['name'] == self.foldertag_name
assert response.data['tags'][0]['creator'] == self.foldertag_username
#test for del file tag
response = self.client.delete(self._endpoint + "?path=%s&is_dir=%s"
%(self.test_filepath, False))
assert response.status_code == 200
response = self.client.get(self.endpoint, {
'path': self.test_filepath,
'is_dir': False,
})
assert len(response.data['tags']) == 0
#test for del folder tag
response = self.client.delete(self._endpoint + "?path=%s&is_dir=%s"
%(self.test_folderpath, True))
assert response.status_code == 200
response = self.client.get(self.endpoint, {
'path': self.test_folderpath,
'is_dir': True,
})
assert len(response.data['tags']) == 0
def test_post(self):
# add one
response = self.client.post(self.endpoint, {
'path': self.test_filepath, 'names': 'test_tagname',
'is_dir': False,
})
assert response.status_code == 200
assert response.data["tags"][0]["id"]
assert response.data["tags"][0]["name"] == "test_tagname"
assert response.data["tags"][0]["creator"] == self.user.username
# add more
response = self.client.post(self.endpoint, {
'path': self.test_filepath,
'names': 'test_tagname, test_tagname1, test_tagnm天',
'is_dir': False,
})
assert response.status_code == 200
assert response.data["tags"][0]["id"]
tags_names = [tags["name"] for tags in response.data["tags"]]
assert "test_tagname" in tags_names
assert "test_tagname1" in tags_names
assert "test_tagnm天" in tags_names
assert response.data["tags"][0]["creator"] == self.user.username
response = self.client.get(self.endpoint, {
'path': self.test_filepath,
'is_dir': False,
})
tags_names = [tags["name"] for tags in response.data["tags"]]
assert "test_tagname" in tags_names
assert "test_tagname1" in tags_names
assert "test_tagnm天" in tags_names
#test delete all filetag and add specifiy tag
data = 'names=test_zm-.&path=%s&is_dir=%s' % (self.test_filepath, False)
response = self.client.put(self.endpoint, data, 'application/x-www-form-urlencoded')
assert response.status_code == 200
response = self.client.get(self.endpoint, { 'path': self.test_filepath,
'is_dir': False,
})
tags_names = [tags["name"] for tags in response.data["tags"]]
assert "test_tagname" not in tags_names
assert "test_tagname1" not in tags_names
assert "test_tagnm" not in tags_names
assert "test_zm-." in tags_names
#delete delete all filetag
data = 'names=&path=%s&is_dir=%s' % (self.test_filepath, False)
response = self.client.put(self.endpoint, data, 'application/x-www-form-urlencoded')
tags_names = [tags["name"] for tags in response.data["tags"]]
assert response.status_code == 200
assert "test_zm" not in tags_names
|
the-stack_106_28419 | from __future__ import print_function
# python imports
import datetime
import gzip
import logging
import os
import random
import shutil
import sys
# vizard imports
import viz
import vizproximity
import vizshape
import viztask
# local imports
import vrlab
import suit
import targets
# module constants
GAP_MINUTES = 50
TIMESTAMP_FORMAT = '%Y%m%d%H%M%S'
BASE_PATH = 'C:\\Documents and Settings\\vrlab\\Desktop\\target-data'
class Trial(vrlab.Trial):
'''Manage a single trial of the target-reaching experiment.'''
def __init__(self, block, targets):
super(Trial, self).__init__()
self.block = block
self.home = targets[0]
self.targets = targets[1:]
self.trial_label = ''.join('{:x}'.format(t.index) for t in targets)
self.current_target = self.previous_target = self.home
self.suit = block.experiment.suit
self.records = []
self._timer = self.add_periodic(1. / 100, self.record_data)
@property
def index(self):
if os.path.isdir(self.block.output):
return len(os.listdir(self.block.output))
return 0
def record_data(self):
self.records.append((
viz.tick() - self.start_tick,
self.previous_target,
self.current_target,
self.suit.get_markers(),
))
def wait_for_touch(self, target):
target.activate(self.block.experiment.prox)
#yield viztask.waitKeyDown(' ')
yield target.signal.wait()
def target_sequence(self):
for target in self.targets:
yield target
def setup(self):
yield self.wait_for_touch(self.home)
self.start_tick = viz.tick()
def run(self):
for target in self.target_sequence():
self.previous_target = self.current_target
self.current_target = target
yield self.wait_for_touch(target)
target.sphere.color(viz.WHITE)
def teardown(self):
vrlab.sounds.cowbell.play()
self.write_records()
def write_records(self):
stamp = datetime.datetime.now().strftime(TIMESTAMP_FORMAT)
output = os.path.join(
self.block.output,
'{}-trial{:02d}-{}.csv.gz'.format(stamp, self.index, self.trial_label))
# open file and define helper to write data
handle = gzip.open(output, 'w')
def w(msg, *args, **kwargs):
handle.write(msg.format(*args, **kwargs))
# write csv file header
w('time,effector')
w(',source,source-x,source-y,source-z')
w(',target,target-x,target-y,target-z')
headers = ''.join(',marker{i:02d}-{label}-' + ax for ax in 'xyzc')
for i, label in enumerate(suit.MARKER_LABELS):
w(headers, i=i, label=label)
w('\n')
# write data frames
for elapsed, prev, curr, frame in self.records:
w('{},{}', elapsed, self.block.effector)
w(',{t.index},{t.center[0]},{t.center[1]},{t.center[2]}', t=prev)
w(',{t.index},{t.center[0]},{t.center[1]},{t.center[2]}', t=curr)
for i in range(len(frame)):
w(',{m.pos[0]},{m.pos[1]},{m.pos[2]},{m.cond}', m=frame[i])
w('\n')
# finish up
handle.close()
logging.info('wrote %d records to trial output %s',
len(self.records), os.path.basename(output))
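    # Resulting CSV layout (illustrative): one row per ~10 ms sample with
    # time, effector, source/target indices and centers, then
    # marker{i}-{label}-{x,y,z,c} columns for every suit marker.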
class Block(vrlab.Block):
'''Manage a block of trials in the tracing experiment.
This class handles block setup (playing a sound, making a directory for
recording data) and generates trials in the block by choosing sequences of
targets.
'''
def __init__(self, experiment, effector):
super(Block, self).__init__()
self.experiment = experiment
self.effector = effector
idx = list(range(6))
random.shuffle(idx)
self.trials = [targets.CIRCUITS[i] for i in idx]
stamp = datetime.datetime.now().strftime(TIMESTAMP_FORMAT)
self.output = os.path.join(
experiment.output, '{}-block{:02d}'.format(stamp, self.index))
logging.info('NEW BLOCK -- effector %s trials %s',
suit.MARKER_LABELS[self.effector],
'|'.join(''.join('{:x}'.format(t) for t in ts)
for ts in self.trials))
@property
def index(self):
if os.path.isdir(self.experiment.output):
return len(os.listdir(self.experiment.output))
return 0
def setup(self):
self.experiment.prox.addTarget(self.experiment.leds[self.effector])
if not os.path.isdir(self.output):
os.makedirs(self.output)
yield viztask.waitKeyDown(' ')
def teardown(self):
vrlab.sounds.gong.play()
self.experiment.prox.clearTargets()
def generate_trials(self):
for ts in self.trials:
yield Trial(self, [targets.NUMBERED[t] for t in ts])
class Experiment(vrlab.Experiment):
'''Manage a series of blocks in the reaching experiment.
This class handles global experiment setup for a single subject. To set up,
we turn on the motion-capture thread, create some experiment-relevant Vizard
objects for representing the mocap leds and targets, and create a virtual
environment for visualization.
'''
def find_output(self, threshold_min):
'''Locate an output directory for a subject.
This method looks at existing output directories and reuses an existing
directory if one was modified in the past "threshold" minutes. If no
such directory exists, it creates a new one.
'''
now = datetime.datetime.now()
for bn in os.listdir(BASE_PATH):
stamp, _ = bn.split('-')
then = datetime.datetime.strptime(stamp, TIMESTAMP_FORMAT)
if now - then < datetime.timedelta(seconds=60 * threshold_min):
return bn
return '{}-{:08x}'.format(now.strftime(TIMESTAMP_FORMAT),
random.randint(0, 0xffffffff))
def setup(self):
# set up a folder to store data for a subject.
dirname = self.find_output(threshold_min=GAP_MINUTES)
self.output = os.path.join(BASE_PATH, dirname)
logging.info('storing output in %s', self.output)
# configure the phasespace.
mocap = vrlab.Phasespace('192.168.1.230', freq=120)
self.suit = mocap.track_points(range(len(suit.MARKER_LABELS)))
self.leds = []
for i in range(50):
sphere = vizshape.addSphere(0.02, color=viz.RED)
self.suit.link_marker(i, sphere)
self.leds.append(sphere)
mocap.start_thread()
# set up a proximity manager to detect touch events.
self.prox = vizproximity.Manager()
#self.prox.setDebug(viz.ON)
# add an environment and navigation to help with visualization.
self.environment = viz.addChild('dojo.osgb')
viz.cam.setHandler(None)
def generate_blocks(self):
# just run one block at a time to prevent vizard from freezing.
yield Block(self, effector=suit.MARKERS.R_FING_INDEX)
if __name__ == '__main__':
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format='%(levelname).1s %(asctime)s %(name)s:%(lineno)d %(message)s',
)
Experiment().main(fullscreen=False)
|
the-stack_106_28421 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import _pywrap_stacktrace_handler
from tensorflow.python import _pywrap_util_port
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import config
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
# Uses the same mechanism as above to selectively enable/disable MLIR
# compilation.
def is_mlir_bridge_enabled():
return False
try:
from tensorflow.python.framework.is_mlir_bridge_test_false import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
try:
from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
pass
# Uses the same mechanism as above to selectively enable TFRT.
def is_tfrt_enabled():
return False
try:
from tensorflow.python.framework.is_tfrt_test_true import is_tfrt_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
def _get_object_count_by_type():
return collections.Counter([type(obj).__name__ for obj in gc.get_objects()])
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithXLA():
return _pywrap_util_port.IsBuiltWithXLA()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return _pywrap_util_port.IsMklEnabled()
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 3-, 4-, or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {3: [0, 2, 1], 4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
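# Example (illustrative): NHWCToNCHW([8, 224, 224, 3]) returns [8, 3, 224, 224];
# a 4-D tensor input is transposed with the same [0, 3, 1, 2] permutation.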
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
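# Example (illustrative): NHWCToNCHW_VECT_C([8, 224, 224, 4]) returns
# [8, 1, 224, 224, 4] -- the channel dimension is split into groups of 4.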
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
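# Example (illustrative): the condition may be a plain boolean or a callable
# evaluated at call time, e.g.
#   @skip_if(lambda: not tf2.enabled())
#   def testOnlyMeaningfulInV2(self):
#     ...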
@contextlib.contextmanager
def skip_if_error(test_obj, error_type, messages=None):
"""Context manager to skip cases not considered failures by the tests.
Note that this does not work if used in setUpClass/tearDownClass.
Usage in setUp/tearDown works fine just like regular test methods.
Args:
test_obj: A test object provided as `self` in the test methods; this object
is usually an instance of `unittest.TestCase`'s subclass and should have
`skipTest` method.
error_type: The error type to skip. Note that if `messages` are given, both
`error_type` and `messages` need to match for the test to be skipped.
messages: Optional, a string or list of strings. If `None`, the test will be
skipped if `error_type` matches what is raised; otherwise, the test is
skipped if any of the `messages` is contained in the message of the error
raised, and `error_type` matches the error raised.
Yields:
Nothing.
"""
if messages:
messages = nest.flatten(messages)
try:
yield
except error_type as e:
if not messages or any(message in str(e) for message in messages):
test_obj.skipTest("Skipping error: {}: {}".format(type(e), str(e)))
else:
raise
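# Example (illustrative; the op below is hypothetical):
#   with skip_if_error(self, errors.UnavailableError, "socket closed"):
#     self.evaluate(some_flaky_remote_op)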
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
Args:
func: The function to test.
    warmup_iters: The number of warmup iterations, excluded from the measurement.
Returns:
The wrapped function performing the test.
"""
def wrap_f(f):
def decorator(self, *args, **kwargs):
"""Warms up, gets object counts, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various
# versions of python2.7.x.
for _ in range(warmup_iters):
f(self, *args, **kwargs)
# Some objects are newly created by _get_object_count_by_type(). So
# create and save as a dummy variable to include it as a baseline.
obj_count_by_type = _get_object_count_by_type()
gc.collect()
obj_count_by_type = _get_object_count_by_type()
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
obj_count_by_type = _get_object_count_by_type() - obj_count_by_type
        # In some cases (specifically on MacOS), the object count after the
        # test is somehow smaller than the count before it.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert not obj_count_by_type, (
"The following objects were newly created: %s" %
str(obj_count_by_type))
gc.enable()
return decorator
if func is None:
return wrap_f
else:
return wrap_f(func)
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except (ReferenceError, AttributeError):
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, denylist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(denylist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in denylist:
if b is obj:
return "<test code>"
if obj is denylist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, denylist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
denylist: same as denylist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, denylist):
return "{}{}".format(get_ignore_reason(obj, denylist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, denylist):
"""Builds a reference graph as <referrer> -> <list of referents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
denylist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
denylist = denylist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, denylist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, denylist)
reprs[r_id] = describe(r, denylist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
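# Example (illustrative):
#   _combine_named_parameters(mode=["graph", "eager"], use_gpu=True) returns
#   [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#    OrderedDict([("mode", "eager"), ("use_gpu", True)])]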
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is a required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
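# Example (illustrative): generate_combinations_with_testcase_name(mode=["graph", "eager"])
# returns [OrderedDict([("mode", "graph"), ("testcase_name", "_test_mode_graph")]),
#          OrderedDict([("mode", "eager"), ("testcase_name", "_test_mode_eager")])]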
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
  WARNING: This decorator can only be used in test cases that statically check
  the generated graph. Attempting to evaluate graph or function results via
  session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
return ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_async_and_sync_mode(f):
"""Execute the test in async mode and sync mode."""
@parameterized.named_parameters([("Async", True), ("", False)])
@functools.wraps(f)
def decorator(self, async_mode, *args, **kwargs):
if async_mode:
with context.execution_mode(context.ASYNC):
f(self, *args, **kwargs)
else:
with context.execution_mode(context.SYNC):
f(self, *args, **kwargs)
return decorator
def eager_lazy_remote_copy_on_and_off(f):
"""Execute the test method w/o lazy tensor copy for function remote inputs."""
@parameterized.named_parameters([("WithLazyRemoteCopy", True), ("", False)])
@functools.wraps(f)
def decorator(self, lazily_remote_copy, *args, **kwargs):
if lazily_remote_copy:
context.context().lazy_remote_inputs_copy = True
else:
context.context().lazy_remote_inputs_copy = False
f(self, *args, **kwargs)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return tf_decorator.make_decorator(f, decorated)
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.inside_function():
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
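Example: a usage sketch; the test class and the placeholder check are
illustrative.
```python
class MyGraphOnlyTest(test_util.TensorFlowTestCase):

  @test_util.deprecated_graph_mode_only
  def testPlaceholder(self):
    # Placeholders only exist when building graphs.
    x = array_ops.placeholder(dtypes.float32, shape=[None])
    self.assertEqual(x.shape.rank, 1)
```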
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
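Example: a usage sketch; the class, reason string and assertion are
illustrative.
```python
class MyV1OnlyTest(test_util.TensorFlowTestCase):

  @test_util.run_v1_only("Relies on Session.run semantics")
  def testSessionRun(self):
    with self.cached_session() as sess:
      self.assertEqual(sess.run(constant_op.constant(3)), 3)
```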
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
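Example: a usage sketch; the class and assertion are illustrative.
```python
class MyV2OnlyTest(test_util.TensorFlowTestCase):

  @test_util.run_v2_only
  def testEagerByDefault(self):
    self.assertTrue(context.executing_eagerly())
```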
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
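Example: a usage sketch; the class and the matmul check are illustrative.
```python
class MyGpuTest(test_util.TensorFlowTestCase):

  @test_util.run_gpu_only
  def testMatmulOnGpu(self):
    with ops.device("/device:GPU:0"):
      product = math_ops.matmul([[2.0]], [[3.0]])
    self.assertAllClose(product, [[6.0]])
```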
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None, this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
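Example: a usage sketch; the dates and the test body are illustrative.
```python
class MyCompatTest(test_util.TensorFlowTestCase):

  @test_util.with_forward_compatibility_horizons(None, (2099, 1, 1))
  def testNewKernelPath(self):
    # Runs once with no horizon set, and once as if it were 2099-01-01.
    self.assertAllEqual(math_ops.add([1], [2]), [3])
```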
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
@deprecation.deprecated(None,
"Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Warning: if a non-GPU version of the package is installed, the function will
also return False. Use `tf.test.is_built_with_cuda` to validate whether
TensorFlow was built with CUDA support.
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Note that the keyword arg name "cuda_only" is misleading: this routine will
return True when a GPU device is available, irrespective of whether TF was
built with CUDA support or with ROCm support. The name is kept as is because:
++ Changing the name "cuda_only" to something more generic would break
backward compatibility.
++ Adding an equivalent "rocm_only" would require the implementation to check
the build type. This in turn would require doing the same for CUDA and thus
potentially break backward compatibility.
++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
but would require most (if not all) callers to update the call to use
"cuda_or_rocm_only" instead of "cuda_only".
Returns:
True if a GPU device of the requested kind is available.
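Example: a minimal sketch of the deprecated call and its recommended
replacement; the compute capability shown is illustrative.
```python
if tf.test.is_gpu_available(cuda_only=True,
                            min_cuda_compute_capability=(7, 0)):
  print("CUDA GPU with compute capability >= 7.0 found")
# Preferred in new code:
if tf.config.list_physical_devices("GPU"):
  print("At least one GPU found")
```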
"""
# This was needed earlier when we had support for SYCL in TensorFlow.
del cuda_only
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
cc = gpu_info.compute_capability or (0, 0)
if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders, we should be able
to call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
# of tf.data tests hard to read, because OutOfRangeError is used as the
# signal of completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def disable_cudnn_autotune(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CuDNN autotuning turned off.
Returns:
Decorated function.
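Example: a usage sketch; the class and assertion are illustrative. Inside the
decorated test, TF_CUDNN_USE_AUTOTUNE is set to "false" and XLA GPU
autotuning is disabled.
```python
class MyConvTest(test_util.TensorFlowTestCase):

  @test_util.disable_cudnn_autotune
  def testAutotuneDisabled(self):
    self.assertEqual(os.environ["TF_CUDNN_USE_AUTOTUNE"], "false")
```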
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = "--xla_gpu_autotune_level=0"
if original_xla_flags:
new_xla_flags = original_xla_flags + " " + new_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
result = f(self, *args, **kwargs)
if (original_tf_cudnn_use_autotune is None):
del os.environ["TF_CUDNN_USE_AUTOTUNE"]
else:
os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
if (original_xla_flags is None):
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return result
return decorated
if func is not None:
return decorator(func)
return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# Updates test function by selectively disabling it.
def _disable_test(execute_func):
def disable_test_impl(func):
def decorator(func):
def decorated(self, *args, **kwargs):
if execute_func:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_test_impl
# The description is just for documentation purposes.
def disable_xla(description): # pylint: disable=unused-argument
"""Execute the test method only if xla is not enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_mlir_bridge(description): # pylint: disable=unused-argument
"""Execute the test method only if MLIR bridge is not enabled."""
execute_func = not is_mlir_bridge_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tfrt(unused_description):
def disable_tfrt_impl(cls_or_func):
"""Execute the test only if tfrt is not enabled."""
if tf_inspect.isclass(cls_or_func):
if is_tfrt_enabled():
return None
else:
return cls_or_func
else:
def decorator(func):
def decorated(self, *args, **kwargs):
if is_tfrt_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if cls_or_func is not None:
return decorator(cls_or_func)
return decorator
return disable_tfrt_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
The given decorator is expected to take some arguments and return a
method-level decorator, which is then applied to each test method to produce
a decorated method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given class's test methods with the
decorator.
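Example: a usage sketch mirroring `run_all_without_tensor_float_32` below;
the class name and description are illustrative.
```python
@test_util.for_all_test_methods(test_util.run_without_tensor_float_32,
                                "matmul tolerances assume full float32")
class MyPrecisionSensitiveTest(test_util.TensorFlowTestCase):

  def testMatmulClose(self):
    ...
```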
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
"""This test is not intended to be run with XLA auto jit enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
# The description is just for documentation purposes.
def run_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute test with TensorFloat-32 disabled.
While almost every real-world deep learning model runs fine with
TensorFloat-32, many tests use assertAllClose or similar methods.
TensorFloat-32 matmuls typically will cause such methods to fail with the
default tolerances.
Args:
description: A description used for documentation purposes, describing why
the test requires TensorFloat-32 to be disabled.
Returns:
Decorator which runs a test with TensorFloat-32 disabled.
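Example: a usage sketch; the class, description and values are illustrative.
```python
class MyMatmulTest(test_util.TensorFlowTestCase):

  @test_util.run_without_tensor_float_32("Checks float32 matmul precision")
  def testMatmulPrecision(self):
    x = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
    y = math_ops.matmul(x, x)
    self.assertAllClose(self.evaluate(y), [[7.0, 10.0], [15.0, 22.0]])
```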
"""
def decorator(f):
@functools.wraps(f)
def decorated(self, *args, **kwargs):
allowed = config.tensor_float_32_execution_enabled()
try:
config.enable_tensor_float_32_execution(False)
f(self, *args, **kwargs)
finally:
config.enable_tensor_float_32_execution(allowed)
return decorated
return decorator
# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute all tests in a class with TensorFloat-32 disabled."""
return for_all_test_methods(run_without_tensor_float_32, description)
def matmul_without_tf32(a, b, *args, **kwargs):
"""Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.
This effectively runs matmul without TensorFloat-32. It should only be used in
tests when verifying that some other op or function works correctly, e.g. to test
`tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. In
such cases, the matmul itself is not being tested so it's OK to run it with
higher precision.
If a matmul itself is being tested, or some other op which uses matmul, use
`run_without_tensor_float_32` instead.
Args:
a: First input to tf.linalg.matmul
b: Second input to tf.linalg.matmul
*args: Other positional arguments to tf.linalg.matmul
**kwargs: Other keyword arguments to tf.linalg.matmul
Returns:
A tensor with the same type as `a`.
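Example: a usage sketch; `my_sqrtm` is an illustrative op under test, not
part of this module.
```python
sqrt = my_sqrtm(x)
# The verification matmul is not itself under test, so it may run in float64
# when TensorFloat-32 would otherwise reduce its precision.
self.assertAllClose(test_util.matmul_without_tf32(sqrt, sqrt), x)
```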
"""
if config.tensor_float_32_execution_enabled() and a.dtype == "float32":
a = math_ops.cast(a, "float64")
b = math_ops.cast(b, "float64")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
else:
return math_ops.matmul(a, b, *args, **kwargs)
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tf_session.TF_SetXlaAutoJitMode("2")
pywrap_tf_session.TF_SetXlaMinClusterSize(1)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
if is_mlir_bridge_enabled():
context.context().enable_mlir_bridge = True
self._threads = []
self._tempdir = None
self._cached_session = None
self._test_start_time = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
# Avoid calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
self._test_start_time = time.time()
def tearDown(self):
# If a subclass overrides setUp and doesn't call the parent class's setUp,
# then we may not have set the start time.
if self._test_start_time is not None:
logging.info("time(%s): %ss", self.id(),
round(time.time() - self._test_start_time, 2))
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This ensures that tests from different runs will not be able to
pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, (str, bytes)):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
# Convert tensors and composite tensors to numpy arrays.
return nest.map_structure(lambda t: t.numpy(), tensor,
expand_composites=True)
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
The numpy values of `tensors`.
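Example: a minimal sketch; works both eagerly and in graph mode.
```python
x = constant_op.constant([1.0, 2.0])
value = self.evaluate(x * 2.0)         # -> array([2., 4.], dtype=float32)
nested = self.evaluate({"y": (x, x)})  # nested structures are supported
```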
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""A context manager for a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True from just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
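Example: a minimal sketch; the worker function and values are illustrative.
```python
def worker(test, expected):
  test.assertEqual(expected, 42)

thread = self.checkedThread(target=worker, args=(self, 42))
thread.start()
thread.join()  # Reports a test failure if `worker` raised.
```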
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| <= err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| <= err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tensor(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if that does not work, traverse
# through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Note: the implementation follows
[`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
(and numpy.testing.assert_allclose). It checks whether two arrays are
element-wise equal within a tolerance. The relative difference
(`rtol * abs(b)`) and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
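Example: a minimal sketch of scalar and nested-structure comparison.
```python
self.assertAllClose(1.0, 1.0 + 1e-7)
self.assertAllClose({"w": [[1.0, 2.0]], "b": (0.5,)},
                    {"w": [[1.0000001, 2.0]], "b": (0.5,)})
```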
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerances are relaxed to 1e-3 if at least one of the
arguments is of type float16, and to 1e-2 if at least one is of type bfloat16.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %r. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = %r" % x)
msgs.append("not equal rhs = %r" % y)
# Handle mixed string types as a result of PY2to3 migration. That is, the
# mixing between bytes (b-prefix strings, PY2 default) and unicodes
# (u-prefix strings, PY3 default).
if six.PY3:
if (a.dtype.kind != b.dtype.kind and
{a.dtype.kind, b.dtype.kind}.issubset({"U", "S", "O"})):
a_list = []
b_list = []
# OK to flatten `a` and `b` because they are guaranteed to have the
# same shape.
for out_list, flat_arr in [(a_list, a.flat), (b_list, b.flat)]:
for item in flat_arr:
if isinstance(item, str):
out_list.append(item.encode("utf-8"))
else:
out_list.append(item)
a = np.array(a_list)
b = np.array(b_list)
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
a: the expected numpy ndarray, or anything that can be converted to one.
b: the actual numpy ndarray, or anything that can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate lines. A line of ellipses (...) will be appended at the end if the
number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
if np.ndim(value) == 0:
return [prefix + "[0] : " + str(value)]
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
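Example: a minimal sketch; checks that probabilities lie in (0, 1].
```python
probs = constant_op.constant([0.1, 0.5, 1.0])
self.assertAllInRange(probs, 0.0, 1.0, open_lower_bound=True)
```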
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
if not isinstance(target, list):
arrays = [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3+ compatibility issues
if not six.PY2:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
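  # Hedged usage sketch (assumes the cached_session() helper defined earlier in
  # this class, which delegates to _get_cached_session): asking for the cached
  # session again with a different graph triggers the ValueError above.
  #
  #   with self.cached_session() as sess:
  #     ...                                         # first call creates the session
  #   with self.cached_session(graph=ops.Graph()):  # inconsistent args -> ValueError
  #     ...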
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
import portpicker # pylint: disable=g-import-not-at-top
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
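# For reference (values illustrative only -- the ports are chosen by
# portpicker at runtime), the cluster_dict built above has the shape:
#
#   {"worker": ["localhost:12345", "localhost:12346"],
#    "ps":     ["localhost:12347", "localhost:12348"]}
#
# Each returned server exposes a `.target` string that can be passed to
# tf.compat.v1.Session, as in the docstring example above.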
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
@contextlib.contextmanager
def _fake_gradient_tape_context_manager():
"""tf.gradients(...) implemented as tf.GradientTape context manager interface.
This is useful to test tf.gradients() in tests that uses tf.GradientTape().
Yields:
gradient tape instance that's implemented by tf.gradients() underneath.
"""
try:
class FakeGradientTape:
def watch(self, x):
pass
def gradient(self, y, x, grad_ys=None):
result = gradients_impl.gradients(y, x, grad_ys)
# Unlike `tape.gradient()`, `tf.gradients()` returns a list for a single
# element. So unpack if needed to match `tape.gradient()` behavior.
if not isinstance(x, (list, tuple)):
assert len(result) == 1
return result[0]
return result
yield FakeGradientTape()
finally:
pass
class AbstractGradientTape:
"""Abstract GradientTape context manager that has multiple implementations.
This is useful to test both tf.GradientTape() and tf.gradients() without
duplicating tests.
"""
def __init__(self, use_tape, persistent=False):
self._use_tape = use_tape
self._persistent = persistent
def __enter__(self):
if self._use_tape:
self._tape_impl = backprop.GradientTape(persistent=self._persistent)
else:
self._tape_impl = _fake_gradient_tape_context_manager()
return self._tape_impl.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self._tape_impl.__exit__(exc_type, exc_val, exc_tb)
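# Hedged usage sketch (names are illustrative): the same test body can exercise
# both the eager GradientTape and the graph-mode tf.gradients() path by
# toggling `use_tape`.
#
#   with AbstractGradientTape(use_tape=use_tape) as tape:
#     tape.watch(x)
#     loss = math_ops.reduce_sum(x * x)
#   grad = tape.gradient(loss, x)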
@contextlib.contextmanager
def run_functions_eagerly(run_eagerly):
"""Runs functions eagerly if `run_eagerly` is true.
WARNING: Setting `run_eagerly` to True in tests running in V1 graph mode
  *WILL NOT* make tf.function run eagerly, because eager execution is disabled by
default in V1. Instead, tf.function will run as a traced graph function.
Ensures that the state (for running functions eagerly) is back to the initial
`def_function.RUN_FUNCTIONS_EAGERLY` state.
Args:
run_eagerly: Boolean determining whether to run the function eagerly or not.
Raises:
ValueError if `run_eagerly` is not a boolean.
Yields:
Nothing.
"""
if not isinstance(run_eagerly, bool):
raise ValueError(
"Expected bool for `run_eagerly` but got {}".format(run_eagerly))
is_eager = context.executing_eagerly()
if not is_eager and run_eagerly:
logging.warning(
"Running tf.function eagerly in V1 graph mode is not supported. "
"tf.function will be run as a traced graph function.")
initial_state = def_function.functions_run_eagerly()
def_function.run_functions_eagerly(run_eagerly)
try:
yield
finally:
def_function.run_functions_eagerly(initial_state)
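# Hedged usage sketch (the function name is illustrative): temporarily force
# eager execution of tf.functions inside a test.
#
#   with run_functions_eagerly(True):
#     my_tf_function()   # runs eagerly here (outside V1 graph mode)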
|
the-stack_106_28423 | """ conftest: global fixture file for tests """
import time
import pytest
@pytest.fixture(autouse=True)
def time_test():
"""Time a test and print out how long it took
    Note, this fixture is for example purposes. The same information
    can also be obtained with the `--durations=n` command line flag.
"""
before = time.time()
yield
after = time.time()
print(f"Test took {after - before:.02f} seconds!")
@pytest.fixture(autouse=True, scope="session")
def time_all_tests():
""" Time all tests and print out how long they took """
before = time.time()
yield
after = time.time()
print(f"Total test time: {after - before:.02f} seconds!")
|
the-stack_106_28425 | ###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ....lib.gstreamer.msdk.util import *
from ....lib.gstreamer.msdk.vpp import VppTest
spec = load_test_spec("vpp", "sharpen")
@slash.requires(*platform.have_caps("vpp", "sharpen"))
class default(VppTest):
def before(self):
vars(self).update(
caps = platform.get_caps("vpp", "sharpen"),
vpp_op = "sharpen",
)
super(default, self).before()
@slash.parametrize(*gen_vpp_sharpen_parameters(spec))
def test(self, case, level):
vars(self).update(spec[case].copy())
if self.width == 1280 and self.height == 720:
if os.environ.get("LIBVA_DRIVER_NAME", "i965") == "i965":
slash.add_failure(
"1280x720 resolution is known to cause GPU HANG with i965 driver")
return
vars(self).update(case = case, level = level)
self.vpp()
def check_metrics(self):
psnr = calculate_psnr(
self.source, self.ofile,
self.width, self.height,
self.frames, self.format)
assert psnr[-2] == 100, "Cb(U) should not be affected by SHARPEN filter"
assert psnr[-1] == 100, "Cr(V) should not be affected by SHARPEN filter"
def compare(k, ref, actual):
assert ref is not None, "Invalid reference value"
assert abs(ref[-3] - actual[-3]) < 0.2, "Luma (Y) out of baseline range"
get_media().baseline.check_result(
compare = compare, context = self.refctx, psnr = psnr)
|
the-stack_106_28428 | #!/usr/bin/env python
import os, sys
import boto3
import config
from deepzoom import ImageCreator
import shutil
s3 = boto3.client('s3')
def to_zoom(event, context):
"""
Receives an S3 event record for a new object.
Downloads the object to the local filesystem,
uses Deepzoom to tile it, and then uploads the
    resulting DZI and tileset to a specified bucket.
"""
bucket = event['Records'][0]['s3']['bucket']['name'] # Bucket where object was created
key = event['Records'][0]['s3']['object']['key'] # Key (relpath) of object in bucket
base_key = '.'.join(key.split('.')[:-1]) # Object filename without its extension
extension = key.split('.')[-1].lower() # File extension of object
dzi_key = base_key + '.dzi' # Key of DZI to be created
if extension not in config.ALLOWED_EXTENSIONS: # Abort early due to unsupported file extension
print('extension {} not allowed'.format(extension))
return False
local_file = os.path.join(config.TEMP_DIR, key) # Local file where object is stored
dzi_file = os.path.join(config.TEMP_DIR, dzi_key) # Local file where new DZI will be stored
tile_dir = os.path.join(config.TEMP_DIR, base_key + '_files') # Local directory where tiles will be stored
dzi_key = os.path.relpath(dzi_file, config.TEMP_DIR) # Relative path to DZI, used as key when uploaded
print(bucket, key, local_file, dzi_key)
s3.download_file(bucket, key, local_file) # Download object to local filesystem for processing
creator = ImageCreator(tile_size=config.DEEPZOOM_TILE_SIZE,
tile_format=config.DEEPZOOM_TILE_FORMAT,
image_quality=config.DEEPZOOM_IMAGE_QUALITY,
resize_filter=config.DEEPZOOM_RESIZE_FILTER)
creator.create(local_file, dzi_file) # Convert object to tileset using Deepzoom library
s3.upload_file(dzi_file, config.S3_ZOOM_BUCKET, dzi_key) # Upload DZI file
for directory, directories, files in os.walk(tile_dir, topdown=False): # Loop through tile_dir and upload all tiles
for name in files:
file_path = os.path.join(directory, name)
file_key = os.path.relpath(file_path, config.TEMP_DIR)
# Upload tile, retrying if exception is thrown
retry = True
while retry:
try:
s3.upload_file(file_path, config.S3_ZOOM_BUCKET, file_key)
retry = False
except:
pass
shutil.rmtree(tile_dir, ignore_errors=True) # Delete tile_dir before quitting |
the-stack_106_28429 | from pyrogram import Client, filters
from utils import save_file
from info import CHANNELS
media_filter = filters.document | filters.video | filters.audio
@Client.on_message(filters.chat(CHANNELS) & media_filter)
async def media(bot, message):
"""Media Handler"""
for file_type in ("document", "video", "audio"):
media = getattr(message, file_type, None)
if media is not None:
break
else:
return
media.file_type = file_type
media.caption = message.caption
await save_file(media) |
the-stack_106_28430 | '''
Created on Aug 9, 2016
@author: David Zwicker <[email protected]>
'''
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.ticker import Locator
import six
from six.moves import range
def doublearrow(xs, ys, w=0.1, **kwargs):
""" Plots a double arrow between the two given coordinates """
ax = kwargs.pop('ax', None)
if ax is None:
ax = plt.gca()
# set parameters of the arrow
arrowparams = {
'head_width': 2*w,
'head_length': w,
'length_includes_head': True,
'shape': 'full',
'head_starts_at_zero': False
}
arrowparams.update(kwargs)
# plot two arrows to mimic double arrow
dx = xs[1] - xs[0]
dy = ys[1] - ys[0]
ax.arrow(xs[0], ys[0], dx, dy, **arrowparams)
ax.arrow(xs[1], ys[1], -dx, -dy, **arrowparams)
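# Hedged usage sketch (coordinates and width are illustrative): draw a
# double-headed arrow between two points on a given axes.
#
#   fig, ax = plt.subplots()
#   doublearrow([0.2, 0.8], [0.5, 0.5], w=0.02, ax=ax, color='k')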
def log_slope_indicator(xmin=1., xmax=2., factor=None, ymax=None, exponent=1.,
label_x='', label_y='', space=15, loc='lower', ax=None,
debug=False, **kwargs):
"""
Function adding a triangle to axes `ax`. This is useful for indicating
slopes in log-log-plots. `xmin` and `xmax` denote the x-extend of the
triangle. The y-coordinates are calculated according to the formula
y = factor*x**exponent
If supplied, the texts `label_x` and `label_y` are put next to the
catheti. The parameter `loc` determines whether the catheti are
above or below the diagonal. Additionally, kwargs can be used to
set the style of the triangle
`loc` determines whether the triangle appears above (`loc='upper'`) or below
(`loc='lower'; default) the diagonal line.
"""
# prepare the axes and determine
if ax is None:
ax = plt.gca()
if loc == 'lower':
lower = (exponent > 0)
elif loc == 'upper':
lower = (exponent < 0)
else:
raise ValueError('`loc` must be either `lower` or `upper`.')
if ymax is not None:
factor = ymax/max(xmin**exponent, xmax**exponent)
if factor is None:
factor = 1.
# get triangle coordinates
y = factor*np.array((xmin, xmax), np.double)**exponent
if lower:
pts = np.array([[xmin, y[0]], [xmax, y[0]], [xmax, y[1]]])
else:
pts = np.array([[xmin, y[0]], [xmax, y[1]], [xmin, y[1]]])
if debug:
print('The coordinates of the log slope indicator are %s' % pts)
# add triangle to axis
if not('facecolor' in kwargs or 'fc' in kwargs):
kwargs['facecolor'] = 'none'
if not('edgecolor' in kwargs or 'ec' in kwargs):
kwargs['edgecolor'] = 'k'
p = Polygon(pts, closed=True, **kwargs)
ax.add_patch(p)
# add labels
xt = np.exp(0.5*(np.log(xmin) + np.log(xmax)))
# dx = (xmax/xmin)**0.1
yt = np.exp(np.log(y).mean())
# dy = (y[1]/y[0])**0.1
sgn = np.sign(exponent)
if lower:
ax.annotate(
label_x, xy=(xt, y[0]), xytext=(0, -sgn*space),
textcoords='offset points', size='x-small',
horizontalalignment='center',
verticalalignment='top'
)
ax.annotate(
label_y, xy=(xmax, yt), xytext=(space, 0),
textcoords='offset points', size='x-small',
horizontalalignment='right',
verticalalignment='center'
)
else:
ax.annotate(
label_x, xy=(xt, y[1]), xytext=(0, sgn*space),
textcoords='offset points', size='x-small',
horizontalalignment='center',
verticalalignment='bottom'
)
ax.annotate(
label_y, xy=(xmin, yt), xytext=(-space, 0),
textcoords='offset points', size='x-small',
horizontalalignment='left',
verticalalignment='center'
)
class MinorSymLogLocator(Locator):
"""
Dynamically find minor tick positions based on the positions of
major ticks for a symlog scaling.
"""
def __init__(self, linthresh):
"""
Ticks will be placed between the major ticks.
The placement is linear for x between -linthresh and linthresh,
otherwise its logarithmically
"""
self.linthresh = linthresh
def __call__(self):
'Return the locations of the ticks'
majorlocs = self.axis.get_majorticklocs()
# iterate through minor locs
minorlocs = []
# handle the lowest part
for i in range(1, len(majorlocs)):
majorstep = majorlocs[i] - majorlocs[i-1]
if abs(majorlocs[i-1] + majorstep/2) < self.linthresh:
ndivs = 10
else:
ndivs = 9
minorstep = majorstep / ndivs
locs = np.arange(majorlocs[i-1], majorlocs[i], minorstep)[1:]
minorlocs.extend(locs)
return self.raise_if_exceeds(np.array(minorlocs))
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
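# Hedged usage sketch (the `linthresh` value is illustrative, and the keyword
# name accepted by set_yscale varies between matplotlib versions):
#
#   fig, ax = plt.subplots()
#   ax.set_yscale('symlog', linthresh=0.1)
#   ax.yaxis.set_minor_locator(MinorSymLogLocator(0.1))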
def render_table(data, col_width=3.0, row_height=0.625, font_size=14,
header_color='#40466e', row_colors=['#f1f1f2', 'w'],
edge_color='w', bbox=[0, 0, 1, 1], header_columns=0,
ax=None, **kwargs):
"""
Renders the table given in `data` in a matplotlib axes.
Code inspired by http://stackoverflow.com/a/39358722/932593
"""
if ax is None:
size = ((np.array(data.shape[::-1]) + np.array([0, 1])) *
np.array([col_width, row_height]))
_, ax = plt.subplots(figsize=size)
ax.axis('off')
mpl_table = ax.table(cellText=data.values, bbox=bbox,
colLabels=data.columns, **kwargs)
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
for k, cell in six.iteritems(mpl_table._cells):
cell.set_edgecolor(edge_color)
if k[0] == 0 or k[1] < header_columns:
cell.set_text_props(weight='bold', color='w')
cell.set_facecolor(header_color)
else:
cell.set_facecolor(row_colors[k[0] % len(row_colors)])
return ax
def determine_label_positions(pos, sigma=0.05, repulsion=0.1, attraction=0.1,
steps=100, noise=1e-3):
""" determines label positions automatically by moving labels that are too
close a bit apart. The algorithm is based on a physical system with labels
connected to the given position by springs of stiffness `attraction`, while
all other labels possess repulsive potentials of strength `repulsion` and
    range `sigma`. The physical system is solved by iterating `steps` times, and
    we additionally add noise of strength `noise` to break some degenerate
situations.
Note that this function assumes that labels are positioned on a linear
scale. If labels should be positioned on a log-scale, the positions should
be transformed to a linear scale before and after applying this function.
"""
pos = np.array(pos, dtype=np.double) # turn into array and make a copy
if pos.ndim != 2 or pos.shape[1] != 2:
raise ValueError('Input data does not seem to be a 2d coordinate list')
dim = len(pos)
# scale positions to unity
pos_mean = pos.mean(axis=0)
pos -= pos_mean
pos_scale = np.abs(pos).max(axis=0)
pos /= pos_scale
pos_orig = pos.copy()
# iterate several times to find a good position
for _ in range(steps):
# apply noise term
pos += noise * (np.random.random(dim)[:, None] - 0.5)
# iterate over all positions
for i in range(dim):
# evaluate distance to original position
pos[i] -= attraction * (pos[i] - pos_orig[i])
# evaluate distance to all other positions
diff = pos[i] - pos
dist = np.linalg.norm(diff, axis=1)
j = (dist != 0)
force = diff[j] / dist[j, None] * np.exp(-(dist[j, None]/sigma)**2)
pos[i] += repulsion * np.sum(force, axis=0)
return pos * pos_scale + pos_mean
def add_scaled_colorbar(im, ax, aspect=20, pad_fraction=0.5, **kwargs):
""" add a vertical color bar to an image plot
    The height of the colorbar is adjusted to match the plot, so the width
    determined by `aspect` is given relative to that height. The gap between the
    colorbar and the plot is given by `pad_fraction` as a fraction of the
    colorbar width.
Inspired by https://stackoverflow.com/a/33505522/932593
"""
from mpl_toolkits import axes_grid1
divider = axes_grid1.make_axes_locatable(im.axes)
width = axes_grid1.axes_size.AxesY(im.axes, aspect=1./aspect)
pad = axes_grid1.axes_size.Fraction(pad_fraction, width)
cax = divider.append_axes("right", size=width, pad=pad)
plt.sca(ax)
return im.axes.figure.colorbar(im, cax=cax, **kwargs)
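# Hedged usage sketch (random data, purely illustrative): the colorbar height
# then matches the image axes.
#
#   fig, ax = plt.subplots()
#   im = ax.imshow(np.random.random((16, 16)))
#   add_scaled_colorbar(im, ax, aspect=20, pad_fraction=0.5)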
if __name__ == "__main__":
print('This file is intended to be used as a module.')
print('This code serves as a test for the defined methods.')
tests = (
'log_slope_indicator',
'log_slope_indicator_neg',
)
if 'log_slope_indicator' in tests:
test_x = np.logspace(0, 3, 20)
test_y = test_x**2
test_y *= (1 + 0.1*np.random.randn(20))
plt.loglog(test_x, test_y, '+')
log_slope_indicator(
xmin=10, xmax=100, factor=0.5, exponent=2.,
label_x='1', label_y='2', ec='red'
)
log_slope_indicator(
xmin=100, xmax=300, factor=2., exponent=2.,
label_x='1', label_y='2', loc='upper'
)
plt.show()
if 'log_slope_indicator_neg' in tests:
test_x = np.logspace(0, 3, 20)
test_y = test_x**-2
test_y *= (1 + 0.1*np.random.randn(20))
plt.loglog(test_x, test_y, '+')
log_slope_indicator(
xmin=10, xmax=100, factor=0.5, exponent=-2.,
label_x='1', label_y='2', ec='red'
)
log_slope_indicator(
xmin=100, xmax=300, factor=2., exponent=-2.,
label_x='1', label_y='2', loc='upper'
)
plt.show()
|
the-stack_106_28433 | # EcoDes-DK - Fix files missing from VRTs
# Jakob J. Assmann [email protected] 2 December 2021
# Most files missing from the VRTs are missing because they are the wrong raster
# file type (e.g. Int16 instead of float32) - the only tiles affected seem to
# be NA tiles.
# These originate from the processing workflow and OPALS output, but
# also from the fill_processing_gaps.py script that does not account for
# differences in the file paths. This script is here to correct the raster
# type of those files.
# !!! This scripts requires check_vrt_completeness.py to be run beforehand !!!
# Prep environment:
# Dependencies
import pandas
import os
import re
import scandir
import shutil
import itertools
import subprocess
from osgeo import gdal
from dklidar import settings
# Function definitions
def get_data_type(file_name):
raster = gdal.Open(file_name)
dataType = gdal.GetDataTypeName(raster.GetRasterBand(1).DataType)
raster = None
return(dataType)
def translate_file(file_name, data_type):
# Copy file to temp folder:
temp_file = settings.scratch_folder + '/temp_raster.tif'
shutil.copy(file_name, temp_file)
# remove old file
os.remove(file_name)
# translate file
os.system(settings.gdal_translate_bin +
'-ot ' + data_type + ' ' +
temp_file + ' ' +
file_name)
# Load missing tiles
missing_files = pandas.read_csv(settings.log_folder +
'/missing_files_in_vrts.csv')
# Set DataTypes for non-int16 variables
data_types_df = pandas.DataFrame(
zip(*[
['solar_radiation',
'amplitude_mean',
'amplitude_sd',
'date_stamp_min',
'date_stamp_max',
'date_stamp_mode'],
['Int32',
'Float32',
'Float32',
'Int32',
'Int32',
'Int32']]),
columns = ['variable','data_type'])
# determine output folder structure based on original processing
folders = []
for folder in scandir.scandir(settings.output_folder):
if folder.is_dir():
sub_folders = [sub_folder.path for sub_folder in scandir.scandir(folder.path) if sub_folder.is_dir()]
if len(sub_folders) > 0:
for sub_folder in sub_folders:
folders.append(sub_folder)
else:
folders.append(folder.path)
# Clean up folder paths (use a list so the collection can be iterated repeatedly)
folders = [re.sub('\\\\', '/', folder) for folder in folders]
# Set up progres bar
progress = 0
for i in range(0,len(missing_files.index)):
# Grab folder name
folder = list(
itertools.compress(
folders,
[bool(re.search(missing_files.variable[i], folder)) for folder in folders]))
folder = folder[0]
# Grab data_type
data_type = list(
itertools.compress(
data_types_df.data_type,
[bool(re.search(missing_files.variable[i], variable)) for variable in data_types_df.variable]))
data_type = data_type[0]
# Set file path
file_name = folder + '/' + missing_files.file_name[i]
    # Check whether the data types already match; if so, skip this file
    if data_type == get_data_type(file_name):
        continue
# Copy to temp file
temp_file = settings.scratch_folder + '/' + missing_files.file_name[i]
shutil.copy(file_name, temp_file)
# Break for debugging
# file_name = settings.scratch_folder + '/test_out/' + missing_files.file_name[i]
# Remove file from original folder
os.remove(file_name)
# Construct gdal command
cmd = settings.gdal_translate_bin + '-ot ' + data_type + ' ' + temp_file + ' ' + file_name
print(cmd)
# Execute gdal commannd and swallow output
os.system(cmd)
# Remove temp_file
os.remove(temp_file)
# Update progress
progress = float(i + 1) / float(len(missing_files.index))
# Update progress bar
print('\n\r|' +
'#' * int(round(progress * 54)) +
'-' * int(round((1 - progress) * 54)) +
'| ' +
str(int(round(progress * 100))) + '%\n'),
|
the-stack_106_28434 | import types
from functools import wraps
import numpy as np
import datetime
import collections
import warnings
import copy
from pandas.compat import(
zip, range, long, lzip,
callable, map
)
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.compat.numpy import _np_version_under1p8
from pandas.core.base import (PandasObject, SelectionMixin, GroupByError,
DataError, SpecificationError)
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import (Index, MultiIndex, CategoricalIndex,
_ensure_index)
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import (cache_readonly, Substitution, Appender,
make_signature, deprecate_kwarg)
from pandas.formats.printing import pprint_thing
from pandas.util.validators import validate_kwargs
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object,
is_datetime_or_timedelta_dtype, is_bool,
is_bool_dtype, AbstractMethodError,
_maybe_fill)
from pandas.core.config import option_context, is_callable
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_doc_template = """
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile',
'fillna',
'mad',
'any', 'all',
'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'unique'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift'])
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
_local_template = "Compute %(f)s of group values"
@Substitution(name='groupby', f=name)
@Appender(_doc_template)
@Appender(_local_template)
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result._convert(datetime=True)
return result
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target
object
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
object.
These are local specifications and will override 'global' settings,
that is the parameters axis and level which are passed to the groupby
itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see
`here <http://pandas.pydata.org/pandas-docs/stable/timeseries.html>`_.
axis : number/name of the axis, defaults to 0
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
Syntactic sugar for ``df.groupby('A')``
>>> df.groupby(Grouper(key='A'))
Specify a resample operation on the column 'date'
>>> df.groupby(Grouper(key='date', freq='60s'))
Specify a resample operation on the level 'date' on the columns axis
with a frequency of 60s
>>> df.groupby(Grouper(level='date', freq='60s', axis=1))
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):
self.key = key
self.level = level
self.freq = freq
self.axis = axis
self.sort = sort
self.grouper = None
self.obj = None
self.indexer = None
self.binner = None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key],
axis=self.axis,
level=self.level,
sort=self.sort)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError(
"The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax.get_level_values(
level), name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(
"The level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
# use stable sort to support first, last, nth
indexer = self.indexer = ax.argsort(kind='mergesort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis,
convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
""" default to the standard binner here """
group_axis = obj._get_axis(self.axis)
return Grouping(group_axis, None, obj=obj, name=self.key,
level=self.level, sort=self.sort, in_axis=False)
@property
def groups(self):
return self.grouper.groups
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = 'plot'
return self._groupby.apply(f)
def __getattr__(self, name):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
class _GroupBy(PandasObject, SelectionMixin):
_group_selection = None
_apply_whitelist = frozenset([])
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False, **kwargs):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.mutated = kwargs.pop('mutated', False)
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys,
axis=axis,
level=level,
sort=sort,
mutated=self.mutated)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
# we accept no other args
validate_kwargs('group', kwargs, {})
def __len__(self):
return len(self.groups)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
we create the grouper on instantiation
sub-classes may have a different policy
"""
pass
@property
def groups(self):
""" dict {group name -> group labels} """
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
safe get multiple indices, translate keys for
datelike to underlying repr
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp, datetime.datetime)):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = ("must supply a tuple to get_group with multiple"
" grouping keys")
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError:
# turns out it wasn't a tuple
msg = ("must supply a a same-length tuple to get_group"
" with multiple grouping keys")
raise ValueError(msg)
converters = [get_converter(s) for s in index_sample]
names = [tuple([f(n) for f, n in zip(converters, name)])
for name in names]
else:
converter = get_converter(index_sample)
names = [converter(name) for name in names]
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp, 'groupings', None) is not None and \
self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings
if g.level is None and g.in_axis]
if len(groupers):
self._group_selection = ax.difference(Index(groupers)).tolist()
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(
self._get_indices(self.grouper.result_index)))
result.set_axis(self.axis, index)
result = result.sort_index(axis=self.axis)
result.set_axis(self.axis, self.obj._get_axis(self.axis))
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
plot = property(GroupByPlot)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to setup the selection
# as are not passed directly but in the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis or \
kwargs_with_axis['axis'] is None:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so need to raise
# ValueError
# if we don't have this method to indicated to aggregate to
# mark this column as an error
try:
return self._aggregate_item_by_item(name,
*args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Substitution(name='groupby')
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function ((f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform"""
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if is_callable(func):
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
else:
raise ValueError('func must be a callable if args or '
'kwargs are supplied')
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment', None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(
keys,
values,
not_indexed_same=mutated or self.mutated)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending=True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Note
----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = _get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
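    # Worked example of the run-length trick above (illustrative): for group
    # ids that sort to [0, 0, 0, 1, 1],
    #   run = [True, False, False, True, False]   (True marks the start of a group)
    #   rep = [3, 2]                              (run length of each group)
    # and the ascending cumcount comes out as [0, 1, 2, 0, 1]; with
    # ascending=False it would be [2, 1, 0, 1, 0]. The final `out[rev]` step
    # maps these counts back to the original (unsorted) row order.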
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not lib.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_transform(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.transform(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_transformed_output(output, names)
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise AbstractMethodError(self)
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in values:
if v is not None:
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if (isinstance(result, Series) and
getattr(self, 'name', None) is not None):
result.name = self.name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype='int64')
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
class GroupBy(_GroupBy):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
def irow(self, i):
"""
DEPRECATED. Use ``.nth(i)`` instead
"""
# 10177
warnings.warn("irow(i) is deprecated. Please use .nth(i)",
FutureWarning, stacklevel=2)
return self.nth(i)
@Substitution(name='groupby')
@Appender(_doc_template)
def count(self):
"""Compute count of group, excluding missing values"""
# defined here for API doc
raise NotImplementedError
@Substitution(name='groupby')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
nv.validate_groupby_func('mean', args, kwargs)
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
# TODO: implement at Cython level?
nv.validate_groupby_func('std', args, kwargs)
return np.sqrt(self.var(ddof=ddof))
@Substitution(name='groupby')
@Appender(_doc_template)
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_groupby_func('var', args, kwargs)
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name='groupby')
@Appender(_doc_template)
def size(self):
"""Compute group sizes"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
@Substitution(name='groupby')
@Appender(_doc_template)
def ohlc(self):
"""
Compute sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
@Substitution(name='groupby')
@Appender(_doc_template)
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper
Return a new grouper with our resampler appended
"""
from pandas.tseries.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling
        functionality per group
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
        functionality per group
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def pad(self, limit=None):
"""
Forward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self.apply(lambda x: x.ffill(limit=limit))
ffill = pad
@Substitution(name='groupby')
@Appender(_doc_template)
def backfill(self, limit=None):
"""
Backward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self.apply(lambda x: x.bfill(limit=limit))
bfill = backfill
@Substitution(name='groupby')
@Appender(_doc_template)
def nth(self, n, dropna=None):
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna, will take the nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame);
this is equivalent to calling dropna(how=dropna) before the
groupby.
Parameters
----------
n : int or list of ints
a single nth value for the row or a list of nth values
dropna : None or str, optional
apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
NaNs denote group exhausted when using dropna
>>> g.nth(1, dropna='any')
B
A
1 NaN
5 NaN
"""
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, (set, list, tuple)):
nth_values = list(set(n))
if dropna is not None:
raise ValueError(
"dropna option with a list of nth values is not supported")
else:
raise TypeError("n needs to be an int or a list/set/tuple of ints")
nth_values = np.array(nth_values, dtype=np.intp)
self._set_selection_from_grouper()
if not dropna:
mask = np.in1d(self._cumcount_array(), nth_values) | \
np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)
out = self._selected_obj[mask]
if not self.as_index:
return out
ids, _, _ = self.grouper.group_info
out.index = self.grouper.result_index[ids[mask]]
return out.sort_index() if self.sort else out
if isinstance(self._selected_obj, DataFrame) and \
dropna not in ['any', 'all']:
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
# (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
else:
# create a grouper with the original parameters, but on the dropped
# object
grouper, _, _ = _get_grouper(dropped, key=self.keys,
axis=self.axis, level=self.level,
sort=self.sort,
mutated=self.mutated)
grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or \
len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
@Substitution(name='groupby')
@Appender(_doc_template)
def cumcount(self, ascending=True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumprod(self, axis=0, *args, **kwargs):
"""Cumulative product for each group"""
nv.validate_groupby_func('cumprod', args, kwargs)
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis))
return self._cython_transform('cumprod')
@Substitution(name='groupby')
@Appender(_doc_template)
def cumsum(self, axis=0, *args, **kwargs):
"""Cumulative sum for each group"""
nv.validate_groupby_func('cumsum', args, kwargs)
if axis != 0:
return self.apply(lambda x: x.cumsum(axis=axis))
return self._cython_transform('cumsum')
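# Illustrative sanity check (not part of the original module; ``df`` is a
# hypothetical demonstration frame): both cumulative transforms restart at
# each group boundary:
#
# >>> df = DataFrame({'A': ['a', 'a', 'b'], 'B': [1, 2, 3]})
# >>> df.groupby('A')['B'].cumsum().tolist()
# [1, 3, 3]
# >>> df.groupby('A')['B'].cumprod().tolist()
# [1, 2, 3]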
@Substitution(name='groupby')
@Appender(_doc_template)
def shift(self, periods=1, freq=None, axis=0):
"""
Shift each group by periods observations
Parameters
----------
periods : integer, default 1
number of periods to shift
freq : frequency string
axis : axis to shift, default 0
"""
if freq is not None or axis != 0:
return self.apply(lambda x: x.shift(periods, freq, axis))
labels, _, ngroups = self.grouper.group_info
# filled in by Cython
indexer = np.zeros_like(labels)
_algos.group_shift_indexer(indexer, labels, ngroups, periods)
output = {}
for name, obj in self._iterate_slices():
output[name] = algos.take_nd(obj.values, indexer)
return self._wrap_transformed_output(output)
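# Illustrative usage sketch (not part of the original module; ``df`` is a
# hypothetical demonstration frame): each group is shifted independently and
# the vacated leading positions become NaN:
#
# >>> df = DataFrame({'A': ['a', 'a', 'b', 'b'], 'B': [1, 2, 3, 4]})
# >>> df.groupby('A')['B'].shift(1)
# 0    NaN
# 1    1.0
# 2    NaN
# 3    3.0
# Name: B, dtype: float64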
@Substitution(name='groupby')
@Appender(_doc_template)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
mask = self._cumcount_array() < n
return self._selected_obj[mask]
@Substitution(name='groupby')
@Appender(_doc_template)
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').head(1)
A B
0 a 1
2 b 1
"""
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask]
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
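# Illustrative dispatch sketch (not part of the original module): this
# module-level helper only routes to the appropriate GroupBy subclass:
#
# >>> groupby(Series([1, 2, 3]), [0, 0, 1])          # doctest: +SKIP
# ...returns a SeriesGroupBy...
# >>> groupby(DataFrame({'a': [1, 2]}), [0, 1])      # doctest: +SKIP
# ...returns a DataFrameGroupBy...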
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds
the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True,
mutated=False):
self._filter_empty_groups = self.compressed = len(groupings) != 1
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.mutated = mutated
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except Exception:
# raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index)
for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
ids, _, ngroup = self.group_info
ids = com._ensure_platform_int(ids)
out = np.bincount(ids[ids != -1], minlength=ngroup or None)
return Series(out, index=self.result_index, dtype='int64')
@cache_readonly
def _max_groupsize(self):
"""
Compute size of largest group
"""
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def is_monotonic(self):
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape,
sort=True, xnull=True)
return _compress_group_index(group_index, sort=self.sort)
ping = self.groupings[0]
return ping.labels, np.arange(len(ping.group_index))
@cache_readonly
def ngroups(self):
return len(self.result_index)
@property
def recons_labels(self):
comp_ids, obs_ids, _ = self.group_info
labels = (ping.labels for ping in self.groupings)
return decons_obs_group_ids(comp_ids,
obs_ids, self.shape, labels, xnull=True)
@cache_readonly
def result_index(self):
if not self.compressed and len(self.groupings) == 1:
return self.groupings[0].group_index.rename(self.names[0])
return MultiIndex(levels=[ping.group_index for ping in self.groupings],
labels=self.recons_labels,
verify_integrity=False,
names=self.names)
def get_group_levels(self):
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
name_list = []
for ping, labels in zip(self.groupings, self.recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
# ------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'aggregate': {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'ohlc': 'group_ohlc',
},
'transform': {
'cumprod': 'group_cumprod',
'cumsum': 'group_cumsum',
}
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
def _get_cython_function(self, kind, how, values, is_numeric):
dtype_str = values.dtype.name
def get_func(fname):
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(_algos, fname, None)
if f is not None and is_numeric:
return f
# otherwise find dtype-specific version, falling back to object
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
ftype = self._cython_functions[kind][how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def _cython_operation(self, kind, values, how, axis):
assert kind in ['transform', 'aggregate']
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError("arity of more than 1 is not "
"supported for the 'how' argument")
out_shape = (self.ngroups,) + values.shape[1:]
is_numeric = is_numeric_dtype(values.dtype)
if is_datetime_or_timedelta_dtype(values.dtype):
values = values.view('int64')
is_numeric = True
elif is_bool_dtype(values.dtype):
values = _algos.ensure_float64(values)
elif com.is_integer_dtype(values):
values = values.astype('int64', copy=False)
elif is_numeric and not com.is_complex_dtype(values):
values = _algos.ensure_float64(values)
else:
values = values.astype(object)
try:
func, dtype_str = self._get_cython_function(
kind, how, values, is_numeric)
except NotImplementedError:
if is_numeric:
values = _algos.ensure_float64(values)
func, dtype_str = self._get_cython_function(
kind, how, values, is_numeric)
else:
raise
if is_numeric:
out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize)
else:
out_dtype = 'object'
labels, _, _ = self.group_info
if kind == 'aggregate':
result = _maybe_fill(np.empty(out_shape, dtype=out_dtype),
fill_value=np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(
result, counts, values, labels, func, is_numeric)
elif kind == 'transform':
result = _maybe_fill(np.empty_like(values, dtype=out_dtype),
fill_value=np.nan)
# temporary storage for running-total type transforms
accum = np.empty(out_shape, dtype=out_dtype)
result = self._transform(
result, accum, values, labels, func, is_numeric)
if com.is_integer_dtype(result):
if len(result[result == tslib.iNaT]) > 0:
result = result.astype('float64')
result[result == tslib.iNaT] = np.nan
if kind == 'aggregate' and \
self._filter_empty_groups and not counts.all():
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
com._ensure_object(result),
(counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def aggregate(self, values, how, axis=0):
return self._cython_operation('aggregate', values, how, axis)
def transform(self, values, how, axis=0):
return self._cython_operation('transform', values, how, axis)
def _aggregate(self, result, counts, values, comp_ids, agg_func,
is_numeric):
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently "
"limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def _transform(self, result, accum, values, comp_ids, transform_func,
is_numeric):
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently "
"limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
transform_func(result[:, :, i], chunk,
comp_ids, accum)
else:
transform_func(result, values, comp_ids, accum)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = self._is_builtin_func(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _get_group_index_sorter(group_index, ngroups)
obj = obj.take(indexer, convert=False)
group_index = algos.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
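# Worked sketch (illustrative, not part of the original module): with
# closed='left', values strictly below each right bin edge are counted:
#
# >>> generate_bins_generic(np.array([1, 2, 3, 7, 8]),
# ...                       np.array([0, 3, 10]), 'left')
# array([2, 5])
#
# i.e. the first bin is values[0:2] ([1, 2]) and the second is
# values[2:5] ([3, 7, 8]).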
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False, mutated=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
self.mutated = mutated
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start, edge: data._slice(
slice(start, edge), axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start, edge: data[slice(start, edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start, edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start, None)
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def group_info(self):
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups)
rep = np.diff(np.r_[0, self.bins])
rep = com._ensure_platform_int(rep)
if ngroups == len(self.bins):
comp_ids = np.repeat(np.arange(ngroups), rep)
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
return comp_ids.astype('int64', copy=False), \
obs_group_ids.astype('int64', copy=False), ngroups
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
if len(self.binlabels) != 0 and isnull(self.binlabels[0]):
return self.binlabels[1:]
return self.binlabels
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
return [Grouping(lvl, lvl, in_axis=False, level=None, name=name)
for lvl, name in zip(self.levels, self.names)]
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
# ----------------------------------------------------------------------
# cython aggregation
_cython_functions = copy.deepcopy(BaseGrouper._cython_functions)
_cython_functions['aggregate'].pop('median')
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
in_axis : if the Grouping is a column in self.obj and hence among
Groupby.exclusions list
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True, in_axis=False):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
self.in_axis = in_axis
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._should_compress = True
# we have a single grouper which may be a myriad of things,
# some of which are dependent on the passed-in level
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
# must have an ordered categorical
if self.sort:
if not self.grouper.ordered:
# technically we cannot group on an unordered
# Categorical
# but this a user convenience to do so; the ordering
# is preserved and if it's a reduction it doesn't make
# any difference
pass
# fix bug #GH8868 sort=False being ignored in categorical
# groupby
else:
cat = self.grouper.unique()
self.grouper = self.grouper.reorder_categories(
cat.categories)
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
self._labels = self.grouper.codes
c = self.grouper.categories
self._group_index = CategoricalIndex(
Categorical.from_codes(np.arange(len(c)),
categories=c,
ordered=self.grouper.ordered))
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# we are done
if isinstance(self.grouper, Grouping):
self.grouper = self.grouper.grouper
# no level passed
elif not isinstance(self.grouper,
(Series, Index, Categorical, np.ndarray)):
if getattr(self.grouper, 'ndim', 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError("Grouper for '%s' not 1-dimensional" % t)
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have
# Timestamps like
if getattr(self.grouper, 'dtype', None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping({0})'.format(self.name)
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._labels is None or self._group_index is None:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
@cache_readonly
def groups(self):
return self.index.groupby(self.grouper)
def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
mutated=False):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, and sort, while
the passed-in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
match_axis_length = False
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
# what are we after, exactly?
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if not any_callable and not all_in_columns and \
not any_arraylike and not any_groupers and \
match_axis_length and level is None:
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not _is_label_like(key):
try:
obj._data.items.get_loc(key)
except Exception:
return False
return True
# if the grouper is obj[name]
def is_in_obj(gpr):
try:
return id(gpr) == id(obj[gpr.name])
except Exception:
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr): # df.groupby('name')
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
else:
in_axis, name = False, None
if is_categorical_dtype(gpr) and len(gpr) != len(obj):
raise ValueError("Categorical dtype grouper must "
"have len(grouper) == len(data)")
# create the Grouping
# allow us to pass the actual Grouping as the gpr
ping = Grouping(group_axis,
gpr,
obj=obj,
name=name,
level=level,
sort=sort,
in_axis=in_axis) \
if not isinstance(gpr, Grouping) else gpr
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
return grouper, exclusions, obj
def _is_label_like(val):
return (isinstance(val, compat.string_types) or
(val is not None and lib.isscalar(val)))
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
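# Illustrative sketch (not part of the original module; ``axis`` below is a
# hypothetical demonstration Index): the grouper is normalized into something
# Grouping can consume directly:
#
# >>> axis = Index(['a', 'b', 'c'])
# >>> _convert_grouper(axis, {'a': 1, 'b': 2, 'c': 1})        # doctest: +SKIP
# ...the dict's bound .get method...
# >>> _convert_grouper(axis, Series([1, 2, 1], index=axis))   # doctest: +SKIP
# ...the Series' underlying values, aligned to ``axis``...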
def _whitelist_method_generator(klass, whitelist):
"""
Yields all GroupBy member defs for DataFrame/Series names in _whitelist.
Parameters
----------
klass - class where members are defined. Should be Series or DataFrame
whitelist - list of names of klass methods to be constructed
Returns
-------
The generator yields a sequence of strings, each suitable for exec'ing,
that define implementations of the named methods for DataFrameGroupBy
or SeriesGroupBy.
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
method_wrapper_template = \
"""def %(name)s(%(sig)s) :
\"""
%(doc)s
\"""
f = %(self)s.__getattr__('%(name)s')
return f(%(args)s)"""
property_wrapper_template = \
"""@property
def %(name)s(self) :
\"""
%(doc)s
\"""
return self.__getattr__('%(name)s')"""
for name in whitelist:
# don't override anything that was explicitly defined
# in the base class
if hasattr(GroupBy, name):
continue
# ugly, but we need the name string itself in the method.
f = getattr(klass, name)
doc = f.__doc__
doc = doc if type(doc) == str else ''
if isinstance(f, types.MethodType):
wrapper_template = method_wrapper_template
decl, args = make_signature(f)
# pass args by name to f because otherwise
# GroupBy._make_wrapper won't know whether
# we passed in an axis parameter.
args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
params = {'name': name,
'doc': doc,
'sig': ','.join(decl),
'self': args[0],
'args': ','.join(args_by_name)}
else:
wrapper_template = property_wrapper_template
params = {'name': name, 'doc': doc}
yield wrapper_template % params
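# Illustrative expansion sketch (not part of the original module): for a
# hypothetical whitelisted method ``foo(self, bar=None)`` the method template
# above expands to roughly
#
#     def foo(self, bar=None) :
#         """
#         ...foo's docstring...
#         """
#         f = self.__getattr__('foo')
#         return f(bar=bar)
#
# and the generated string is then exec'd inside the GroupBy subclass body
# (see SeriesGroupBy and DataFrameGroupBy below).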
class SeriesGroupBy(GroupBy):
#
# Make class defs of attributes on SeriesGroupBy whitelist
_apply_whitelist = _series_apply_whitelist
for _def_str in _whitelist_method_generator(Series,
_series_apply_whitelist):
exec(_def_str)
@property
def name(self):
"""
since we are a series, we by definition only have
a single name, but may be the result of a selection or
the name of our object
"""
if self._selection is None:
return self.obj.name
else:
return self._selection
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply an aggregation function or functions to groups, most likely
yielding a Series, but in some cases a DataFrame, depending on the
output of the aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
_level = kwargs.pop('_level', None)
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs,
(_level or 0) + 1)
else:
cyfunc = self._is_cython_func(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
# _level handled at higher
if not _level and isinstance(ret, dict):
from pandas import concat
ret = concat(ret, axis=1)
return ret
agg = aggregate
def _aggregate_multiple_funcs(self, arg, _level):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
obj = self
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
# reset the cache so that we
# only include the named selection
if name in self._selected_obj:
obj = copy.copy(obj)
obj._reset_cache()
obj._selection = name
results[name] = obj.aggregate(func)
if isinstance(list(compat.itervalues(results))[0],
com.ABCDataFrame):
# let higher level handle
if _level:
return results
return list(compat.itervalues(results))[0]
return DataFrame(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self.name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.obj.index,
names=names)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name, index=keys)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
result = DataFrame(values, index=index).stack()
result.name = self.name
return result
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
func = self._is_cython_func(func) or func
# if string function
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
return self._transform_fast(
lambda: getattr(self, func)(*args, **kwargs))
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self, func)
ids, _, ngroup = self.grouper.group_info
cast = (self.size().fillna(0) > 0).any()
out = algos.take_1d(func().values, ids)
if cast:
out = self._try_cast(out, self.obj)
return Series(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) for name, group in self
if true_and_notnull(group)]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna=True):
""" Returns number of unique elements in the group """
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
assert val.dtype == object, \
'val.dtype must be object, got %s' % val.dtype
val, _ = algos.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
isnull = lambda a: a == -1
else:
isnull = com.isnull
ids, val = ids[sorter], val[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = isnull(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype('int64', copy=False)
res = out if ids[0] != -1 else out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids] = out
return Series(res,
index=ri,
name=self.name)
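# Illustrative usage sketch (not part of the original module; ``s`` is a
# hypothetical demonstration Series):
#
# >>> s = Series([1, 1, 2, 3, 3], index=['a', 'a', 'a', 'b', 'b'])
# >>> s.groupby(level=0).nunique()
# a    2
# b    1
# dtype: int64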
@deprecate_kwarg('take_last', 'keep',
mapping={True: 'last', False: 'first'})
@Appender(Series.nlargest.__doc__)
def nlargest(self, n=5, keep='first'):
# ToDo: When we remove deprecate_kwargs, we can remove these methods
# and include nlargest and nsmallest in _series_apply_whitelist
return self.apply(lambda x: x.nlargest(n=n, keep=keep))
@deprecate_kwarg('take_last', 'keep',
mapping={True: 'last', False: 'first'})
@Appender(Series.nsmallest.__doc__)
def nsmallest(self, n=5, keep='first'):
return self.apply(lambda x: x.nsmallest(n=n, keep=keep))
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
from functools import partial
from pandas.tools.tile import cut
from pandas.tools.merge import _get_join_indexers
if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return self.apply(Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins)
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algos.factorize(val, sort=True)
else:
cat, bins = cut(val, bins, retbins=True)
# bins[:-1] for backward compat;
# o.w. cat.categories could be better
lab, lev, dropna = cat.codes, bins[:-1], False
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
# new values are where sorted labels change
inc = np.r_[True, lab[1:] != lab[:-1]]
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
labels = list(map(rep, self.grouper.recons_labels)) + [lab[inc]]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self.name]
if dropna:
mask = labels[-1] != -1
if mask.all():
dropna = False
else:
out, labels = out[mask], [label[mask] for label in labels]
if normalize:
out = out.astype('float')
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
if _np_version_under1p8:
mi, ml = algos.factorize(m)
d[ml] = d[ml] - np.bincount(mi)
else:
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, labels[-1] = out[sorter], labels[-1][sorter]
if bins is None:
mi = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
if com.is_integer_dtype(out):
out = com._ensure_int64(out)
return Series(out, index=mi, name=self.name)
# for compat. with algos.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype='bool')
for lab in labels[:-1]:
diff |= np.r_[True, lab[1:] != lab[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin),
np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, labels[-1]]
_, idx = _get_join_indexers(left, right, sort=False, how='left')
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
labels.append(left[-1])
mi = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
if com.is_integer_dtype(out):
out = com._ensure_int64(out)
return Series(out, index=mi, name=self.name)
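# Illustrative usage sketch (not part of the original module; ``s`` is a
# hypothetical demonstration Series); the result is indexed by
# (group, value) pairs, roughly:
#
# >>> s = Series(['a', 'a', 'b'], index=[1, 1, 2])
# >>> s.groupby(level=0).value_counts()
# 1  a    2
# 2  b    1
# dtype: int64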
def count(self):
""" Compute count of group, excluding missing values """
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isnull(val)
ids = com._ensure_platform_int(ids)
out = np.bincount(ids[mask], minlength=ngroups or None)
return Series(out,
index=self.grouper.result_index,
name=self.name,
dtype='int64')
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(
how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
result, _ = self.grouper.aggregate(
block.values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
def aggregate(self, arg, *args, **kwargs):
_level = kwargs.pop('_level', None)
result, how = self._aggregate(arg, _level=_level, *args, **kwargs)
if how is None:
return result
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs(
[arg], _level=_level)
result.columns = Index(
result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
return result._convert(datetime=True)
agg = aggregate
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise AbstractMethodError(self)
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors = None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors = e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
return DataFrame(index=keys)
key_names = self.grouper.names
# GH12824.
def first_non_None_value(values):
try:
v = next(v for v in values if v is not None)
except StopIteration:
return None
return v
v = first_non_None_value(values)
if v is None:
# GH9684. If all values are None, then this will throw an error.
# We'd prefer it return an empty dataframe.
return DataFrame()
elif isinstance(v, DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
v = first_non_None_value(values)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.tools.merge import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(
keys, values, not_indexed_same=True,
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as its faster than concat
# and if we have mi-columns
if isinstance(v.index,
MultiIndex) or key_index is None:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values, index=key_index,
columns=index)
else:
# GH5788 instead of stacking; concat gets the
# dtypes correct
from pandas.tools.merge import concat
result = concat(values, keys=key_index,
names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values.T, index=v.index,
columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is list of arrays of unequal lengths fall
# through to the outer else clause
return Series(values, index=key_index, name=self.name)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if (so.ndim == 2 and so.dtypes.isin(_DATELIKE_DTYPES).any()):
result = result._convert(numeric=True)
date_cols = self._selected_obj.select_dtypes(
include=list(_DATELIKE_DTYPES)).columns
date_cols = date_cols.intersection(result.columns)
result[date_cols] = (result[date_cols]
._convert(datetime=True,
coerce=True))
else:
result = result._convert(datetime=True)
return self._reindex_output(result)
else:
# only coerce dates if we find at least 1 datetime
coerce = True if any([isinstance(x, Timestamp)
for x in values]) else False
return (Series(values, index=key_index, name=self.name)
._convert(datetime=True,
coerce=coerce))
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from pandas.tools.merge import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except ValueError:
msg = 'transform must return a scalar value for each group'
raise ValueError(msg)
else:
res = path(group)
# broadcasting
if isinstance(res, Series):
if res.index.is_(obj.index):
group.T.values[:] = res
else:
group.values[:] = res
applied.append(group)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
return self._set_result_index_ordered(concatenated)
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed DataFrame on each group and
return a DataFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each subframe
Notes
-----
Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
"""
# optimized transforms
func = self._is_cython_func(func) or func
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
result = getattr(self, func)(*args, **kwargs)
else:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
obj = self._obj_with_exclusions
# nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
return self._transform_fast(result, obj)
def _transform_fast(self, result, obj):
"""
Fast transform path for aggregations
"""
# if there were groups with no observations (Categorical only?)
# try casting data to original dtype
cast = (self.size().fillna(0) > 0).any()
# for each col, reshape to the size of the original frame
# by take operation
ids, _, ngroup = self.grouper.group_info
output = []
for i, _ in enumerate(result.columns):
res = algos.take_1d(result.iloc[:, i].values, ids)
if cast:
res = self._try_cast(res, obj.iloc[:, i])
output.append(res)
return DataFrame._from_arrays(output, columns=result.columns,
index=obj.index)
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notnull(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, 'name', name)
res = func(group, *args, **kwargs)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if is_bool(res) or (lib.isscalar(res) and isnull(res)):
if res and notnull(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError("filter function returned a %s, "
"but expected a scalar bool" %
type(res).__name__)
return self._apply_filter(indices, dropna)
class DataFrameGroupBy(NDFrameGroupBy):
_apply_whitelist = _dataframe_apply_whitelist
#
# Make class defs of attributes on DataFrameGroupBy whitelist.
for _def_str in _whitelist_method_generator(DataFrame, _apply_whitelist):
exec(_def_str)
_block_agg_axis = 1
@Substitution(name='groupby')
@Appender(SelectionMixin._see_also_template)
@Appender(SelectionMixin._agg_doc)
def aggregate(self, arg, *args, **kwargs):
return super(DataFrameGroupBy, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(subset, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(subset, selection=key,
grouper=self.grouper)
raise AssertionError("invalid ndim for _gotitem")
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if self.axis == 0:
return DataFrame(result, index=obj.columns,
columns=result_index).T
else:
return DataFrame(result, index=obj.index,
columns=result_index)
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _insert_inaxis_grouper_inplace(self, result):
# zip in reverse so we can always insert at loc 0
izip = zip(* map(reversed, (
self.grouper.names,
self.grouper.get_group_levels(),
[grp.in_axis for grp in self.grouper.groupings])))
for name, lev, in_axis in izip:
if in_axis:
result.insert(0, name, lev)
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = DataFrame(output, columns=output_keys)
self._insert_inaxis_grouper_inplace(result)
result = result.consolidate()
else:
index = self.grouper.result_index
result = DataFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _wrap_transformed_output(self, output, names=None):
return DataFrame(output, index=self.obj.index)
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[1])
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result.consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _reindex_output(self, result):
"""
if we have categorical groupers, then we want to make sure that
        we have a fully reindexed output to the levels. These may not have
        participated in the groupings (e.g. may have all been
nan groups)
This can re-expand the output space
"""
groupings = self.grouper.groupings
if groupings is None:
return result
elif len(groupings) == 1:
return result
elif not any([isinstance(ping.grouper, (Categorical, CategoricalIndex))
for ping in groupings]):
return result
levels_list = [ping.group_index for ping in groupings]
index, _ = MultiIndex.from_product(
levels_list, names=self.grouper.names).sortlevel()
if self.as_index:
d = {self.obj._get_axis_name(self.axis): index, 'copy': False}
return result.reindex(**d)
# GH 13204
# Here, the categorical in-axis groupers, which need to be fully
# expanded, are columns in `result`. An idea is to do:
# result = result.set_index(self.grouper.names)
# .reindex(index).reset_index()
# but special care has to be taken because of possible not-in-axis
# groupers.
# So, we manually select and drop the in-axis grouper columns,
# reindex `result`, and then reset the in-axis grouper columns.
# Select in-axis groupers
in_axis_grps = [(i, ping.name) for (i, ping)
in enumerate(groupings) if ping.in_axis]
g_nums, g_names = zip(*in_axis_grps)
result = result.drop(labels=list(g_names), axis=1)
# Set a temp index and reindex (possibly expanding)
result = result.set_index(self.grouper.result_index
).reindex(index, copy=False)
# Reset in-axis grouper columns
# (using level numbers `g_nums` because level names may not be unique)
result = result.reset_index(level=g_nums)
return result.reset_index(drop=True)
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _apply_to_column_groupbys(self, func):
from pandas.tools.merge import concat
return concat(
(func(col_groupby) for _, col_groupby
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
def count(self):
""" Compute count of group, excluding missing values """
from functools import partial
from pandas.lib import count_level_2d
from pandas.core.common import _isnull_ndarraylike as isnull
data, _ = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
val = ((mask & ~isnull(blk.get_values())) for blk in data.blocks)
loc = (blk.mgr_locs for blk in data.blocks)
counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1)
blk = map(make_block, map(counter, val), loc)
return self._wrap_agged_blocks(data.items, list(blk))
from pandas.tools.plotting import boxplot_frame_groupby # noqa
DataFrameGroupBy.boxplot = boxplot_frame_groupby
class PanelGroupBy(NDFrameGroupBy):
@Substitution(name='groupby')
@Appender(SelectionMixin._see_also_template)
@Appender(SelectionMixin._agg_doc)
def aggregate(self, arg, *args, **kwargs):
return super(PanelGroupBy, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self._selected_obj.items
else:
slice_axis = self._selection_list
slicer = lambda x: self._selected_obj[x]
else:
raise NotImplementedError("axis other than 0 is not supported")
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def aggregate(self, arg, *args, **kwargs):
"""
Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a Panel or when passed to Panel.apply. If
            passed a dict, the keys must be DataFrame column names
Returns
-------
aggregated : Panel
"""
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
return self._aggregate_generic(arg, *args, **kwargs)
def _wrap_generic_output(self, result, obj):
if self.axis == 0:
new_axes = list(obj.axes)
new_axes[0] = self.grouper.result_index
elif self.axis == 1:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, z, x]
else:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, y, x]
result = Panel._from_axes(result, new_axes)
if self.axis == 1:
result = result.swapaxes(0, 1).swapaxes(0, 2)
elif self.axis == 2:
result = result.swapaxes(0, 2)
return result
def _aggregate_item_by_item(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
result = {}
if self.axis > 0:
for item in obj:
try:
itemg = DataFrameGroupBy(obj[item],
axis=self.axis - 1,
grouper=self.grouper)
result[item] = itemg.aggregate(func, *args, **kwargs)
except (ValueError, TypeError):
raise
new_axes = list(obj.axes)
new_axes[self.axis] = self.grouper.result_index
return Panel._from_axes(result, new_axes)
else:
raise ValueError("axis value must be greater than 0")
def _wrap_aggregated_output(self, output, names=None):
raise AbstractMethodError(self)
class NDArrayGroupBy(GroupBy):
pass
# ----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
def __init__(self, data, labels, ngroups, axis=0):
self.data = data
self.labels = com._ensure_int64(labels)
self.ngroups = ngroups
self.axis = axis
@cache_readonly
def slabels(self):
# Sorted labels
return algos.take_nd(self.labels, self.sort_idx, allow_fill=False)
@cache_readonly
def sort_idx(self):
# Counting sort indexer
return _get_group_index_sorter(self.labels, self.ngroups)
def __iter__(self):
sdata = self._get_sorted_data()
if self.ngroups == 0:
raise StopIteration
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for i, (start, end) in enumerate(zip(starts, ends)):
            # Since the group ids are now compressed, it's not "possible"
# to produce empty slices because such groups would not be observed
# in the data
# if start >= end:
# raise AssertionError('Start %s must be less than end %s'
# % (str(start), str(end)))
yield i, self._chop(sdata, slice(start, end))
def _get_sorted_data(self):
return self.data.take(self.sort_idx, axis=self.axis, convert=False)
def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def apply(self, f):
raise AbstractMethodError(self)
class ArraySplitter(DataSplitter):
pass
class SeriesSplitter(DataSplitter):
def _chop(self, sdata, slice_obj):
return sdata._get_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
def fast_apply(self, f, names):
        # must return values::list, mutated::bool
try:
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
except:
# fails when all -1
return [], True
sdata = self._get_sorted_data()
results, mutated = lib.apply_frame_axis0(sdata, f, names, starts, ends)
return results, mutated
def _chop(self, sdata, slice_obj):
if self.axis == 0:
return sdata.iloc[slice_obj]
else:
return sdata._slice(slice_obj, axis=1) # ix[:, slice_obj]
class NDFrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
self.factory = data._constructor
def _get_sorted_data(self):
# this is the BlockManager
data = self.data._data
# this is sort of wasteful but...
sorted_axis = data.axes[self.axis].take(self.sort_idx)
sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)
return sorted_data
def _chop(self, sdata, slice_obj):
return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
def get_splitter(data, *args, **kwargs):
if isinstance(data, Series):
klass = SeriesSplitter
elif isinstance(data, DataFrame):
klass = FrameSplitter
else:
klass = NDFrameSplitter
return klass(data, *args, **kwargs)
# ----------------------------------------------------------------------
# Misc utilities
def get_group_index(labels, shape, sort, xnull):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
        If true, nulls are excluded; i.e. -1 values in the labels are
passed through
Returns
-------
An array of type int64 where two elements are equal if their corresponding
    labels are equal at all locations.
"""
def _int64_cut_off(shape):
acc = long(1)
for i, mul in enumerate(shape):
acc *= long(mul)
if not acc < _INT64_MAX:
return i
return len(shape)
def loop(labels, shape):
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
out = stride * labels[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
stride //= shape[i]
out += labels[i] * stride
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1
out[mask] = -1
if nlev == len(shape): # all levels done!
return out
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
comp_ids, obs_ids = _compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:]
return loop(labels, shape)
    def maybe_lift(lab, size):  # promote nan values
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(com._ensure_int64, labels)
if not xnull:
labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
return loop(list(labels), list(shape))
_INT64_MAX = np.iinfo(np.int64).max
def _int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
the_prod *= long(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
if _int64_overflow_possible(shape):
# at some point group indices are factorized,
# and may not be deconstructed here! wrong path!
raise ValueError('cannot deconstruct factorized group indices!')
label_list = []
factor = 1
y = 0
x = comp_labels
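    # walk the levels from last to first, peeling off one mixed-radix digit
    # per iteration via modulo / floor-division by the running factor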
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull):
"""
reconstruct labels from observed group ids
Parameters
----------
xnull: boolean,
if nulls are excluded; i.e. -1 labels are passed through
"""
from pandas.hashtable import unique_label_indices
if not xnull:
lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8')
shape = np.asarray(shape, dtype='i8') + lift
if not _int64_overflow_possible(shape):
# obs ids are deconstructable! take the fast route!
out = decons_group_index(obs_ids, shape)
return out if xnull or not lift.any() \
else [x - y for x, y in zip(out, lift)]
i = unique_label_indices(comp_ids)
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
return [i8copy(lab[i]) for lab in labels]
def _indexer_from_factorized(labels, shape, compress=True):
ids = get_group_index(labels, shape, sort=True, xnull=False)
if not compress:
ngroups = (ids.size and ids.max()) + 1
else:
ids, obs = _compress_group_index(ids, sort=True)
ngroups = len(obs)
return _get_group_index_sorter(ids, ngroups)
def _lexsort_indexer(keys, orders=None, na_position='last'):
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
for key, order in zip(keys, orders):
# we are already a Categorical
if is_categorical_dtype(key):
c = key
# create the Categorical
else:
c = Categorical(key, ordered=True)
if na_position not in ['last', 'first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
n = len(c.categories)
codes = c.codes.copy()
mask = (c.codes == -1)
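        # remap the codes so that a single ascending sort of the remapped
        # codes gives the requested order: reverse the codes for descending
        # keys and push NaN codes (-1) to the front or back per na_position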
if order: # ascending
if na_position == 'last':
codes = np.where(mask, n, codes)
elif na_position == 'first':
codes += 1
else: # not order means descending
if na_position == 'last':
codes = np.where(mask, n, n - codes - 1)
elif na_position == 'first':
codes = np.where(mask, 0, n - codes)
if mask.any():
n += 1
shape.append(n)
labels.append(codes)
return _indexer_from_factorized(labels, shape)
def _nargsort(items, kind='quicksort', ascending=True, na_position='last'):
"""
This is intended to be a drop-in replacement for np.argsort which
handles NaNs. It adds ascending and na_position parameters.
GH #6399, #5231
"""
# specially handle Categorical
if is_categorical_dtype(items):
return items.argsort(ascending=ascending)
items = np.asanyarray(items)
idx = np.arange(len(items))
mask = isnull(items)
non_nans = items[~mask]
non_nan_idx = idx[~mask]
nan_idx = np.nonzero(mask)[0]
if not ascending:
non_nans = non_nans[::-1]
non_nan_idx = non_nan_idx[::-1]
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
if not ascending:
indexer = indexer[::-1]
# Finally, place the NaNs at the end or the beginning according to
# na_position
if na_position == 'last':
indexer = np.concatenate([indexer, nan_idx])
elif na_position == 'first':
indexer = np.concatenate([nan_idx, indexer])
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
return indexer
class _KeyMapper(object):
"""
Ease my suffering. Map compressed group id -> key tuple
"""
def __init__(self, comp_ids, ngroups, labels, levels):
self.levels = levels
self.labels = labels
self.comp_ids = comp_ids.astype(np.int64)
self.k = len(labels)
self.tables = [_hash.Int64HashTable(ngroups) for _ in range(self.k)]
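        # one hash table per level, each mapping a compressed group id to
        # that level's label code, so get_key can rebuild the full key tuple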
self._populate_tables()
def _populate_tables(self):
for labs, table in zip(self.labels, self.tables):
table.map(self.comp_ids, labs.astype(np.int64))
def get_key(self, comp_id):
return tuple(level[table.get_item(comp_id)]
for table, level in zip(self.tables, self.levels))
def _get_indices_dict(label_list, keys):
shape = list(map(len, keys))
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
ngroups = ((group_index.size and group_index.max()) + 1) \
if _int64_overflow_possible(shape) \
else np.prod(shape, dtype='i8')
sorter = _get_group_index_sorter(group_index, ngroups)
sorted_labels = [lab.take(sorter) for lab in label_list]
group_index = group_index.take(sorter)
return lib.indices_fast(sorter, group_index, keys, sorted_labels)
# ----------------------------------------------------------------------
# sorting levels...cleverly?
def _get_group_index_sorter(group_index, ngroups):
"""
_algos.groupsort_indexer implements `counting sort` and it is at least
O(ngroups), where
ngroups = prod(shape)
shape = map(len, keys)
that is, linear in the number of combinations (cartesian product) of unique
values of groupby keys. This can be huge when doing multi-key groupby.
np.argsort(kind='mergesort') is O(count x log(count)) where count is the
length of the data-frame;
    Both algorithms are `stable` sorts and that is necessary for correctness of
groupby operations. e.g. consider:
df.groupby(key)[col].transform('first')
"""
count = len(group_index)
alpha = 0.0 # taking complexities literally; there may be
beta = 1.0 # some room for fine-tuning these parameters
if alpha + beta * ngroups < count * np.log(count):
sorter, _ = _algos.groupsort_indexer(com._ensure_int64(group_index),
ngroups)
return com._ensure_platform_int(sorter)
else:
return group_index.argsort(kind='mergesort')
def _compress_group_index(group_index, sort=True):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
"""
size_hint = min(len(group_index), _hash._SIZE_HINT_LIMIT)
table = _hash.Int64HashTable(size_hint)
group_index = com._ensure_int64(group_index)
# note, group labels come out ascending (ie, 1,2,3 etc)
comp_ids, obs_group_ids = table.get_labels_groupby(group_index)
if sort and len(obs_group_ids) > 0:
obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)
return comp_ids, obs_group_ids
def _reorder_by_uniques(uniques, labels):
# sorter is index where elements ought to go
sorter = uniques.argsort()
# reverse_indexer is where elements came from
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
# move labels to right locations (ie, unsort ascending labels)
labels = algos.take_nd(reverse_indexer, labels, allow_fill=False)
np.putmask(labels, mask, -1)
# sort observed ids
uniques = algos.take_nd(uniques, sorter, allow_fill=False)
return uniques, labels
def _groupby_indices(values):
if is_categorical_dtype(values):
        # we have a categorical, so we can do quite a
# bit better than factorizing again
reverse = dict(enumerate(values.categories))
codes = values.codes.astype('int64')
_, counts = _hash.value_count_scalar64(codes, False)
else:
reverse, codes, counts = _algos.group_labels(
_values_from_object(com._ensure_object(values)))
return _algos.groupby_indices(reverse, codes, counts)
def numpy_groupby(data, labels, axis=0):
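    # pure-numpy reference implementation: sort by label, find the start of
    # each group, then sum the contiguous runs with np.add.reduceat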
s = np.argsort(labels)
keys, inv = np.unique(labels, return_inverse=True)
i = inv.take(s)
groups_at = np.where(i != np.concatenate(([-1], i[:-1])))[0]
ordered_data = data.take(s, axis=axis)
group_sums = np.add.reduceat(ordered_data, groups_at, axis=axis)
return group_sums
|
the-stack_106_28437 | #!/usr/bin/env python
import os
import time
import stat
import json
import random
import ctypes
import inspect
import requests
import traceback
import threading
import subprocess
from collections import Counter
from selfdrive.swaglog import cloudlog
from selfdrive.loggerd.config import ROOT
from common.params import Params
from common.api import api_get
fake_upload = os.getenv("FAKEUPLOAD") is not None
def raise_on_thread(t, exctype):
for ctid, tobj in threading._active.items():
if tobj is t:
tid = ctid
break
else:
raise Exception("Could not find thread")
  '''Raises an exception in the thread with id tid'''
if not inspect.isclass(exctype):
raise TypeError("Only types can be raised (not instances)")
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),
ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# "if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
def listdir_with_creation_date(d):
lst = os.listdir(d)
for fn in lst:
try:
st = os.stat(os.path.join(d, fn))
ctime = st[stat.ST_CTIME]
yield (ctime, fn)
except OSError:
cloudlog.exception("listdir_with_creation_date: stat failed?")
yield (None, fn)
def listdir_by_creation_date(d):
times_and_paths = list(listdir_with_creation_date(d))
return [path for _, path in sorted(times_and_paths)]
def clear_locks(root):
for logname in os.listdir(root):
path = os.path.join(root, logname)
try:
for fname in os.listdir(path):
if fname.endswith(".lock"):
os.unlink(os.path.join(path, fname))
except OSError:
cloudlog.exception("clear_locks failed")
def is_on_wifi():
# ConnectivityManager.getActiveNetworkInfo()
result = subprocess.check_output(["service", "call", "connectivity", "2"]).strip().split("\n")
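  # the parcel dump is lines of hex words; decode each word (byte-reversed)
  # and look for the UTF-16-encoded string "WIFI" in the result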
data = ''.join(''.join(w.decode("hex")[::-1] for w in l[14:49].split()) for l in result[1:])
return "\x00".join("WIFI") in data
class Uploader(object):
def __init__(self, dongle_id, access_token, root):
self.dongle_id = dongle_id
self.access_token = access_token
self.root = root
self.upload_thread = None
self.last_resp = None
self.last_exc = None
def clean_dirs(self):
try:
for logname in os.listdir(self.root):
path = os.path.join(self.root, logname)
# remove empty directories
if not os.listdir(path):
os.rmdir(path)
except OSError:
cloudlog.exception("clean_dirs failed")
def gen_upload_files(self):
if not os.path.isdir(self.root):
return
for logname in listdir_by_creation_date(self.root):
path = os.path.join(self.root, logname)
names = os.listdir(path)
if any(name.endswith(".lock") for name in names):
continue
for name in names:
key = os.path.join(logname, name)
fn = os.path.join(path, name)
yield (name, key, fn)
def get_data_stats(self):
name_counts = Counter()
total_size = 0
for name, key, fn in self.gen_upload_files():
name_counts[name] += 1
total_size += os.stat(fn).st_size
return dict(name_counts), total_size
def next_file_to_upload(self, with_video):
# try to upload log files first
for name, key, fn in self.gen_upload_files():
if name in ["rlog", "rlog.bz2"]:
return (key, fn, 0)
if with_video:
# then upload compressed camera file
for name, key, fn in self.gen_upload_files():
if name in ["fcamera.hevc"]:
return (key, fn, 1)
# then upload other files
for name, key, fn in self.gen_upload_files():
if not name.endswith('.lock') and not name.endswith(".tmp"):
return (key, fn, 1)
return None
def do_upload(self, key, fn):
try:
url_resp = api_get("v1.1/"+self.dongle_id+"/upload_url/", timeout=2, path=key, access_token=self.access_token)
url_resp_json = json.loads(url_resp.text)
url = url_resp_json['url']
headers = url_resp_json['headers']
cloudlog.info("upload_url v1.1 %s %s", url, str(headers))
if fake_upload:
cloudlog.info("*** WARNING, THIS IS A FAKE UPLOAD TO %s ***" % url)
class FakeResponse(object):
def __init__(self):
self.status_code = 200
self.last_resp = FakeResponse()
else:
with open(fn, "rb") as f:
self.last_resp = requests.put(url, data=f, headers=headers)
except Exception as e:
self.last_exc = (e, traceback.format_exc())
raise
def normal_upload(self, key, fn):
self.last_resp = None
self.last_exc = None
try:
self.do_upload(key, fn)
except Exception:
pass
return self.last_resp
def killable_upload(self, key, fn):
self.last_resp = None
self.last_exc = None
self.upload_thread = threading.Thread(target=lambda: self.do_upload(key, fn))
self.upload_thread.start()
self.upload_thread.join()
self.upload_thread = None
return self.last_resp
def abort_upload(self):
thread = self.upload_thread
if thread is None:
return
if not thread.is_alive():
return
raise_on_thread(thread, SystemExit)
thread.join()
def upload(self, key, fn):
    # compress raw logs with bzip2 before uploading
if fn.endswith("log"):
ext = ".bz2"
cloudlog.info("compressing %r to %r", fn, fn+ext)
if os.system("nice -n 19 bzip2 -c %s > %s.tmp && mv %s.tmp %s%s && rm %s" % (fn, fn, fn, fn, ext, fn)) != 0:
cloudlog.exception("upload: bzip2 compression failed")
return False
# assuming file is named properly
key += ext
fn += ext
try:
sz = os.path.getsize(fn)
except OSError:
cloudlog.exception("upload: getsize failed")
return False
cloudlog.event("upload", key=key, fn=fn, sz=sz)
cloudlog.info("checking %r with size %r", key, sz)
if sz == 0:
# can't upload files of 0 size
os.unlink(fn) # delete the file
success = True
else:
cloudlog.info("uploading %r", fn)
# stat = self.killable_upload(key, fn)
stat = self.normal_upload(key, fn)
if stat is not None and stat.status_code in (200, 201):
cloudlog.event("upload_success", key=key, fn=fn, sz=sz)
os.unlink(fn) # delete the file
success = True
else:
cloudlog.event("upload_failed", stat=stat, exc=self.last_exc, key=key, fn=fn, sz=sz)
success = False
self.clean_dirs()
return success
def uploader_fn(exit_event):
cloudlog.info("uploader_fn")
params = Params()
dongle_id, access_token = params.get("DongleId"), params.get("AccessToken")
if dongle_id is None or access_token is None:
cloudlog.info("uploader MISSING DONGLE_ID or ACCESS_TOKEN")
raise Exception("uploader can't start without dongle id and access token")
uploader = Uploader(dongle_id, access_token, ROOT)
backoff = 0.1
while True:
upload_video = (params.get("IsUploadVideoOverCellularEnabled") != "0") or is_on_wifi()
if exit_event.is_set():
return
d = uploader.next_file_to_upload(upload_video)
if d is None:
time.sleep(5)
continue
key, fn, _ = d
cloudlog.info("to upload %r", d)
success = uploader.upload(key, fn)
if success:
backoff = 0.1
else:
cloudlog.info("backoff %r", backoff)
time.sleep(backoff + random.uniform(0, backoff))
backoff = min(backoff*2, 120)
cloudlog.info("upload done, success=%r", success)
def main(gctx=None):
uploader_fn(threading.Event())
if __name__ == "__main__":
main()
|
the-stack_106_28439 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "atdict-"
cfg.versionfile_source = "atdict/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
the-stack_106_28440 | COUNT_TO = 1000
# A method arising from a literal interpretation of the question:
sum1 = 0
for i in range(1, COUNT_TO):
if ((i % 3 == 0) or (i % 5 == 0)):
sum1 += i
print(sum1)
# Another method arising from insight into the particular question
sum2 = 0
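# Add the multiples of 3 and of 5, then subtract the multiples of 15, which
# the first two loops count twice (inclusion-exclusion).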
for i in range(3, COUNT_TO, 3):
sum2 += i
for i in range(5, COUNT_TO, 5):
sum2 += i
for i in range(15, COUNT_TO, 15):
sum2 -= i
print(sum2)
# Yet another method, further abstracting the problem using functions:
sum3 = 0
def triangleNumber(upperBound):
return int((upperBound+1)/2*upperBound)
def sumOfMultiples(upperBound, base):
return base*triangleNumber(upperBound//base)
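# The sum of multiples of `base` up to N is base * T(N // base), where
# T(k) = k*(k+1)/2; multiples of both 3 and 5 (i.e. of 15) are counted twice,
# so inclusion-exclusion subtracts them once below.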
sum3 = (
sumOfMultiples(COUNT_TO-1, 3) +
sumOfMultiples(COUNT_TO-1, 5) -
sumOfMultiples(COUNT_TO-1, 15)
)
print(sum3)
|
the-stack_106_28441 |
# coding: utf-8
# In[1]:
class CompareModels:
def __init__(self):
import pandas as pd
self._models = pd.DataFrame(
data=['r', 'R^2', 'RMSE', 'RMSRE', 'MAPE'],
columns=['Model']
).set_index(keys='Model')
def add(self, model_name, y_test, y_pred):
import numpy as np
from sklearn.metrics import r2_score, mean_squared_error
self._models[model_name] = np.array(
object=[
np.corrcoef(y_test, y_pred)[0, 1], # r
r2_score(y_true=y_test, y_pred=y_pred), # R^2
np.sqrt(mean_squared_error(y_true=y_test, y_pred=y_pred)), # RMSE
np.sqrt(np.mean(((y_test-y_pred)/y_test)**2)), # RMSRE
np.mean(np.abs((y_test-y_pred) / y_test)) * 100 # MAPE
]
)
    @staticmethod
    def R2AndRMSE(y_test, y_pred):
import numpy as np
from sklearn.metrics import r2_score, mean_squared_error
return r2_score(y_true=y_test, y_pred=y_pred), np.sqrt(mean_squared_error(y_true=y_test, y_pred=y_pred))
@property
def models(self):
return self._models
@models.setter
def models(self, _):
print('Cannot perform such task.')
def show(self, **kwargs):
import matplotlib.pyplot as plt
kwargs['marker'] = kwargs.get('marker', 'X')
self._models.plot(**kwargs)
plt.xticks(range(len(self._models)), self._models.index)
plt.xlabel('')
plt.axis('auto')
plt.show()
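# Example usage (illustrative only; the model names and prediction arrays
# below are hypothetical):
#
#     cm = CompareModels()
#     cm.add(model_name='ols', y_test=y_test, y_pred=y_pred_ols)
#     cm.add(model_name='rf', y_test=y_test, y_pred=y_pred_rf)
#     print(cm.models)  # r, R^2, RMSE, RMSRE and MAPE per model
#     cm.show(figsize=(8, 4))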
|
the-stack_106_28443 | import Image
import ImageFont, ImageDraw
import sys
test_case_num = sys.argv[1]
node1 = sys.argv[2]
node2 = sys.argv[3]
#opens an image:
test_suite = "varied"
test_folder = "results_%s_1000" % test_suite
test_case = "%s_%s" % (test_suite, test_case_num)
test_case_text = "Test: %s %s" % (test_suite.upper(), test_case_num)
out_file = "case_%s_%s.png" % (test_suite, test_case_num)
im1 = Image.open("%s/%s/thread-1-assign_rate.png" % (test_folder, test_case))
im2 = Image.open("%s/%s/thread-1-jobs_remaining.png" % (test_folder, test_case))
im3 = Image.open("%s/%s/thread%s-queue_size.png" % (test_folder, test_case, node1))
im4 = Image.open("%s/%s/thread%s-queue_size.png" % (test_folder, test_case, node2))
#creates a new empty image, RGB mode, and size 400 by 400.
height_offset=40
new_im = Image.new('RGB', (1600, 1200 + height_offset), color="white")
new_im.paste(im1, (0, 0 + height_offset))
new_im.paste(im2, (800, 0 + height_offset))
new_im.paste(im3, (0, 600 + height_offset))
new_im.paste(im4, (800, 600 + height_offset))
draw = ImageDraw.Draw(new_im)
font = ImageFont.truetype("LiberationSans-Regular.ttf", 40)
draw.text((650, 10), test_case_text, font=font, fill="red")
new_im.thumbnail((400, 300))
new_im.save(out_file)
|
the-stack_106_28444 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import random
import pytest
from marshmallow import Schema, fields, utils, MarshalResult, UnmarshalResult
from marshmallow.exceptions import MarshallingError
from marshmallow.compat import unicode, binary_type
from tests.base import * # noqa
random.seed(1)
# Run tests with both verbose serializer and "meta" option serializer
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_serializing_basic_object(SchemaClass, user):
s = SchemaClass()
data, errors = s.dump(user)
assert data['name'] == user.name
assert_almost_equal(s.data['age'], 42.3)
assert data['registered']
def test_serializer_dump(user):
s = UserSchema()
result, errors = s.dump(user)
assert result['name'] == user.name
# Change strict mode
s.strict = True
bad_user = User(name='Monty', email='invalid')
with pytest.raises(MarshallingError):
s.dump(bad_user)
def test_dump_returns_dict_of_errors():
s = UserSchema()
bad_user = User(name='Monty', email='invalidemail', homepage='badurl')
result, errors = s.dump(bad_user)
assert 'email' in errors
assert 'homepage' in errors
def test_dump_returns_a_marshalresult(user):
s = UserSchema()
result = s.dump(user)
assert isinstance(result, MarshalResult)
data = result.data
assert isinstance(data, dict)
errors = result.errors
assert isinstance(errors, dict)
def test_dumps_returns_a_marshalresult(user):
s = UserSchema()
result = s.dumps(user)
assert isinstance(result, MarshalResult)
assert isinstance(result.data, binary_type)
assert isinstance(result.errors, dict)
def test_load_returns_an_unmarshalresult():
s = UserSchema()
result = s.load({'name': 'Monty'})
assert isinstance(result, UnmarshalResult)
assert isinstance(result.data, User)
assert isinstance(result.errors, dict)
def test_loads_returns_an_unmarshalresult(user):
s = UserSchema()
result = s.loads(json.dumps({'name': 'Monty'}))
assert isinstance(result, UnmarshalResult)
assert isinstance(result.data, User)
assert isinstance(result.errors, dict)
def test_serializing_none():
s = UserSchema(None)
assert s.data['name'] == ''
assert s.data['age'] == 0
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_fields_are_not_copies(SchemaClass):
s = SchemaClass(User('Monty', age=42))
s2 = SchemaClass(User('Monty', age=43))
assert s.fields is not s2.fields
def test_dumps_returns_json(user):
ser = UserSchema()
serialized, errors = ser.dump(user)
json_data, errors = ser.dumps(user)
expected = binary_type(json.dumps(serialized).encode("utf-8"))
assert json_data == expected
def test_dumps_returns_bytestring(user):
s = UserSchema()
result, errors = s.dumps(user)
assert isinstance(result, binary_type)
def test_naive_datetime_field(user, serialized_user):
expected = utils.isoformat(user.created)
assert serialized_user.data['created'] == expected
def test_datetime_formatted_field(user, serialized_user):
result = serialized_user.data['created_formatted']
assert result == user.created.strftime("%Y-%m-%d")
def test_datetime_iso_field(user, serialized_user):
assert serialized_user.data['created_iso'] == utils.isoformat(user.created)
def test_tz_datetime_field(user, serialized_user):
# Datetime is corrected back to GMT
expected = utils.isoformat(user.updated)
assert serialized_user.data['updated'] == expected
def test_local_datetime_field(user, serialized_user):
expected = utils.isoformat(user.updated, localtime=True)
assert serialized_user.data['updated_local'] == expected
def test_class_variable(serialized_user):
assert serialized_user.data['species'] == 'Homo sapiens'
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_serialize_many(SchemaClass):
user1 = User(name="Mick", age=123)
user2 = User(name="Keith", age=456)
users = [user1, user2]
serialized = SchemaClass(users, many=True)
assert len(serialized.data) == 2
assert serialized.data[0]['name'] == "Mick"
assert serialized.data[1]['name'] == "Keith"
def test_no_implicit_list_handling(recwarn):
users = [User(name='Mick'), User(name='Keith')]
with pytest.raises(TypeError):
UserSchema(users)
w = recwarn.pop()
assert issubclass(w.category, DeprecationWarning)
def test_inheriting_serializer(user):
serialized = ExtendedUserSchema(user)
assert serialized.data['name'] == user.name
assert not serialized.data['is_old']
def test_custom_field(serialized_user, user):
assert serialized_user.data['uppername'] == user.name.upper()
def test_url_field(serialized_user, user):
assert serialized_user.data['homepage'] == user.homepage
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_url_field_validation(SchemaClass):
invalid = User("John", age=42, homepage="/john")
s = SchemaClass(invalid)
assert s.is_valid(["homepage"]) is False
def test_relative_url_field():
u = User("John", age=42, homepage="/john")
serialized = UserRelativeUrlSchema(u)
assert serialized.is_valid()
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_stores_invalid_url_error(SchemaClass):
user = User(name="John Doe", homepage="www.foo.com")
serialized = SchemaClass(user)
assert "homepage" in serialized.errors
expected = '"www.foo.com" is not a valid URL. Did you mean: "http://www.foo.com"?'
assert serialized.errors['homepage'] == expected
def test_default():
user = User("John") # No ID set
serialized = UserSchema(user)
assert serialized.data['id'] == "no-id"
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_email_field(SchemaClass):
u = User("John", email="[email protected]")
s = SchemaClass(u)
assert s.data['email'] == "[email protected]"
def test_stored_invalid_email():
u = User("John", email="johnexample.com")
s = UserSchema(u)
assert "email" in s.errors
assert s.errors['email'] == '"johnexample.com" is not a valid email address.'
def test_integer_field():
u = User("John", age=42.3)
serialized = UserIntSchema(u)
assert type(serialized.data['age']) == int
assert serialized.data['age'] == 42
def test_integer_default():
user = User("John", age=None)
serialized = UserIntSchema(user)
assert type(serialized.data['age']) == int
assert serialized.data['age'] == 0
def test_fixed_field():
u = User("John", age=42.3)
serialized = UserFixedSchema(u)
assert serialized.data['age'] == "42.30"
def test_as_string():
u = User("John", age=42.3)
serialized = UserFloatStringSchema(u)
assert type(serialized.data['age']) == str
assert_almost_equal(float(serialized.data['age']), 42.3)
def test_decimal_field():
u = User("John", age=42.3)
s = UserDecimalSchema(u)
assert type(s.data['age']) == unicode
assert_almost_equal(float(s.data['age']), 42.3)
def test_price_field(serialized_user):
assert serialized_user.data['balance'] == "100.00"
def test_fields_param_must_be_list_or_tuple():
invalid = User("John", email="johnexample.com")
with pytest.raises(ValueError):
UserSchema(invalid).is_valid("name")
def test_extra():
user = User("Joe", email="[email protected]")
data, errors = UserSchema(extra={"fav_color": "blue"}).dump(user)
assert data['fav_color'] == "blue"
def test_extra_many():
users = [User('Fred'), User('Brian')]
data, errs = UserSchema(many=True, extra={'band': 'Queen'}).dump(users)
assert data[0]['band'] == 'Queen'
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_method_field(SchemaClass, serialized_user):
assert serialized_user.data['is_old'] is False
u = User("Joe", age=81)
assert SchemaClass(u).data['is_old'] is True
def test_function_field(serialized_user, user):
assert serialized_user.data['lowername'] == user.name.lower()
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_prefix(SchemaClass, user):
s = SchemaClass(user, prefix="usr_")
assert s.data['usr_name'] == user.name
def test_fields_must_be_declared_as_instances(user):
class BadUserSchema(Schema):
name = fields.String
with pytest.raises(TypeError):
BadUserSchema(user)
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_serializing_generator(SchemaClass):
users = [User("Foo"), User("Bar")]
user_gen = (u for u in users)
s = SchemaClass(user_gen, many=True)
assert len(s.data) == 2
assert s.data[0] == SchemaClass(users[0]).data
def test_serializing_empty_list_returns_empty_list():
assert UserSchema([], many=True).data == []
assert UserMetaSchema([], many=True).data == []
def test_serializing_dict(user):
user = {"name": "foo", "email": "foo", "age": 42.3}
s = UserSchema(user)
assert s.data['name'] == "foo"
assert s.data['age'] == 42.3
assert s.is_valid(['email']) is False
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_exclude_in_init(SchemaClass, user):
s = SchemaClass(user, exclude=('age', 'homepage'))
assert 'homepage' not in s.data
assert 'age' not in s.data
assert 'name' in s.data
@pytest.mark.parametrize('SchemaClass',
[UserSchema, UserMetaSchema])
def test_only_in_init(SchemaClass, user):
s = SchemaClass(user, only=('name', 'age'))
assert 'homepage' not in s.data
assert 'name' in s.data
assert 'age' in s.data
def test_invalid_only_param(user):
with pytest.raises(AttributeError):
UserSchema(user, only=("_invalid", "name"))
def test_strict_init():
invalid = User("Foo", email="foo.com")
with pytest.raises(MarshallingError):
UserSchema(invalid, strict=True)
def test_strict_meta_option():
class StrictUserSchema(UserSchema):
class Meta:
strict = True
invalid = User("Foo", email="foo.com")
with pytest.raises(MarshallingError):
StrictUserSchema(invalid)
def test_can_serialize_uuid(serialized_user, user):
assert serialized_user.data['uid'] == str(user.uid)
def test_can_serialize_time(user, serialized_user):
expected = user.time_registered.isoformat()[:12]
assert serialized_user.data['time_registered'] == expected
def test_invalid_time():
u = User('Joe', time_registered='foo')
s = UserSchema(u)
assert s.is_valid(['time_registered']) is False
assert s.errors['time_registered'] == "'foo' cannot be formatted as a time."
def test_invalid_date():
u = User("Joe", birthdate='foo')
s = UserSchema(u)
assert s.is_valid(['birthdate']) is False
assert s.errors['birthdate'] == "'foo' cannot be formatted as a date."
def test_invalid_selection():
u = User('Jonhy')
u.sex = 'hybrid'
s = UserSchema(u)
assert s.is_valid(['sex']) is False
assert s.errors['sex'] == "'hybrid' is not a valid choice for this field."
def test_custom_json():
class UserJSONSchema(Schema):
name = fields.String()
class Meta:
json_module = mockjson
user = User('Joe')
s = UserJSONSchema(user)
result, errors = s.dumps(user)
assert result == mockjson.dumps('val')
def test_custom_error_message():
class ErrorSchema(Schema):
email = fields.Email(error="Invalid email")
homepage = fields.Url(error="Bad homepage.")
balance = fields.Fixed(error="Bad balance.")
u = User("Joe", email="joe.net", homepage="[email protected]", balance="blah")
s = ErrorSchema()
data, errors = s.dump(u)
assert errors['email'] == "Invalid email"
assert errors['homepage'] == "Bad homepage."
assert errors['balance'] == "Bad balance."
def test_error_raised_if_fields_option_is_not_list():
class BadSchema(Schema):
name = fields.String()
class Meta:
fields = 'name'
u = User('Joe')
with pytest.raises(ValueError):
BadSchema(u)
def test_error_raised_if_additional_option_is_not_list():
class BadSchema(Schema):
name = fields.String()
class Meta:
additional = 'email'
u = User('Joe')
with pytest.raises(ValueError):
BadSchema(u)
def test_meta_serializer_fields():
u = User("John", age=42.3, email="[email protected]",
homepage="http://john.com")
s = UserMetaSchema(u)
assert s.data['name'] == u.name
assert s.data['balance'] == "100.00"
assert s.data['uppername'] == "JOHN"
assert s.data['is_old'] is False
assert s.data['created'] == utils.isoformat(u.created)
assert s.data['updated_local'] == utils.isoformat(u.updated, localtime=True)
assert s.data['finger_count'] == 10
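# Schema used by the field-ordering tests below to verify that declared field
# order is preserved when dumping.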
class KeepOrder(Schema):
name = fields.String()
email = fields.Email()
age = fields.Integer()
created = fields.DateTime()
id = fields.Integer()
homepage = fields.Url()
birthdate = fields.DateTime()
def test_declared_field_order_is_maintained(user):
ser = KeepOrder()
data, errs = ser.dump(user)
keys = list(data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
def test_nested_field_order_with_only_arg_is_maintained(user):
class HasNestedOnly(Schema):
user = fields.Nested(KeepOrder, only=('name', 'email', 'age',
'created', 'id', 'homepage'))
ser = HasNestedOnly()
data, errs = ser.dump({'user': user})
user_data = data['user']
keys = list(user_data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage']
def test_nested_field_order_with_exclude_arg_is_maintained(user):
class HasNestedExclude(Schema):
user = fields.Nested(KeepOrder, exclude=('birthdate', ))
ser = HasNestedExclude()
data, errs = ser.dump({'user': user})
user_data = data['user']
keys = list(user_data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage']
def test_meta_fields_order_is_maintained(user):
class MetaSchema(Schema):
class Meta:
fields = ('name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate')
ser = MetaSchema()
data, errs = ser.dump(user)
keys = list(data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
def test_meta_fields_mapping(user):
s = UserMetaSchema(user)
assert type(s.fields['name']) == fields.String
assert type(s.fields['created']) == fields.DateTime
assert type(s.fields['updated']) == fields.DateTime
assert type(s.fields['updated_local']) == fields.LocalDateTime
assert type(s.fields['age']) == fields.Float
assert type(s.fields['balance']) == fields.Price
assert type(s.fields['registered']) == fields.Boolean
assert type(s.fields['sex_choices']) == fields.Raw
assert type(s.fields['hair_colors']) == fields.Raw
assert type(s.fields['finger_count']) == fields.Integer
assert type(s.fields['uid']) == fields.UUID
assert type(s.fields['time_registered']) == fields.Time
assert type(s.fields['birthdate']) == fields.Date
assert type(s.fields['since_created']) == fields.TimeDelta
def test_meta_field_not_on_obj_raises_attribute_error(user):
class BadUserSchema(Schema):
class Meta:
fields = ('name', 'notfound')
with pytest.raises(AttributeError):
BadUserSchema(user)
def test_exclude_fields(user):
s = UserExcludeSchema(user)
assert "created" not in s.data
assert "updated" not in s.data
assert "name" in s.data
def test_fields_option_must_be_list_or_tuple(user):
class BadFields(Schema):
class Meta:
fields = "name"
with pytest.raises(ValueError):
BadFields(user)
def test_exclude_option_must_be_list_or_tuple(user):
class BadExclude(Schema):
class Meta:
exclude = "name"
with pytest.raises(ValueError):
BadExclude(user)
def test_dateformat_option(user):
fmt = '%Y-%m'
class DateFormatSchema(Schema):
updated = fields.DateTime("%m-%d")
class Meta:
fields = ('created', 'updated')
dateformat = fmt
serialized = DateFormatSchema(user)
assert serialized.data['created'] == user.created.strftime(fmt)
assert serialized.data['updated'] == user.updated.strftime("%m-%d")
def test_default_dateformat(user):
class DateFormatSchema(Schema):
updated = fields.DateTime(format="%m-%d")
class Meta:
fields = ('created', 'updated')
serialized = DateFormatSchema(user)
assert serialized.data['created'] == utils.isoformat(user.created)
assert serialized.data['updated'] == user.updated.strftime("%m-%d")
def test_inherit_meta(user):
class InheritedMetaSchema(UserMetaSchema):
pass
result = InheritedMetaSchema(user).data
expected = UserMetaSchema(user).data
assert result == expected
def test_additional(user):
s = UserAdditionalSchema(user)
assert s.data['lowername'] == user.name.lower()
assert s.data['name'] == user.name
def test_cant_set_both_additional_and_fields(user):
class BadSchema(Schema):
name = fields.String()
class Meta:
fields = ("name", 'email')
additional = ('email', 'homepage')
with pytest.raises(ValueError):
BadSchema(user)
def test_serializing_none_meta():
s = UserMetaSchema(None)
# Since meta fields are used, defaults to None
assert s.data['name'] is None
assert s.data['email'] is None
class CustomError(Exception):
pass
class MySchema(Schema):
name = fields.String()
email = fields.Email()
class MySchema2(Schema):
homepage = fields.URL()
def test_dump_with_custom_error_handler(user):
@MySchema.error_handler
def handle_errors(serializer, errors, obj):
assert isinstance(serializer, MySchema)
assert 'email' in errors
assert isinstance(obj, User)
raise CustomError('Something bad happened')
user.email = 'bademail'
with pytest.raises(CustomError):
MySchema().dump(user)
user.email = '[email protected]'
assert MySchema(user).data
def test_load_with_custom_error_handler():
@MySchema.error_handler
def handle_errors(serializer, errors, data):
assert isinstance(serializer, MySchema)
assert 'email' in errors
assert isinstance(data, dict)
raise CustomError('Something bad happened')
with pytest.raises(CustomError):
MySchema().load({'email': 'invalid'})
def test_multiple_serializers_with_same_error_handler(user):
@MySchema.error_handler
@MySchema2.error_handler
def handle_errors(serializer, errors, obj):
raise CustomError('Something bad happened')
user.email = 'bademail'
user.homepage = 'foo'
with pytest.raises(CustomError):
MySchema().dump(user)
with pytest.raises(CustomError):
MySchema2().dump(user)
def test_setting_error_handler_class_attribute(user):
def handle_errors(serializer, errors, obj):
raise CustomError('Something bad happened')
class ErrorSchema(Schema):
email = fields.Email()
__error_handler__ = handle_errors
class ErrorSchemaSub(ErrorSchema):
pass
user.email = 'invalid'
ser = ErrorSchema()
with pytest.raises(CustomError):
ser.dump(user)
subser = ErrorSchemaSub()
with pytest.raises(CustomError):
subser.dump(user)
def test_serializer_with_custom_data_handler(user):
class CallbackSchema(Schema):
name = fields.String()
@CallbackSchema.data_handler
def add_meaning(serializer, data, obj):
data['meaning'] = 42
return data
ser = CallbackSchema()
data, _ = ser.dump(user)
assert data['meaning'] == 42
def test_serializer_with_multiple_data_handlers(user):
class CallbackSchema2(Schema):
name = fields.String()
@CallbackSchema2.data_handler
def add_meaning(serializer, data, obj):
data['meaning'] = 42
return data
@CallbackSchema2.data_handler
def upper_name(serializer, data, obj):
data['name'] = data['name'].upper()
return data
ser = CallbackSchema2()
data, _ = ser.dump(user)
assert data['meaning'] == 42
assert data['name'] == user.name.upper()
def test_setting_data_handlers_class_attribute(user):
def add_meaning(serializer, data, obj):
data['meaning'] = 42
return data
class CallbackSchema3(Schema):
__data_handlers__ = [add_meaning]
name = fields.String()
ser = CallbackSchema3()
data, _ = ser.dump(user)
assert data['meaning'] == 42
def test_root_data_handler(user):
class RootSchema(Schema):
NAME = 'user'
name = fields.String()
@RootSchema.data_handler
def add_root(serializer, data, obj):
return {
serializer.NAME: data
}
s = RootSchema()
data, _ = s.dump(user)
assert data['user']['name'] == user.name
def test_serializer_repr():
class MySchema(Schema):
name = fields.String()
ser = MySchema(many=True, strict=True)
rep = repr(ser)
assert 'MySchema' in rep
assert 'strict=True' in rep
assert 'many=True' in rep
class TestNestedSchema:
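    """Tests for dumping schemas that contain Nested fields."""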
def setup_method(self, method):
self.user = User(name="Monty", age=81)
col1 = User(name="Mick", age=123)
col2 = User(name="Keith", age=456)
self.blog = Blog("Monty's blog", user=self.user, categories=["humor", "violence"],
collaborators=[col1, col2])
def test_flat_nested(self):
class FlatBlogSchema(Schema):
name = fields.String()
user = fields.Nested(UserSchema, only='name')
collaborators = fields.Nested(UserSchema, only='name', many=True)
s = FlatBlogSchema()
data, _ = s.dump(self.blog)
assert data['user'] == self.blog.user.name
for i, name in enumerate(data['collaborators']):
assert name == self.blog.collaborators[i].name
def test_flat_nested2(self):
class FlatBlogSchema(Schema):
name = fields.String()
collaborators = fields.Nested(UserSchema, many=True, only='uid')
s = FlatBlogSchema()
data, _ = s.dump(self.blog)
assert data['collaborators'][0] == str(self.blog.collaborators[0].uid)
def test_required_nested_field(self):
class BlogRequiredSchema(Schema):
user = fields.Nested(UserSchema, required=True)
b = Blog('Authorless blog', user=None)
_, errs = BlogRequiredSchema().dump(b)
assert 'user' in errs
assert 'required' in errs['user']
def test_nested_default(self):
class BlogDefaultSchema(Schema):
user = fields.Nested(UserSchema, default=0)
b = Blog('Just the default blog', user=None)
data, _ = BlogDefaultSchema().dump(b)
assert data['user'] == 0
def test_nested_none_default(self):
class BlogDefaultSchema(Schema):
user = fields.Nested(UserSchema, default=None)
b = Blog('Just the default blog', user=None)
data, _ = BlogDefaultSchema().dump(b)
assert data['user'] is None
def test_nested(self):
blog_serializer = BlogSchema()
serialized_blog, _ = blog_serializer.dump(self.blog)
user_serializer = UserSchema()
serialized_user, _ = user_serializer.dump(self.user)
assert serialized_blog['user'] == serialized_user
def test_nested_many_fields(self):
serialized_blog, _ = BlogSchema().dump(self.blog)
expected = [UserSchema().dump(col)[0] for col in self.blog.collaborators]
assert serialized_blog['collaborators'] == expected
def test_nested_meta_many(self):
serialized_blog = BlogUserMetaSchema().dump(self.blog)[0]
assert len(serialized_blog['collaborators']) == 2
expected = [UserMetaSchema().dump(col)[0] for col in self.blog.collaborators]
assert serialized_blog['collaborators'] == expected
def test_nested_only(self):
col1 = User(name="Mick", age=123, id_="abc")
col2 = User(name="Keith", age=456, id_="def")
self.blog.collaborators = [col1, col2]
serialized_blog = BlogOnlySchema().dump(self.blog)[0]
assert serialized_blog['collaborators'] == [{"id": col1.id}, {"id": col2.id}]
def test_exclude(self):
serialized = BlogSchemaExclude().dump(self.blog)[0]
assert "uppername" not in serialized['user'].keys()
def test_only_takes_precedence_over_exclude(self):
serialized = BlogSchemaOnlyExclude().dump(self.blog)[0]
assert serialized['user']['name'] == self.user.name
def test_list_field(self):
serialized = BlogSchema().dump(self.blog)[0]
assert serialized['categories'] == ["humor", "violence"]
def test_nested_errors(self):
invalid_user = User("Monty", email="foo")
blog = Blog("Monty's blog", user=invalid_user)
serialized_blog, errors = BlogSchema().dump(blog)
assert "email" in errors['user']
expected_msg = "\"{0}\" is not a valid email address.".format(invalid_user.email)
assert errors['user']['email'] == expected_msg
# No problems with collaborators
assert "collaborators" not in errors
def test_nested_method_field(self):
data = BlogSchema().dump(self.blog)[0]
assert data['user']['is_old']
assert data['collaborators'][0]['is_old']
def test_nested_function_field(self):
data = BlogSchema().dump(self.blog)[0]
assert data['user']['lowername'] == self.user.name.lower()
expected = self.blog.collaborators[0].name.lower()
assert data['collaborators'][0]['lowername'] == expected
def test_nested_prefixed_field(self):
data = BlogSchemaPrefixedUser().dump(self.blog)[0]
assert data['user']['usr_name'] == self.user.name
assert data['user']['usr_lowername'] == self.user.name.lower()
def test_nested_prefixed_many_field(self):
data = BlogSchemaPrefixedUser().dump(self.blog)[0]
assert data['collaborators'][0]['usr_name'] == self.blog.collaborators[0].name
def test_invalid_float_field(self):
user = User("Joe", age="1b2")
_, errors = UserSchema().dump(user)
assert "age" in errors
def test_serializer_meta_with_nested_fields(self):
data = BlogSchemaMeta().dump(self.blog)[0]
assert data['title'] == self.blog.title
assert data['user'] == UserSchema(self.user).data
assert data['collaborators'] == [UserSchema(c).data
for c in self.blog.collaborators]
assert data['categories'] == self.blog.categories
def test_serializer_with_nested_meta_fields(self):
# Schema has user = fields.Nested(UserMetaSerializer)
s = BlogUserMetaSchema(self.blog)
assert s.data['user'] == UserMetaSchema(self.blog.user).data
def test_nested_fields_must_be_passed_a_serializer(self):
class BadNestedFieldSchema(BlogSchema):
user = fields.Nested(fields.String)
with pytest.raises(ValueError):
BadNestedFieldSchema().dump(self.blog)
class TestSelfReference:
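    """Tests for self-referential Nested('self') fields."""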
def setup_method(self, method):
self.employer = User(name="Joe", age=59)
self.user = User(name="Tom", employer=self.employer, age=28)
def test_nesting_serializer_within_itself(self):
class SelfSchema(Schema):
name = fields.String()
age = fields.Integer()
employer = fields.Nested('self', exclude=('employer', ))
data, errors = SelfSchema().dump(self.user)
assert not errors
assert data['name'] == self.user.name
assert data['employer']['name'] == self.employer.name
assert data['employer']['age'] == self.employer.age
def test_nesting_within_itself_meta(self):
class SelfSchema(Schema):
employer = fields.Nested("self", exclude=('employer', ))
class Meta:
additional = ('name', 'age')
data, errors = SelfSchema().dump(self.user)
assert not errors
assert data['name'] == self.user.name
assert data['age'] == self.user.age
assert data['employer']['name'] == self.employer.name
assert data['employer']['age'] == self.employer.age
def test_nested_self_with_only_param(self):
class SelfSchema(Schema):
employer = fields.Nested('self', only=('name', ))
class Meta:
fields = ('name', 'employer')
data = SelfSchema().dump(self.user)[0]
assert data['name'] == self.user.name
assert data['employer']['name'] == self.employer.name
assert 'age' not in data['employer']
def test_multiple_nested_self_fields(self):
class MultipleSelfSchema(Schema):
emp = fields.Nested('self', only='name', attribute='employer')
rels = fields.Nested('self', only='name',
many=True, attribute='relatives')
class Meta:
fields = ('name', 'emp', 'rels')
schema = MultipleSelfSchema()
self.user.relatives = [User(name="Bar", age=12), User(name='Baz', age=34)]
data, errors = schema.dump(self.user)
assert not errors
assert len(data['rels']) == len(self.user.relatives)
relative = data['rels'][0]
assert relative == self.user.relatives[0].name
def test_nested_many(self):
class SelfManySchema(Schema):
relatives = fields.Nested('self', many=True)
class Meta:
additional = ('name', 'age')
person = User(name='Foo')
person.relatives = [User(name="Bar", age=12), User(name='Baz', age=34)]
data = SelfManySchema().dump(person)[0]
assert data['name'] == person.name
assert len(data['relatives']) == len(person.relatives)
assert data['relatives'][0]['name'] == person.relatives[0].name
assert data['relatives'][0]['age'] == person.relatives[0].age
def test_serialization_with_required_field():
class RequiredUserSchema(Schema):
name = fields.String(required=True)
user = User(name=None)
data, errors = RequiredUserSchema().dump(user)
assert 'name' in errors
assert errors['name'] == 'Missing data for required field.'
def test_serialization_with_required_field_and_custom_validator():
class RequiredGenderSchema(Schema):
gender = fields.String(required=True,
validate=lambda x: x.lower() == 'f' or x.lower() == 'm',
error="Gender must be 'f' or 'm'.")
user = dict(gender=None)
data, errors = RequiredGenderSchema().dump(user)
assert errors
assert 'gender' in errors
assert errors['gender'] == "Missing data for required field."
user = dict(gender='Unkown')
s = RequiredGenderSchema(user)
assert s.is_valid() is False
assert 'gender' in s.errors
assert s.errors['gender'] == "Gender must be 'f' or 'm'."
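# Schema whose Method and Function fields need an external context; exercised
# by TestContext below.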
class UserContextSchema(Schema):
is_owner = fields.Method('get_is_owner')
is_collab = fields.Function(lambda user, ctx: user in ctx['blog'])
def get_is_owner(self, user, context):
return context['blog'].user.name == user.name
class TestContext:
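    """Tests for passing a context to Method and Function fields."""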
def test_context_method(self):
owner = User('Joe')
blog = Blog(title='Joe Blog', user=owner)
context = {'blog': blog}
serializer = UserContextSchema()
serializer.context = context
data = serializer.dump(owner)[0]
assert data['is_owner'] is True
nonowner = User('Fred')
data = serializer.dump(nonowner)[0]
assert data['is_owner'] is False
def test_context_method_function(self):
owner = User('Fred')
blog = Blog('Killer Queen', user=owner)
collab = User('Brian')
blog.collaborators.append(collab)
context = {'blog': blog}
serializer = UserContextSchema()
serializer.context = context
data = serializer.dump(collab)[0]
assert data['is_collab'] is True
noncollab = User('Foo')
data = serializer.dump(noncollab)[0]
assert data['is_collab'] is False
def test_method_field_raises_error_when_context_not_available(self):
# serializer that only has a method field
class UserMethodContextSchema(Schema):
is_owner = fields.Method('get_is_owner')
def get_is_owner(self, user, context):
return context['blog'].user.name == user.name
owner = User('Joe')
        serializer = UserMethodContextSchema(strict=True)
serializer.context = None
with pytest.raises(MarshallingError) as excinfo:
serializer.dump(owner)
msg = 'No context available for Method field {0!r}'.format('is_owner')
assert msg in str(excinfo)
def test_function_field_raises_error_when_context_not_available(self):
# only has a function field
class UserFunctionContextSchema(Schema):
is_collab = fields.Function(lambda user, ctx: user in ctx['blog'])
owner = User('Joe')
serializer = UserFunctionContextSchema(strict=True)
# no context
serializer.context = None
with pytest.raises(MarshallingError) as excinfo:
serializer.dump(owner)
msg = 'No context available for Function field {0!r}'.format('is_collab')
assert msg in str(excinfo)
def test_fields_context(self):
class CSchema(Schema):
name = fields.String()
ser = CSchema()
ser.context['foo'] = 42
assert ser.fields['name'].context == {'foo': 42}
def test_nested_fields_inherit_context(self):
class InnerSchema(Schema):
likes_bikes = fields.Function(lambda obj, ctx: 'bikes' in ctx['info'])
class CSchema(Schema):
inner = fields.Nested(InnerSchema)
ser = CSchema(strict=True)
ser.context['info'] = 'i like bikes'
obj = {
'inner': {}
}
result = ser.dump(obj)
assert result.data['inner']['likes_bikes'] is True
def raise_marshalling_value_error():
try:
raise ValueError('Foo bar')
except ValueError as error:
raise MarshallingError(error)
class TestMarshallingError:
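    """Tests that MarshallingError preserves the underlying exception."""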
def test_saves_underlying_exception(self):
with pytest.raises(MarshallingError) as excinfo:
raise_marshalling_value_error()
assert 'Foo bar' in str(excinfo)
error = excinfo.value
assert isinstance(error.underlying_exception, ValueError)
def test_error_gets_raised_if_many_is_omitted(user):
class BadSchema(Schema):
# forgot to set many=True
class Meta:
fields = ('name', 'relatives')
relatives = fields.Nested(UserSchema)
user.relatives = [User('Joe'), User('Mike')]
with pytest.raises(TypeError) as excinfo:
BadSchema().dump(user)
# Exception includes message about setting many argument
assert 'many=True' in str(excinfo)
def test_serializer_can_specify_nested_object_as_attribute(blog):
class BlogUsernameSchema(Schema):
author_name = fields.String(attribute='user.name')
ser = BlogUsernameSchema()
result = ser.dump(blog)
assert result.data['author_name'] == blog.user.name
|
the-stack_106_28445 | """
A script to systematicly check
1, read_cif
2, alternative setting
3, subgroup
4, supergroup
"""
from glob import glob
import numpy as np
import pymatgen.analysis.structure_matcher as sm
from pymatgen.core import Structure
from pyxtal import pyxtal
from pyxtal.supergroup import supergroups
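# Walk every CIF file shipped with pyxtal and run the four checks listed in
# the module docstring; only small structures (<= 6 atom sites) get the full
# treatment.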
for i, name in enumerate(glob("pyxtal/miscellaneous/cifs/*.cif")):
# 1, read from cif
s = pyxtal()
s.from_seed(name)
pmg_s1 = s.to_pymatgen()
pmg0 = Structure.from_file(name)
G = s.group.number
print(i, name, len(s.atom_sites))
if len(s.atom_sites) <= 6:
if not sm.StructureMatcher().fit(pmg_s1, pmg0):
print("Error in reading cif")
# 2, alternative setting
strucs = s.get_alternatives()
        for struc in strucs:
pmg_s2 = struc.to_pymatgen()
if not sm.StructureMatcher().fit(pmg_s1, pmg_s2):
print("Error in alternative setting")
print(s)
print(struc)
break
# 3, subgroup
for gtype in ['t', 'k']:
valid = True
try:
struc_h = s.subgroup_once(eps=0, group_type=gtype, max_cell=3)
H = struc_h.group.number
pmg_h = struc_h.to_pymatgen()
if not sm.StructureMatcher().fit(pmg_s1, pmg_h):
print("Error in subgroup", gtype)
except RuntimeError:
print("no splitter skip", name)
valid = False
# 4, supergroup
if valid:
#print(G, H)
if H>2 and H != G and H in s.group.get_max_subgroup_numbers():
struc_h = s.subgroup_once(eps=0.05, H=H, group_type=gtype, mut_lat=False)
try:
sup = supergroups(struc_h, G=G, d_tol=0.3, max_per_G=500)
if sup.strucs is not None:
match = False
for struc in sup.strucs:
pmg_g = struc.to_pymatgen()
if sm.StructureMatcher().fit(pmg_g, pmg_s1):
match = True
break
if not match:
print("Cannot recover the original structure", G, '<-', H)
else:
print("Error in supergroup", G, '<-', H)
except RuntimeError:
print("no splitter skip", name)
|
the-stack_106_28446 | """
Handle the frontend for Home Assistant.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/frontend/
"""
import asyncio
import hashlib
import json
import logging
import os
from urllib.parse import urlparse
from aiohttp import web
import voluptuous as vol
import jinja2
import homeassistant.helpers.config_validation as cv
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.components.http.const import KEY_AUTHENTICATED
from homeassistant.components import websocket_api
from homeassistant.config import find_config_file, load_yaml_config_file
from homeassistant.const import CONF_NAME, EVENT_THEMES_UPDATED
from homeassistant.core import callback
from homeassistant.helpers.translation import async_get_translations
from homeassistant.loader import bind_hass
REQUIREMENTS = ['home-assistant-frontend==20180521.0']
DOMAIN = 'frontend'
DEPENDENCIES = ['api', 'websocket_api', 'http', 'system_log']
URL_PANEL_COMPONENT_FP = '/frontend/panels/{}-{}.html'
CONF_THEMES = 'themes'
CONF_EXTRA_HTML_URL = 'extra_html_url'
CONF_EXTRA_HTML_URL_ES5 = 'extra_html_url_es5'
CONF_FRONTEND_REPO = 'development_repo'
CONF_JS_VERSION = 'javascript_version'
JS_DEFAULT_OPTION = 'auto'
JS_OPTIONS = ['es5', 'latest', 'auto']
DEFAULT_THEME_COLOR = '#03A9F4'
MANIFEST_JSON = {
'background_color': '#FFFFFF',
'description': 'Open-source home automation platform running on Python 3.',
'dir': 'ltr',
'display': 'standalone',
'icons': [],
'lang': 'en-US',
'name': 'Home Assistant',
'short_name': 'Assistant',
'start_url': '/states',
'theme_color': DEFAULT_THEME_COLOR
}
for size in (192, 384, 512, 1024):
MANIFEST_JSON['icons'].append({
'src': '/static/icons/favicon-{}x{}.png'.format(size, size),
'sizes': '{}x{}'.format(size, size),
'type': 'image/png'
})
DATA_FINALIZE_PANEL = 'frontend_finalize_panel'
DATA_PANELS = 'frontend_panels'
DATA_JS_VERSION = 'frontend_js_version'
DATA_EXTRA_HTML_URL = 'frontend_extra_html_url'
DATA_EXTRA_HTML_URL_ES5 = 'frontend_extra_html_url_es5'
DATA_THEMES = 'frontend_themes'
DATA_DEFAULT_THEME = 'frontend_default_theme'
DEFAULT_THEME = 'default'
PRIMARY_COLOR = 'primary-color'
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_FRONTEND_REPO): cv.isdir,
vol.Optional(CONF_THEMES): vol.Schema({
cv.string: {cv.string: cv.string}
}),
vol.Optional(CONF_EXTRA_HTML_URL):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_EXTRA_HTML_URL_ES5):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_JS_VERSION, default=JS_DEFAULT_OPTION):
vol.In(JS_OPTIONS)
}),
}, extra=vol.ALLOW_EXTRA)
SERVICE_SET_THEME = 'set_theme'
SERVICE_RELOAD_THEMES = 'reload_themes'
SERVICE_SET_THEME_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
})
WS_TYPE_GET_PANELS = 'get_panels'
SCHEMA_GET_PANELS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_GET_PANELS,
})
class AbstractPanel:
"""Abstract class for panels."""
# Name of the webcomponent
component_name = None
# Icon to show in the sidebar (optional)
sidebar_icon = None
# Title to show in the sidebar (optional)
sidebar_title = None
# Url to the webcomponent (depending on JS version)
webcomponent_url_es5 = None
webcomponent_url_latest = None
# Url to show the panel in the frontend
frontend_url_path = None
# Config to pass to the webcomponent
config = None
@asyncio.coroutine
def async_register(self, hass):
"""Register panel with HASS."""
panels = hass.data.get(DATA_PANELS)
if panels is None:
panels = hass.data[DATA_PANELS] = {}
if self.frontend_url_path in panels:
_LOGGER.warning("Overwriting component %s", self.frontend_url_path)
if DATA_FINALIZE_PANEL in hass.data:
yield from hass.data[DATA_FINALIZE_PANEL](self)
panels[self.frontend_url_path] = self
@callback
def async_register_index_routes(self, router, index_view):
"""Register routes for panel to be served by index view."""
router.add_route(
'get', '/{}'.format(self.frontend_url_path), index_view.get)
router.add_route(
'get', '/{}/{{extra:.+}}'.format(self.frontend_url_path),
index_view.get)
class BuiltInPanel(AbstractPanel):
"""Panel that is part of hass_frontend."""
def __init__(self, component_name, sidebar_title, sidebar_icon,
frontend_url_path, config):
"""Initialize a built-in panel."""
self.component_name = component_name
self.sidebar_title = sidebar_title
self.sidebar_icon = sidebar_icon
self.frontend_url_path = frontend_url_path or component_name
self.config = config
def to_response(self, hass, request):
"""Panel as dictionary."""
return {
'component_name': self.component_name,
'icon': self.sidebar_icon,
'title': self.sidebar_title,
'config': self.config,
'url_path': self.frontend_url_path,
}
class ExternalPanel(AbstractPanel):
"""Panel that is added by a custom component."""
REGISTERED_COMPONENTS = set()
def __init__(self, component_name, path, md5, sidebar_title, sidebar_icon,
frontend_url_path, config):
"""Initialize an external panel."""
self.component_name = component_name
self.path = path
self.md5 = md5
self.sidebar_title = sidebar_title
self.sidebar_icon = sidebar_icon
self.frontend_url_path = frontend_url_path or component_name
self.config = config
@asyncio.coroutine
def async_finalize(self, hass, frontend_repository_path):
"""Finalize this panel for usage.
        If frontend_repository_path is set, it will be prepended to the path
        of built-in components.
"""
try:
if self.md5 is None:
self.md5 = yield from hass.async_add_job(
_fingerprint, self.path)
except OSError:
_LOGGER.error('Cannot find or access %s at %s',
self.component_name, self.path)
hass.data[DATA_PANELS].pop(self.frontend_url_path)
return
self.webcomponent_url_es5 = self.webcomponent_url_latest = \
URL_PANEL_COMPONENT_FP.format(self.component_name, self.md5)
if self.component_name not in self.REGISTERED_COMPONENTS:
hass.http.register_static_path(
self.webcomponent_url_latest, self.path,
# if path is None, we're in prod mode, so cache static assets
frontend_repository_path is None)
self.REGISTERED_COMPONENTS.add(self.component_name)
def to_response(self, hass, request):
"""Panel as dictionary."""
result = {
'component_name': self.component_name,
'icon': self.sidebar_icon,
'title': self.sidebar_title,
'url_path': self.frontend_url_path,
'config': self.config,
}
if _is_latest(hass.data[DATA_JS_VERSION], request):
result['url'] = self.webcomponent_url_latest
else:
result['url'] = self.webcomponent_url_es5
return result
@bind_hass
@asyncio.coroutine
def async_register_built_in_panel(hass, component_name, sidebar_title=None,
sidebar_icon=None, frontend_url_path=None,
config=None):
"""Register a built-in panel."""
panel = BuiltInPanel(component_name, sidebar_title, sidebar_icon,
frontend_url_path, config)
yield from panel.async_register(hass)
@bind_hass
@asyncio.coroutine
def async_register_panel(hass, component_name, path, md5=None,
sidebar_title=None, sidebar_icon=None,
frontend_url_path=None, config=None):
"""Register a panel for the frontend.
component_name: name of the web component
path: path to the HTML of the web component
(required unless url is provided)
md5: the md5 hash of the web component (for versioning in URL, optional)
sidebar_title: title to show in the sidebar (optional)
sidebar_icon: icon to show next to title in sidebar (optional)
url_path: name to use in the URL (defaults to component_name)
config: config to be passed into the web component
"""
panel = ExternalPanel(component_name, path, md5, sidebar_title,
sidebar_icon, frontend_url_path, config)
yield from panel.async_register(hass)
@bind_hass
@callback
def add_extra_html_url(hass, url, es5=False):
"""Register extra html url to load."""
key = DATA_EXTRA_HTML_URL_ES5 if es5 else DATA_EXTRA_HTML_URL
url_set = hass.data.get(key)
if url_set is None:
url_set = hass.data[key] = set()
url_set.add(url)
def add_manifest_json_key(key, val):
"""Add a keyval to the manifest.json."""
MANIFEST_JSON[key] = val
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the serving of the frontend."""
if list(hass.auth.async_auth_providers):
client = yield from hass.auth.async_create_client(
'Home Assistant Frontend',
redirect_uris=['/'],
no_secret=True,
)
else:
client = None
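    # The auth client created above (only when auth providers are configured)
    # is what later supplies the client_id passed to the index template.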
hass.components.websocket_api.async_register_command(
WS_TYPE_GET_PANELS, websocket_handle_get_panels, SCHEMA_GET_PANELS)
hass.http.register_view(ManifestJSONView)
conf = config.get(DOMAIN, {})
repo_path = conf.get(CONF_FRONTEND_REPO)
is_dev = repo_path is not None
hass.data[DATA_JS_VERSION] = js_version = conf.get(CONF_JS_VERSION)
if is_dev:
hass_frontend_path = os.path.join(repo_path, 'hass_frontend')
hass_frontend_es5_path = os.path.join(repo_path, 'hass_frontend_es5')
else:
import hass_frontend
import hass_frontend_es5
hass_frontend_path = hass_frontend.where()
hass_frontend_es5_path = hass_frontend_es5.where()
hass.http.register_static_path(
"/service_worker_es5.js",
os.path.join(hass_frontend_es5_path, "service_worker.js"), False)
hass.http.register_static_path(
"/service_worker.js",
os.path.join(hass_frontend_path, "service_worker.js"), False)
hass.http.register_static_path(
"/robots.txt",
os.path.join(hass_frontend_path, "robots.txt"), False)
hass.http.register_static_path("/static", hass_frontend_path, not is_dev)
hass.http.register_static_path(
"/frontend_latest", hass_frontend_path, not is_dev)
hass.http.register_static_path(
"/frontend_es5", hass_frontend_es5_path, not is_dev)
local = hass.config.path('www')
if os.path.isdir(local):
hass.http.register_static_path("/local", local, not is_dev)
index_view = IndexView(repo_path, js_version, client)
hass.http.register_view(index_view)
async def finalize_panel(panel):
"""Finalize setup of a panel."""
if hasattr(panel, 'async_finalize'):
await panel.async_finalize(hass, repo_path)
panel.async_register_index_routes(hass.http.app.router, index_view)
yield from asyncio.wait([
async_register_built_in_panel(hass, panel)
for panel in ('dev-event', 'dev-info', 'dev-service', 'dev-state',
'dev-template', 'dev-mqtt', 'kiosk')], loop=hass.loop)
hass.data[DATA_FINALIZE_PANEL] = finalize_panel
    # Finalize registration of panels that registered before the frontend was
    # set up. This includes the built-in panels registered just above.
yield from asyncio.wait(
[finalize_panel(panel) for panel in hass.data[DATA_PANELS].values()],
loop=hass.loop)
if DATA_EXTRA_HTML_URL not in hass.data:
hass.data[DATA_EXTRA_HTML_URL] = set()
if DATA_EXTRA_HTML_URL_ES5 not in hass.data:
hass.data[DATA_EXTRA_HTML_URL_ES5] = set()
for url in conf.get(CONF_EXTRA_HTML_URL, []):
add_extra_html_url(hass, url, False)
for url in conf.get(CONF_EXTRA_HTML_URL_ES5, []):
add_extra_html_url(hass, url, True)
async_setup_themes(hass, conf.get(CONF_THEMES))
hass.http.register_view(TranslationsView)
return True
def async_setup_themes(hass, themes):
"""Set up themes data and services."""
hass.http.register_view(ThemesView)
hass.data[DATA_DEFAULT_THEME] = DEFAULT_THEME
if themes is None:
hass.data[DATA_THEMES] = {}
return
hass.data[DATA_THEMES] = themes
@callback
def update_theme_and_fire_event():
"""Update theme_color in manifest."""
name = hass.data[DATA_DEFAULT_THEME]
themes = hass.data[DATA_THEMES]
if name != DEFAULT_THEME and PRIMARY_COLOR in themes[name]:
MANIFEST_JSON['theme_color'] = themes[name][PRIMARY_COLOR]
else:
MANIFEST_JSON['theme_color'] = DEFAULT_THEME_COLOR
hass.bus.async_fire(EVENT_THEMES_UPDATED, {
'themes': themes,
'default_theme': name,
})
@callback
def set_theme(call):
"""Set backend-preferred theme."""
data = call.data
name = data[CONF_NAME]
if name == DEFAULT_THEME or name in hass.data[DATA_THEMES]:
_LOGGER.info("Theme %s set as default", name)
hass.data[DATA_DEFAULT_THEME] = name
update_theme_and_fire_event()
else:
_LOGGER.warning("Theme %s is not defined.", name)
@callback
def reload_themes(_):
"""Reload themes."""
path = find_config_file(hass.config.config_dir)
new_themes = load_yaml_config_file(path)[DOMAIN].get(CONF_THEMES, {})
hass.data[DATA_THEMES] = new_themes
if hass.data[DATA_DEFAULT_THEME] not in new_themes:
hass.data[DATA_DEFAULT_THEME] = DEFAULT_THEME
update_theme_and_fire_event()
hass.services.async_register(
DOMAIN, SERVICE_SET_THEME, set_theme, schema=SERVICE_SET_THEME_SCHEMA)
hass.services.async_register(DOMAIN, SERVICE_RELOAD_THEMES, reload_themes)
class IndexView(HomeAssistantView):
"""Serve the frontend."""
url = '/'
name = 'frontend:index'
requires_auth = False
extra_urls = ['/states', '/states/{extra}']
def __init__(self, repo_path, js_option, client):
"""Initialize the frontend view."""
self.repo_path = repo_path
self.js_option = js_option
self.client = client
self._template_cache = {}
def get_template(self, latest):
"""Get template."""
if self.repo_path is not None:
root = self.repo_path
elif latest:
import hass_frontend
root = hass_frontend.where()
else:
import hass_frontend_es5
root = hass_frontend_es5.where()
tpl = self._template_cache.get(root)
if tpl is None:
with open(os.path.join(root, 'index.html')) as file:
tpl = jinja2.Template(file.read())
# Cache template if not running from repository
if self.repo_path is None:
self._template_cache[root] = tpl
return tpl
@asyncio.coroutine
def get(self, request, extra=None):
"""Serve the index view."""
hass = request.app['hass']
latest = self.repo_path is not None or \
_is_latest(self.js_option, request)
if request.path == '/':
panel = 'states'
else:
panel = request.path.split('/')[1]
if panel == 'states':
panel_url = ''
elif latest:
panel_url = hass.data[DATA_PANELS][panel].webcomponent_url_latest
else:
panel_url = hass.data[DATA_PANELS][panel].webcomponent_url_es5
no_auth = '1'
if hass.config.api.api_password and not request[KEY_AUTHENTICATED]:
# do not try to auto connect on load
no_auth = '0'
template = yield from hass.async_add_job(self.get_template, latest)
extra_key = DATA_EXTRA_HTML_URL if latest else DATA_EXTRA_HTML_URL_ES5
template_params = dict(
no_auth=no_auth,
panel_url=panel_url,
panels=hass.data[DATA_PANELS],
theme_color=MANIFEST_JSON['theme_color'],
extra_urls=hass.data[extra_key],
)
if self.client is not None:
template_params['client_id'] = self.client.id
return web.Response(text=template.render(**template_params),
content_type='text/html')
class ManifestJSONView(HomeAssistantView):
"""View to return a manifest.json."""
requires_auth = False
url = '/manifest.json'
name = 'manifestjson'
@asyncio.coroutine
def get(self, request): # pylint: disable=no-self-use
"""Return the manifest.json."""
msg = json.dumps(MANIFEST_JSON, sort_keys=True)
return web.Response(text=msg, content_type="application/manifest+json")
class ThemesView(HomeAssistantView):
"""View to return defined themes."""
requires_auth = False
url = '/api/themes'
name = 'api:themes'
@callback
def get(self, request):
"""Return themes."""
hass = request.app['hass']
return self.json({
'themes': hass.data[DATA_THEMES],
'default_theme': hass.data[DATA_DEFAULT_THEME],
})
class TranslationsView(HomeAssistantView):
"""View to return backend defined translations."""
url = '/api/translations/{language}'
name = 'api:translations'
@asyncio.coroutine
def get(self, request, language):
"""Return translations."""
hass = request.app['hass']
resources = yield from async_get_translations(hass, language)
return self.json({
'resources': resources,
})
def _fingerprint(path):
"""Fingerprint a file."""
with open(path) as fil:
return hashlib.md5(fil.read().encode('utf-8')).hexdigest()
def _is_latest(js_option, request):
"""
Return whether we should serve latest untranspiled code.
Set according to user's preference and URL override.
"""
import hass_frontend
if request is None:
return js_option == 'latest'
# latest in query
if 'latest' in request.query or (
request.headers.get('Referer') and
'latest' in urlparse(request.headers['Referer']).query):
return True
# es5 in query
if 'es5' in request.query or (
request.headers.get('Referer') and
'es5' in urlparse(request.headers['Referer']).query):
return False
# non-auto option in config
if js_option != 'auto':
return js_option == 'latest'
useragent = request.headers.get('User-Agent')
return useragent and hass_frontend.version(useragent)
@callback
def websocket_handle_get_panels(hass, connection, msg):
"""Handle get panels command.
Async friendly.
"""
panels = {
panel:
connection.hass.data[DATA_PANELS][panel].to_response(
connection.hass, connection.request)
for panel in connection.hass.data[DATA_PANELS]}
connection.to_write.put_nowait(websocket_api.result_message(
msg['id'], panels))
|
the-stack_106_28447 | """Example of using a custom image env and model.
Both the model and env are trivial (and super-fast), so they are useful
for running perf microbenchmarks.
"""
import argparse
import ray
import ray.tune as tune
from ray.tune import sample_from
from ray.rllib.examples.env.fast_image_env import FastImageEnv
from ray.rllib.examples.models.fast_model import FastModel, TorchFastModel
from ray.rllib.models import ModelCatalog
parser = argparse.ArgumentParser()
parser.add_argument("--num-cpus", type=int, default=2)
parser.add_argument("--torch", action="store_true")
parser.add_argument("--stop-iters", type=int, default=200)
parser.add_argument("--stop-timesteps", type=int, default=100000)
if __name__ == "__main__":
args = parser.parse_args()
ray.init(num_cpus=args.num_cpus or None)
ModelCatalog.register_custom_model(
"fast_model", TorchFastModel if args.torch else FastModel)
config = {
"env": FastImageEnv,
"compress_observations": True,
"model": {
"custom_model": "fast_model"
},
"num_gpus": 0,
"num_workers": 2,
"num_envs_per_worker": 10,
"num_data_loader_buffers": 1,
"num_aggregation_workers": 1,
"broadcast_interval": 50,
"rollout_fragment_length": 100,
"train_batch_size": sample_from(
lambda spec: 1000 * max(1, spec.config.num_gpus)),
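        # The lambda above is wrapped in sample_from so it is evaluated when
        # the trial is created, scaling the train batch with the GPUs the
        # trial actually gets.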
"fake_sampler": True,
"use_pytorch": args.torch,
}
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
}
tune.run("IMPALA", config=config, stop=stop)
ray.shutdown()
|
the-stack_106_28452 | """
Extensions to the Mailbox and System classes for simulating message and/or
node failures.
"""
from collections import namedtuple
import random
from paxos import SystemConfig
from paxos.messages import ClientRequestMsg, AdjustWeightsMsg
from paxos.sim import Mailbox
from paxos.test import DebugMailbox
class FailTestMailbox(Mailbox):
"""
A Mailbox class that drops messages destined to each process with a
probability specified in the system config.
"""
def send(self, to, msg):
"""
Test a random number between [0,1) against the fail rate to determine
whether or not to deliver/drop the message.
"""
try:
fail_rate = self.config.fail_rates[to]
except (AttributeError, IndexError):
fail_rate = 0
if msg == "quit" or isinstance(msg, SystemConfig) or isinstance(msg, ClientRequestMsg) or \
isinstance(msg, AdjustWeightsMsg) or fail_rate == 0 or fail_rate <= random.random():
super(FailTestMailbox, self).send(to, msg)
else:
self.message_failed()
#print("****** Message to {} failed: {} ******".format(to, msg))
def message_failed(self):
"""Hook for accounting of failed messages."""
pass
class FailTestSystemConfig(SystemConfig):
def __init__(self, *args, fail_rate=None, fail_rates=None, **kwargs):
"""
If given, fail_rate should be a number between 0 and 1, inclusive, that
will be set as a global message failure rate for all processes.
If fail_rates is given, it should be a list of fail rates with length
equal to the number of processes in the system.
"""
super(FailTestSystemConfig, self).__init__(*args, **kwargs)
if fail_rates:
self.fail_rates = fail_rates
else:
if fail_rate is None:
fail_rate = 0
self.fail_rates = [fail_rate for _ in range(self.num_processes)]
class DebugFailTestMailbox(FailTestMailbox, DebugMailbox):
def __init__(self, *args, **kwargs):
super(DebugFailTestMailbox, self).__init__(*args, **kwargs)
self.num_failed = 0
def get_counts(self):
#Counts = namedtuple('Counts', ['sent', 'recv', 'fail', 'total'])
return (self.num_sent, self.num_recv, self.num_failed,
self.num_sent + self.num_failed)
def message_failed(self):
self.num_failed += 1
def run_test(config):
    """Run one simulated system with the given config and print the results."""
from test import DebugSystem
system = DebugSystem(config, mailbox=DebugFailTestMailbox)
system.start()
for x in range(config.num_test_requests):
        # Always send to the same proposer, effectively using that proposer as
        # the leader.
to = 0
#system.mailbox.send(to, ClientRequestMsg(None, "Query {}".format(x+1)))
system.mailbox.send(to, ClientRequestMsg(None, x+1))
#time.sleep(random.random()/10)
system.shutdown_agents()
system.logger.print_results()
system.print_summary(log=True)
system.quit()
def run_failrate_tests():
for num_agents in (3, 5, 7, 9, 11):
for fail_rate in [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5]:
config = FailTestSystemConfig(num_agents, num_agents, num_agents,
num_test_requests=1000, fail_rate=fail_rate)
run_test(config)
def run_reliability_example():
fail_rates = [0, 0, 0, 0, 0, 0, 0.2, 0.2, 0.4, 0.4, 0, 0, 0, 0, 0]
requests = 1000
config = FailTestSystemConfig(5, 5, 5, num_test_requests=requests,
fail_rates=fail_rates)
run_test(config)
config = FailTestSystemConfig(5, 5, 5, num_test_requests=requests,
fail_rates=fail_rates, weights=[3,2,2,1,1])
run_test(config)
def demo1():
fail_rates = [0, 0, 0, 0, 0, 0, 0.2, 0.2, 0.4, 0.4, 0, 0, 0, 0, 0]
requests = 100
config = FailTestSystemConfig(5, 5, 5,
num_test_requests=requests,
fail_rates=fail_rates)
run_test(config)
def demo2():
fail_rates = [0, 0, 0, 0, 0, 0, 0.2, 0.2, 0.4, 0.4, 0, 0, 0, 0, 0]
requests = 100
config = FailTestSystemConfig(5, 5, 5,
num_test_requests=requests,
fail_rates=fail_rates, weights=[3,2,2,1,1])
run_test(config)
if __name__ == "__main__":
#run_failrate_tests()
run_reliability_example()
# Global fail rate for simulating unreliable messaging.
#config = FailTestSystemConfig(3, 3, 3, fail_rate=0.1, num_test_requests=3)
# Individual fail rates of 0/1 to simulate some machines down.
#config = FailTestSystemConfig(3, 3, 3, num_test_requests=30, fail_rates=[0, 1, 1, 1, 0, 0, 0, 0, 1])
# Failed acceptors. With equal weights nothing is learned. If we adjust
# the rates to give the only alive acceptor a majority weight, then values
# are accepted and it learns all values.
#config = FailTestSystemConfig(3, 3, 3, num_test_requests=100, fail_rates=[0, 0, 0, 1, 1, 0, 0, 0, 0], weights=[1,1,3])
#config = FailTestSystemConfig(3, 3, 3, num_test_requests=4000, fail_rates=[.2, .2, .2, .8, .2, .1, .2, .2, .2], weights=[1,2,4])
#config = FailTestSystemConfig(6, 6, 6, num_test_requests=400, fail_rates=[.2, .2, .2, .2, .2, .2, .8, .2, .1, .8, .2, .1, .2, .2, .2, .2, .2, .2], weights=[1,2,4,1,2,4])
#config = FailTestSystemConfig(3, 3, 3, num_test_requests=1000, fail_rate=0)
|
the-stack_106_28453 | import os, sys, subprocess
import re, itertools, types
import logging, tempfile
from collections import defaultdict
try:
from pathlib2 import Path
except ImportError: # pragma: py3 only
from pathlib import Path
from clckwrkbdgr import utils
import clckwrkbdgr.winnt.registry as registry
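# The two helpers below match a registry key path (a list of path components)
# against a prefix pattern; a '*' component in the pattern matches any single
# component. list_starts_with() also requires the path to be at least as long
# as the pattern, while list_has_prefix() only compares the overlapping parts.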
def list_starts_with(main_list, prefix_list):
if len(main_list) < len(prefix_list):
return False
for part, pattern in zip(main_list, prefix_list):
if pattern == '*':
continue
if part != pattern:
return False
return True
def list_has_prefix(main_list, prefix_list):
for part, pattern in zip(main_list, prefix_list):
if pattern == '*':
continue
if part != pattern:
return False
return True
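# Pattern files hold one key prefix per line, with components separated by '/'
# or '\'. An illustrative (hypothetical) example:
#   HKEY_CURRENT_USER\Software\SomeApp\Cache
#   HKEY_LOCAL_MACHINE/SOFTWARE/*/Telemetry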
def parse_pattern_file(filename):
for line in Path(filename).read_text().splitlines():
values = re.split(r'[/\\]', line)
if values:
yield values
def save_snapshot(rootkey, dest_file, quiet=False):
args = ["REG", "EXPORT", rootkey, str(dest_file), '/y']
logging.debug(args)
rc = subprocess.call(args, stdout=subprocess.DEVNULL if quiet else None)
if rc != 0:
logging.error("Failed to extract registry snapshot of {0}!".format(rootkey))
return False
return True
def remove_WOW6432Node_entry(keypath):
return [entry for entry in keypath if entry != 'WOW6432Node']
def backup_registry_rootkeys(rootkeys, dest_file, exclude_patterns=None, quiet=False):
exclude_patterns = list(exclude_patterns) or []
tempfiles = []
for rootkey in rootkeys:
fhandle, filename = tempfile.mkstemp(suffix='.reg', prefix=rootkey)
os.close(fhandle)
filename = Path(filename)
tempfiles.append(filename)
if not save_snapshot(rootkey.upper(), filename, quiet=quiet):
logging.error("Failed to backup registry!")
return False
with open(str(dest_file), 'w', encoding='utf-16') as f:
parsed = itertools.chain.from_iterable(
registry.iterate_with_context(registry.parse(filename))
for filename in tempfiles
)
header_printed = False
for context, entry in parsed:
if context and any(list_starts_with(remove_WOW6432Node_entry(context), pattern) for pattern in exclude_patterns):
continue
if isinstance(entry, registry.Header):
if header_printed:
continue
else:
header_printed = True
f.write(str(entry))
for filename in tempfiles:
os.unlink(str(filename))
return True
def extract_part_of_snapshot(snapshot_file, exclude_patterns=None, include_patterns=None, output=None, quiet=False):
output = output or sys.stdout
exclude_patterns = list(exclude_patterns) or []
include_patterns = list(include_patterns) or []
parsed = registry.iterate_with_context(registry.parse(snapshot_file))
header_printed = False
for context, entry in parsed:
if context and exclude_patterns and any(list_starts_with(remove_WOW6432Node_entry(context), pattern) for pattern in exclude_patterns):
continue
if context and include_patterns and not any(list_has_prefix(remove_WOW6432Node_entry(context), pattern) for pattern in include_patterns):
continue
if isinstance(entry, registry.Header):
if header_printed:
continue
else:
header_printed = True
try:
output.write(str(entry))
except UnicodeError:
try:
output.write(str(entry).encode('utf-8', 'replace').decode('utf-8'))
except UnicodeError:
output.write(str(entry).encode('ascii', 'replace').decode('ascii'))
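# mark_diff() annotates plain GNU diff output of two registry dumps: for each
# hunk header (e.g. "12,15c12,16") it emits '# ...' lines naming the registry
# keys those line numbers fall under in the old dump, so the reader can tell
# which keys actually changed.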
def mark_diff(lines, old_file_name):
entries = iter(registry.iterate_with_context(registry.parse(old_file_name)))
for line in lines:
if line.startswith('>') or line == '---' or line.startswith('<'):
yield line
continue
try:
diff_context = re.match(r'^(\d+)(?:[,](\d+))?([acd])(\d+)(?:[,](\d+))?$', line)
start, stop = diff_context.group(1), diff_context.group(2)
action = diff_context.group(3)
context, entry = next(entries)
prev_context = context
start = int(start)
while entry.line_number <= start:
prev_context = context
context, entry = next(entries)
affected = [prev_context]
if stop:
stop = int(stop)
while entry.line_number <= stop:
affected.append(context)
context, entry = next(entries)
if action == 'a':
for context in affected:
yield '# [NEW LINE(S) AFTER:] {0}'.format('\\'.join(context))
else:
for context in affected:
yield '# {0}'.format('\\'.join(context))
except Exception as e:
yield '# ERROR: ' + str(e)
yield line
import click
@click.group()
@click.option('--quiet', is_flag=True, help='Show less output')
@click.pass_context
def main(ctx, quiet=False):
""" Utilities for Windows Registry. """
ctx.obj = types.SimpleNamespace()
ctx.obj.quiet = quiet
@main.command()
@click.argument('snapshot_file')
@click.option('-e', '--exclude', help='File with regkey prefixes to be excluded. Paths case-sensitive and are separated with slash (either direct or backward). Everything under prefix will be excluded. Values are considered part of the exclude path too (as the last component).')
@click.pass_obj
@utils.exits_with_return_value
def backup(args, snapshot_file, exclude=None):
""" Creates backup file and filters it.
Destination location of created and filtered SNAPSHOT FILE.
"""
quiet = args.quiet
exclude_patterns = list(parse_pattern_file(exclude)) if exclude else []
return backup_registry_rootkeys(['HKCU', 'HKLM'], snapshot_file, exclude_patterns=exclude_patterns, quiet=quiet)
@main.command()
@click.argument('snapshot_file')
@click.option('-i', '--include', help='File with regkey prefixes to be included. Paths are case-sensitive and are separated with a slash (either forward or backward). Everything under a prefix will be included (except patterns that are --excluded). Values are considered part of the include path too (as the last component).')
@click.option('-e', '--exclude', help='File with regkey prefixes to be excluded. Paths are case-sensitive and are separated with a slash (either forward or backward). Everything under a prefix will be excluded. Values are considered part of the exclude path too (as the last component).')
@click.pass_obj
@utils.exits_with_return_value
def extract(args, snapshot_file, exclude=None, include=None):
""" Extracts part of registry snapshot file, prints to stdout.
Expects prepared registry SNAPSHOT FILE.
"""
if not exclude and not include:
logging.error("Expected at least one of the --exclude or --include arguments!")
return False
quiet = args.quiet
exclude_patterns = list(parse_pattern_file(exclude)) if exclude else []
include_patterns = list(parse_pattern_file(include)) if include else []
return extract_part_of_snapshot(snapshot_file, exclude_patterns=exclude_patterns, include_patterns=include_patterns, quiet=quiet)
@main.command()
@click.argument('snapshot_file')
@utils.exits_with_return_value
def sort(snapshot_file):
""" Sorts values within keys. Prints to stdout.
Expects prepared registry SNAPSHOT FILE.
"""
output = sys.stdout
	for entry in registry.sort(registry.parse(snapshot_file)):
try:
output.write(str(entry))
except UnicodeError:
try:
output.write(str(entry).encode('utf-8', 'replace').decode('utf-8'))
except UnicodeError:
output.write(str(entry).encode('ascii', 'replace').decode('ascii'))
@main.command()
@click.argument('diff_file')
@click.argument('old_file')
@utils.exits_with_return_value
def filterdiff(diff_file, old_file):
""" Filters registry dump diff file and marks affected keys/values.
Prints to stdout.
Arguments:
1. Registry dump diff file.
2. Old registry dump file (read-only, for references).
"""
with open(diff_file) as f:
for line in mark_diff(f.read().splitlines(), old_file):
print(line)
@main.command()
@click.argument('snapshot_file')
@click.option('-n', '--topmost', type=int, default=40, help='Displays only this number of keys with largest sizes.')
@utils.exits_with_return_value
def stat(snapshot_file, topmost=40):
""" Collects and prints stat on registry snapshot file.
Currently supported are only the topmost keys with largest numbers of subkeys/values.
"""
all_sizes = defaultdict(int)
	for context, entry in registry.iterate_with_context(registry.parse(Path(snapshot_file).expanduser())):
while context:
all_sizes[context] +=1
context = context[:-1]
all_sizes = sorted(all_sizes.items(), key=lambda x: x[-1], reverse=True)
for entry, size in all_sizes[:max(1, topmost)]:
print(size, entry)
@main.command()
@click.option('--diff-command', help='External diff command (GNUdiff-compatible). By default will use simple `diff` on the PATH.')
@click.argument('old_file')
@click.argument('new_file')
@utils.exits_with_return_value
def diff(old_file, new_file, diff_command=None):
""" Prints diff of two registry dump file.
Arguments:
	1. Old registry dump file.
	2. New registry dump file.
"""
p = subprocess.Popen([diff_command or 'diff', '-a', old_file, new_file], stdout=subprocess.PIPE)
stdout, _ = p.communicate()
rc = p.wait()
if stdout:
stdout = stdout.replace(b'\x00', b'')
stdout = stdout.decode('utf-8', 'replace')
for line in mark_diff(stdout.splitlines(), old_file):
try:
print(line)
except UnicodeError:
print(repr(line).encode('ascii', 'replace').decode('ascii', 'replace'))
return rc
if __name__ == '__main__':
main()
|
the-stack_106_28456 | #!/usr/bin/env python
from __future__ import print_function
"""objectivefunction
Objective function utilities for inversions
"""
from setuptools import find_packages
try:
from numpy.distutils.core import setup
except Exception:
raise Exception(
"Install requires numpy. "
"If you use conda, `conda install numpy` "
"or you can use pip, `pip install numpy`"
)
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Natural Language :: English',
]
with open("README.rst") as f:
LONG_DESCRIPTION = ''.join(f.readlines())
# def configuration(parent_package='', top_path=None):
# from numpy.distutils.misc_util import Configuration
# config = Configuration(None, parent_package, top_path)
# config.set_options(ignore_setup_xxx_py=True,
# assume_default_configuration=True,
# delegate_options_to_subpackages=True,
# quiet=True)
# config.add_subpackage('objectivefunction')
# return config
setup(
name="objectivefunction",
version="0.0.3b0",
install_requires=[
'numpy>=1.7',
'scipy>=0.13',
'matplotlib',
'properties>=0.3.6b0',
],
author="Open Geophysics Developers",
author_email="[email protected]",
description="Objective function utilities for inversions",
long_description=LONG_DESCRIPTION,
license="MIT",
keywords="optimization, inversion, objective functions",
url="http://simpeg.xyz/",
download_url="https://github.com/opengeophysics/objectivefunction",
classifiers=CLASSIFIERS,
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
use_2to3=False,
setup_requires=['numpy'],
# configuration=configuration
)
|
the-stack_106_28458 | # Copyright (C) 2017 Datera Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a backup service using Datera Elastic Data Fabric
Architecture:
Container/Bucket --> Datera EDF Volume (single volume AppInstance)
Object --> Datera EDF Volume Snapshot
ObjectID --> Datera EDF Volume Snapshot Timestamp
Essentially, we create a Volume as a "Bucket", then write data to that volume
and snapshot it. The snapshots serves as our "Object" analogue. We can
restore a backup by restoring snapshots in reverse order and reading the data
back.
Since our minimum volume size is 1 GB, we'll use that as our minimum chunk size
Multiplexing:
This version of the driver also handles multiplexing between different
backup drivers. We determine the driver type from the backup's display name
(see the _get_driver method below).
"""
import contextlib
import hashlib
import os
import shlex
import six
import struct
import subprocess
import time
import uuid
import eventlet
from eventlet.green import threading
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from cinder.backup import chunkeddriver
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from os_brick import exception as brick_exception
import cinder.volume.drivers.datera.datera_common as datc
LOG = logging.getLogger(__name__)
bd_opts = [
cfg.StrOpt('backup_datera_san_ip',
default=None,
help='(REQUIRED) IP address of Datera EDF backend'),
cfg.StrOpt('backup_datera_san_login',
default=None,
help='(REQUIRED) Username for Datera EDF backend account'),
cfg.StrOpt('backup_datera_san_password',
default=None,
help='(REQUIRED) Password for Datera EDF backend account'),
cfg.StrOpt('backup_datera_tenant_id',
default='/root',
help='Datera tenant_id under which backup should be stored'),
cfg.IntOpt('backup_datera_chunk_size',
default=1,
help='Total chunk size (in GB, min 1 GB) to use for backup'),
cfg.BoolOpt('backup_datera_progress_timer',
default=False,
help='Enable progress timer for backup'),
cfg.IntOpt('backup_datera_replica_count',
default=3,
help='Number of replicas for container'),
cfg.StrOpt('backup_datera_placement_mode',
default='hybrid',
help='Options: hybrid, single_flash, all_flash'),
cfg.StrOpt('backup_datera_api_port',
default='7717',
help='Datera API port.'),
cfg.ListOpt('backup_datera_secondary_backup_drivers',
default=[],
help='Secondary drivers to manage with this driver. This is '
'done as a way to simulate a scheduler for backups. '
'Takes the form:\n'
'["cinder.backup.drivers.driver1",\n'
' "cinder.backup.drivers.driver2"]'),
cfg.BoolOpt('backup_datera_debug',
default=False,
help="True to set function arg and return logging"),
cfg.IntOpt('backup_datera_503_timeout',
default='120',
help='Timeout for HTTP 503 retry messages'),
cfg.IntOpt('backup_datera_503_interval',
default='5',
help='Interval between 503 retries'),
cfg.BoolOpt('backup_datera_disable_profiler',
default=False,
help="Set to True to disable profiling in the Datera driver"),
cfg.BoolOpt('backup_driver_use_ssl', default=False,
help="Set True to use SSL. Must also provide cert options"),
cfg.StrOpt('backup_driver_client_cert',
default=None,
help="Path to client certificate file"),
cfg.StrOpt('backup_driver_client_cert_key',
default=None,
help="Path to client certificate key file")
]
CONF = cfg.CONF
CONF.register_opts(bd_opts)
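# Illustrative cinder.conf snippet for this backup driver (section and values are
# placeholders, not site defaults; backup_driver must point at wherever this
# module is installed):
#   [DEFAULT]
#   backup_driver = <python path of this module>
#   backup_datera_san_ip = 10.0.0.2
#   backup_datera_san_login = admin
#   backup_datera_san_password = secret
#   backup_datera_tenant_id = /root
#   backup_datera_chunk_size = 1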
METADATA = "_metadata"
SHA256 = "_sha256file"
PREFIX = "DAT"
SI_NAME = 'storage-1'
VOL_NAME = 'volume-1'
PACK = "iQ32sxx"
TOTAL_OFFSET = 50
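# PACK describes the header written at the start of each backup volume: a native
# int (object number, padded for alignment), an unsigned long long (data length),
# a 32-byte MD5 hex digest and two pad bytes. On common 64-bit builds
# struct.calcsize(PACK) == TOTAL_OFFSET (50 bytes).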
@interface.backupdriver
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
class DateraBackupDriver(chunkeddriver.ChunkedBackupDriver):
"""Provides backup, restore and delete of backup objects within Datera EDF.
Version history:
1.0.0 - Initial driver
1.0.1 - Added secondary backup driver dispatching/multiplexing
2018.5.1.0 - Switched to date-based versioning scheme
"""
VERSION = '2018.5.1.0'
HEADER_DATA = {'Datera-Driver': 'OpenStack-Backup-{}'.format(VERSION)}
def __init__(self, context, db_driver=None):
# Ensure we have room for offset headers
chunk_size = CONF.backup_datera_chunk_size * units.Gi - TOTAL_OFFSET
# We don't care about chunks any smaller than our normal chunk size
sha_size = chunk_size
container_name = "replace-me"
super(DateraBackupDriver, self).__init__(context, chunk_size,
sha_size, container_name,
db_driver)
self.ctxt = context
self.db_driver = db_driver
self.support_force_delete = True
self._backup = None
self.san_ip = CONF.backup_datera_san_ip
self.username = CONF.backup_datera_san_login
self.password = CONF.backup_datera_san_password
self.api_port = CONF.backup_datera_api_port
self.driver_use_ssl = CONF.backup_driver_use_ssl
self.driver_client_cert = CONF.backup_driver_client_cert
self.driver_client_cert_key = CONF.backup_driver_client_cert_key
self.replica_count = CONF.backup_datera_replica_count
self.placement_mode = CONF.backup_datera_placement_mode
self.driver_strs = CONF.backup_datera_secondary_backup_drivers
self.driver = None
self.drivers = {}
self.type = 'datera'
self.cluster_stats = {}
self.datera_api_token = None
self.interval = CONF.backup_datera_503_interval
self.retry_attempts = (CONF.backup_datera_503_timeout /
self.interval)
self.driver_prefix = str(uuid.uuid4())[:4]
self.datera_debug = CONF.backup_datera_debug
self.datera_api_versions = []
if self.datera_debug:
utils.setup_tracing(['method'])
self.tenant_id = CONF.backup_datera_tenant_id
if self.tenant_id and self.tenant_id.lower() == 'none':
self.tenant_id = None
self.api_check = time.time()
self.api_cache = []
self.api_timeout = 0
self.do_profile = not CONF.backup_datera_disable_profiler
self.thread_local = threading.local()
self.thread_local.trace_id = ""
self._populate_secondary_drivers()
datc.register_driver(self)
self._check_options()
def _populate_secondary_drivers(self):
for dstr in self.driver_strs:
driver = importutils.import_module(dstr)
self.drivers[dstr.split(".")[-1]] = driver
@staticmethod
def _execute(cmd):
parts = shlex.split(cmd)
putils.execute(*parts, root_helper=utils.get_root_helper(),
run_as_root=True)
def login(self):
"""Use the san_login and san_password to set token."""
body = {
'name': self.username,
'password': self.password
}
# Unset token now, otherwise potential expired token will be sent
# along to be used for authorization when trying to login.
self.datera_api_token = None
try:
LOG.debug('Getting Datera auth token.')
results = self._issue_api_request(
'login', 'put', body=body, sensitive=True, api_version='2.1',
tenant=None)
self.datera_api_token = results['key']
except exception.NotAuthorized:
with excutils.save_and_reraise_exception():
LOG.error('Logging into the Datera cluster failed. Please '
'check your username and password set in the '
'cinder.conf and start the cinder-volume '
'service again.')
def _check_options(self):
req_opts = ('backup_datera_san_ip',
'backup_datera_san_login',
'backup_datera_san_password')
        # wrap in list() so the truthiness check also works on Python 3, where
        # filter() returns a lazy iterator
        no_opts = list(filter(lambda opt: not getattr(CONF, opt, None), req_opts))
        if no_opts:
raise exception.InvalidInput(
reason=_('Missing required opts %s') % no_opts)
def _create_volume(self, name, size):
tenant = self.tenant_id
app_params = (
{
'create_mode': "openstack",
'name': name,
'access_control_mode': 'deny_all',
'storage_instances': [
{
'name': SI_NAME,
'volumes': [
{
'name': VOL_NAME,
'size': size,
'placement_mode': self.placement_mode,
'replica_count': self.replica_count,
'snapshot_policies': [
]
}
]
}
]
})
self._issue_api_request(datc.URL_TEMPLATES['ai'](), 'post',
body=app_params, api_version='2.1',
tenant=tenant)
def _detach_volume(self, name):
url = datc.URL_TEMPLATES['ai_inst']().format(name)
data = {
'admin_state': 'offline',
'force': True
}
try:
self._issue_api_request(url, method='put', body=data,
api_version='2.1', tenant=self.tenant_id)
except exception.NotFound:
msg = _("Tried to detach volume %s, but it was not found in the "
"Datera cluster. Continuing with detach.")
LOG.info(msg, name)
def _delete_volume(self, name):
self._detach_volume(name)
try:
self._issue_api_request(
datc.URL_TEMPLATES['ai_inst']().format(name), 'delete',
api_version='2.1', tenant=self.tenant_id)
except (exception.DateraAPIException, exception.NotFound):
LOG.debug("Couldn't find volume: {}".format(name))
def _volume_exists(self, bname):
try:
self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
bname), 'get', api_version='2.1', tenant=self.tenant_id)
return True
except exception.NotFound:
return False
def _create_snapshot(self, bname):
snap = self._issue_api_request(datc.URL_TEMPLATES['vol_inst'](
SI_NAME, VOL_NAME).format(bname) + '/snapshots', 'post',
body={}, api_version='2.1', tenant=self.tenant_id)
# Polling the snapshot is absolutely necessary otherwise we hit race
# conditions that can cause the snapshot to fail
self._snap_poll_2_1(snap['path'].strip("/"))
return snap['data']
def _restore_snapshot(self, bname, timestamp):
url = datc.URL_TEMPLATES['ai_inst']().format(bname)
self._detach_volume(bname)
self._issue_api_request(datc.URL_TEMPLATES['vol_inst'](
SI_NAME, VOL_NAME).format(bname), 'put',
body={'restore_point': timestamp}, api_version='2.1',
tenant=self.tenant_id)
data = {
'admin_state': 'online'
}
self._issue_api_request(
url, method='put', body=data, api_version='2.1',
tenant=self.tenant_id)
# Trying a sleep here to give the snapshot a moment to restore
LOG.debug("Sleeping for 5s to give the snapshot a chance")
eventlet.sleep(5)
def _list_snapshots(self, bname):
snaps = self._issue_api_request(datc.URL_TEMPLATES['vol_inst'](
SI_NAME, VOL_NAME).format(bname) + '/snapshots', 'get',
api_version='2.1', tenant=self.tenant_id)
return snaps['data']
def _get_snapshot(self, bname, timestamp):
return self._issue_api_request(datc.URL_TEMPLATES['vol_inst'](
SI_NAME, VOL_NAME).format(
bname) + '/snapshots/{}'.format(timestamp), 'get',
api_version='2.1', tenant=self.tenant_id)
def _delete_snapshot(self, bname, timestamp):
for snapshot in self._list_snapshots(bname):
if snapshot['utc_ts'] == timestamp:
self._issue_api_request(datc.URL_TEMPLATES['vol_inst'](
SI_NAME, VOL_NAME).format(bname) + '/snapshots/{'
'}'.format(timestamp), 'delete', api_version='2.1')
return
LOG.debug('Did not find snapshot {} to delete'.format(timestamp))
def _get_sis_iqn_portal(self, bname):
iqn = None
portal = None
url = datc.URL_TEMPLATES['ai_inst']().format(bname)
data = {
'admin_state': 'online'
}
app_inst = self._issue_api_request(
url, method='put', body=data, api_version='2.1',
tenant=self.tenant_id)['data']
storage_instances = app_inst["storage_instances"]
si = storage_instances[0]
portal = si['access']['ips'][0] + ':3260'
iqn = si['access']['iqn']
return storage_instances, iqn, portal
def _register_acl(self, bname, initiator, storage_instances):
initiator_name = "OpenStack_{}_{}".format(
self.driver_prefix, str(uuid.uuid4())[:4])
found = False
if not found:
data = {'id': initiator, 'name': initiator_name}
# Try and create the initiator
# If we get a conflict, ignore it
self._issue_api_request("initiators",
method="post",
body=data,
conflict_ok=True,
api_version='2.1',
tenant=self.tenant_id)
initiator_path = "/initiators/{}".format(initiator)
# Create ACL with initiator for storage_instances
for si in storage_instances:
acl_url = (datc.URL_TEMPLATES['si']() +
"/{}/acl_policy").format(bname, si['name'])
existing_acl = self._issue_api_request(acl_url,
method="get",
api_version='2.1',
tenant=self.tenant_id)[
'data']
data = {}
data['initiators'] = existing_acl['initiators']
data['initiators'].append({"path": initiator_path})
data['initiator_groups'] = existing_acl['initiator_groups']
self._issue_api_request(acl_url,
method="put",
body=data,
api_version='2.1',
tenant=self.tenant_id)
self._si_poll(bname)
def _si_poll(self, bname):
TIMEOUT = 10
retry = 0
check_url = datc.URL_TEMPLATES['si_inst'](SI_NAME).format(bname)
poll = True
while poll and not retry >= TIMEOUT:
retry += 1
si = self._issue_api_request(check_url,
api_version='2.1',
tenant=self.tenant_id)['data']
if si['op_state'] == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Resource not ready.'))
def _snap_poll_2_1(self, url):
tenant = self.tenant_id
eventlet.sleep(datc.DEFAULT_SNAP_SLEEP)
TIMEOUT = 20
retry = 0
poll = True
while poll and not retry >= TIMEOUT:
retry += 1
snap = self._issue_api_request(url,
api_version='2.1',
tenant=tenant)['data']
if snap['op_state'] == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Snapshot not ready.'))
@contextlib.contextmanager
def _connect_target(self, container):
connector = None
try:
sis, iqn, portal = self._get_sis_iqn_portal(container)
conn = {'driver_volume_type': 'iscsi',
'data': {
'target_discovered': False,
'target_iqn': iqn,
'target_portal': portal,
'target_lun': 0,
'volume_id': None,
'discard': False}}
connector = utils.brick_get_connector(
conn['driver_volume_type'],
use_multipath=False,
device_scan_attempts=10,
conn=conn)
# Setup ACL
initiator = connector.get_initiator()
self._register_acl(container, initiator, sis)
# Attach Target
attach_info = {}
attach_info['target_portal'] = portal
attach_info['target_iqn'] = iqn
attach_info['target_lun'] = 0
retries = 10
while True:
try:
attach_info.update(
connector.connect_volume(conn['data']))
break
except brick_exception.FailedISCSITargetPortalLogin:
retries -= 1
if not retries:
LOG.error("Could not log into portal before end of "
"polling period")
raise
LOG.debug("Failed to login to portal, retrying")
eventlet.sleep(2)
device_path = attach_info['path']
yield device_path
finally:
# Close target connection
if connector:
# Best effort disconnection
try:
connector.disconnect_volume(attach_info, attach_info)
except Exception:
pass
def _parse_name(self, name):
return int(name.split("-")[-1])
def _get_driver(self):
if not self.driver:
supported_list = []
for dstr in self.driver_strs:
supported_list.append(dstr.split(".")[-1])
name = (self._backup['display_name'].lower()
if self._backup['display_name'] else None)
if not name or 'datera' in name:
self.type = 'datera'
return
for supported in supported_list:
if supported in name:
self.type = supported
self.driver = self.drivers[self.type].get_backup_driver(
self.ctxt)
if not self.driver:
raise EnvironmentError(
"Unsupported driver: {}, display name of backup must "
"contain name of driver to use. Supported drivers: {}"
"".format(name, self.drivers.keys()))
return self.driver
def put_container(self, bucket):
"""Create the bucket if not exists."""
driver = self._get_driver()
if not driver:
if self._volume_exists(bucket):
return
else:
vol_size = CONF.backup_datera_chunk_size
self._create_volume(bucket, vol_size)
return
return driver.put_container(bucket)
def get_container_entries(self, bucket, prefix):
"""Get bucket entry names."""
driver = self._get_driver()
if not driver:
return ["-".join((prefix, "{:05d}".format(i + 1)))
for i, _ in enumerate(self._list_snapshots(bucket))][:-2]
return driver.get_container_entries(bucket, prefix)
def get_object_writer(self, bucket, object_name, extra_metadata=None):
"""Return a writer object.
Returns a writer object that stores a chunk of volume data in a
Datera volume
"""
driver = self._get_driver()
if not driver:
return DateraObjectWriter(bucket, object_name, self)
        return driver.get_object_writer(bucket, object_name, extra_metadata)
def get_object_reader(self, bucket, object_name, extra_metadata=None):
"""Return reader object.
Returns a reader object that retrieves a chunk of backed-up volume data
from a Datera EDF object store.
"""
driver = self._get_driver()
if not driver:
return DateraObjectReader(bucket, object_name, self)
return driver.get_object_reader(bucket, object_name, extra_metadata)
def delete_object(self, bucket, object_name):
"""Deletes a backup object from a Datera EDF object store."""
driver = self._get_driver()
if not driver:
return self._delete_snapshot(bucket, object_name)
return driver.delete_object(bucket, object_name)
def backup(self, backup, volume_file, backup_metadata=False):
self._backup = backup
driver = self._get_driver()
if not driver:
# We should always backup metadata in the Datera driver
# It costs practically nothing and Tempest expects metadata to
# be backed up.
return super(DateraBackupDriver, self).backup(
backup, volume_file, backup_metadata=True)
return driver.backup(backup, volume_file, backup_metadata)
def restore(self, backup, volume_id, volume_file):
self._backup = backup
driver = self._get_driver()
if not driver:
return super(DateraBackupDriver, self).restore(
backup, volume_id, volume_file)
return driver.restore(backup, volume_id, volume_file)
# def get_metadata(self, volume_id):
# driver = self._get_driver()
# if not driver:
# return super(DateraBackupDriver, self).get_metadata(volume_id)
# return driver.get_metadata(volume_id)
# def put_metadata(self, volume_id, json_metadata):
# driver = self._get_driver()
# if not driver:
# return super(DateraBackupDriver, self).put_metadata(
# volume_id, json_metadata)
# return driver.put_metadata(volume_id, json_metadata)
def delete(self, backup):
self._backup = backup
driver = self._get_driver()
if not driver:
container = backup['container']
object_prefix = backup['service_metadata']
LOG.debug('delete started, backup: %(id)s, container: %(cont)s, '
'prefix: %(pre)s.',
{'id': backup['id'],
'cont': container,
'pre': object_prefix})
if container is not None:
self._delete_volume(container)
LOG.debug('delete %s finished.', backup['id'])
return
return driver.delete(backup)
def export_record(self, backup):
"""Export driver specific backup record information.
If backup backend needs additional driver specific information to
import backup record back into the system it must overwrite this method
and return it here as a dictionary so it can be serialized into a
string.
Default backup driver implementation has no extra information.
:param backup: backup object to export
:returns: driver_info - dictionary with extra information
"""
self._backup = backup
driver = self._get_driver()
if not driver:
return super(DateraBackupDriver, self).export_record(backup)
return driver.export_record(backup)
def import_record(self, backup, driver_info):
"""Import driver specific backup record information.
If backup backend needs additional driver specific information to
import backup record back into the system it must overwrite this method
since it will be called with the extra information that was provided by
export_record when exporting the backup.
Default backup driver implementation does nothing since it didn't
export any specific data in export_record.
:param backup: backup object to export
:param driver_info: dictionary with driver specific backup record
information
:returns: nothing
"""
self._backup = backup
driver = self._get_driver()
if not driver:
return super(DateraBackupDriver, self).import_record(
backup, driver_info)
return driver.import_record(backup, driver_info)
def _generate_object_name_prefix(self, backup):
"""Generates a Datera EDF backup object name prefix."""
driver = self._get_driver()
if not driver:
return PREFIX
        return driver._generate_object_name_prefix(backup)
def update_container_name(self, backup, bucket):
"""Use the bucket name as provided - don't update."""
driver = self._get_driver()
if not driver:
if not backup['container']:
return "-".join(("BACKUP", str(self._backup['id'])))
else:
return
        return driver.update_container_name(backup, bucket)
def get_extra_metadata(self, backup, volume):
"""Datera EDF driver does not use any extra metadata."""
driver = self._get_driver()
if not driver:
return
return driver.get_extra_metadata(backup, volume)
class DateraObjectWriter(object):
def __init__(self, container, object_name, driver):
LOG.debug("Object writer. container: %(container)s, "
"object_name: %(object)s",
{'container': container,
'object': object_name})
self.container = container
self.object_name = object_name
self.driver = driver
self.data = None
self.write_metadata = True if object_name.endswith(METADATA) else False
self.write_sha256 = True if object_name.endswith(SHA256) else False
if self.write_metadata and self.write_sha256:
raise ValueError("We're misunderstanding the requirements...")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def write(self, data):
# Assuming a single write
self.data = data
def close(self):
LOG.debug("Writing backup.Container: %(container)s, "
"object_name: %(object)s",
{'container': self.container,
'object': self.object_name})
with self.driver._connect_target(self.container) as device_path:
# Write backup data
self.driver._execute("chmod o+w {}".format(device_path))
f = os.open(device_path, os.O_SYNC | os.O_WRONLY)
# Write number, length and MD5 to initial offset
if self.write_sha256:
n = -2
elif self.write_metadata:
n = -1
else:
n = self.driver._parse_name(self.object_name)
l = len(self.data)
h = hashlib.md5(self.data).hexdigest()
os.write(f, struct.pack(PACK, n, l, h))
LOG.debug("Writing Headers.\n Number: %(number)s\n"
"Length: %(length)s\n"
"MD5: %(md5)s",
{'number': n,
'length': len(self.data),
'md5': h})
# Write actual data
# os.lseek(f, TOTAL_OFFSET, 0)
os.write(f, self.data)
# If we're writing a really small amount of data (< 1 KiB), then
# we should write additional data to ensure the block device
# recognizes that we wrote data. We'll just write 5 KiB of random
# data after the data we care about so as to not impact performance
if l <= 1 * units.Ki:
LOG.debug("Writing additional data to ensure write takes")
# Pad 8 bytes for visual debugging
os.write(f, "\x00" * 8)
# Random data
os.write(f, os.urandom(5 * units.Ki))
os.close(f)
# for short writes we need to let the cache flush
subprocess.check_call("sync")
# Then sleep so the flush occurs
eventlet.sleep(3)
self.driver._execute("chmod o-w {}".format(device_path))
self.driver._create_snapshot(self.container)
class DateraObjectReader(object):
def __init__(self, container, object_name, driver):
LOG.debug("Object reader. Container: %(container)s, "
"object_name: %(object)s",
{'container': container,
'object': object_name})
self.container = container
self.object_name = object_name
self.driver = driver
self.read_metadata = True if object_name.endswith(METADATA) else False
self.read_sha256 = True if object_name.endswith(SHA256) else False
if self.read_metadata and self.read_sha256:
raise ValueError("We're misunderstanding the requirements...")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
return
def read(self):
LOG.debug("Reading backup. Container: %(container)s, "
"object_name: %(object)s",
{'container': self.container,
'object': self.object_name})
data = self.driver._list_snapshots(self.container)
if self.read_sha256:
snap = data[-2]["utc_ts"]
elif self.read_metadata:
snap = data[-1]["utc_ts"]
else:
# Backups start at 00001, convert to zero index
snap = data[self.driver._parse_name(self.object_name) - 1][
"utc_ts"]
LOG.debug("Restoring Snapshot: {}".format(snap))
self.driver._restore_snapshot(self.container, snap)
# self.driver._delete_snapshot(self.container, most_recent)
with self.driver._connect_target(self.container) as device_path:
# Read backup data
self.driver._execute("chmod o+r {}".format(device_path))
f = os.open(device_path, os.O_RDONLY)
# Read headers
rawh = os.read(f, TOTAL_OFFSET)
n, l, h = struct.unpack(PACK, rawh)
LOG.debug("Reading Headers.\n Number: %(number)s\n"
"Length: %(length)s\n"
"MD5: %(md5)s",
{'number': n,
'length': l,
'md5': h})
# Read data
data = os.read(f, l)
os.close(f)
# Compare hashes
newh = hashlib.md5(data).hexdigest()
if newh != h:
raise ValueError("Data hash read off backup doesn't match "
"calculated hash. new hash: %(new)s "
"read hash: %(read)s",
{'new': newh,
'read': h})
self.driver._execute("chmod o-r {}".format(device_path))
return data
def get_backup_driver(context, db_driver=None):
return DateraBackupDriver(context, db_driver=db_driver)
|
the-stack_106_28459 |
from .inceptionv3 import inception_v3
from .vgg import vggnet
from .resnet import resnet
import torch
def initialise_model(args):
# create model
if args.arch.find('inceptionV3') > -1:
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
print("NUmber of classes will be ", args.num_classes)
model = inception_v3(num_classes=args.num_classes, pretrained=True, global_models_dir=args.global_models_dir, seq_len=args.seq_len)
else:
print("=> creating model '{}'".format(args.arch))
model = inception_v3(num_classes=args.num_classes, seq_len=args.seq_len)
elif args.arch.find('vgg') > -1:
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = vggnet(num_classes=args.num_classes, pretrained=True, global_models_dir=args.global_models_dir, seq_len=args.seq_len)
else:
print("=> creating model '{}'".format(args.arch))
model = vggnet(num_classes=args.num_classes, seq_len=args.seq_len)
elif args.arch[:6] == 'resnet':
        modelperms = {'resnet18': [2, 2, 2, 2], 'resnet34': [3, 4, 6, 3], 'resnet50': [3, 4, 6, 3],
                      'resnet101': [3, 4, 23, 3], 'resnet152': [3, 8, 36, 3]}
model = resnet(modelperms[args.arch], args.arch, args.seq_len, args.num_classes)
if args.pretrained:
load_dict = torch.load(args.global_models_dir + '/' + args.arch+'.pth')
# print(load_dict.keys(), '\n\n', model.state_dict().keys())
model.load_my_state_dict(load_dict, args.seq_len)
else:
        raise Exception('Specify the correct model type')
if args.ngpu>1:
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
else:
print('Apply DataParallel')
model = torch.nn.DataParallel(model)
model.cuda()
# define loss function (criterion) and optimizer
criterion = torch.nn.CrossEntropyLoss().cuda()
return model, criterion |
the-stack_106_28460 | # qubit number=4
# total number=13
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(2) # number=10
prog += CZ(1,2) # number=11
prog += H(2) # number=12
prog += X(2) # number=6
prog += H(3) # number=4
prog += Y(3) # number=5
prog += CNOT(1,0) # number=7
prog += CNOT(1,0) # number=8
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil274.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
the-stack_106_28461 | import FWCore.ParameterSet.Config as cms
process = cms.Process("EcalSimRawData")
#simulation of raw data. Defines the ecalSimRawData module:
process.load("SimCalorimetry.EcalElectronicsEmulation.EcalSimRawData_cfi")
# Geometry
#
process.load("Geometry.CMSCommonData.cmsSimIdealGeometryXML_cfi")
# Calo geometry service model
process.load("Geometry.CaloEventSetup.CaloGeometry_cff")
# Description of EE trigger tower map
process.load("Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
# number of events to generate:
input = cms.untracked.int32(2)
)
process.ecalSimpleProducer = cms.EDProducer("EcalSimpleProducer",
# string formula = "200+(4<=isample0)*(isample0<=6)*16*(1.)+1<<12"
formula = cms.string(''),
#TT samples:
# TT sample format:
# |11 | 10 | 9 - 0 |
# |gap|fgvb| Et |
# energy set to TT id in first event and then incremented at each event:
tpFormula = cms.string('itt0+ievt0'),
verbose = cms.untracked.bool(False)
)
process.p = cms.Path(process.ecalSimpleProducer*process.ecalSimRawData)
process.ecalSimRawData.trigPrimProducer = 'ecalSimpleProducer'
process.ecalSimRawData.tcpDigiCollection = ''
process.ecalSimRawData.tcc2dccData = False
process.ecalSimRawData.srp2dccData = False
process.ecalSimRawData.fe2dccData = False
process.ecalSimRawData.tpVerbose = False
|
the-stack_106_28463 | # ------------------------------------------------------------------------------
# Portions of this code are from
# CornerNet (https://github.com/princeton-vl/CornerNet)
# Copyright (c) 2018, University of Michigan
# Licensed under the BSD 3-Clause License
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.nn as nn
from .utils import _tranpose_and_gather_feat, _nms, _topk
import torch.nn.functional as F
from utils.image import draw_umich_gaussian
def _slow_neg_loss(pred, gt):
'''focal loss from CornerNet'''
    # keep these as boolean masks: they are used to index pred/gt below
    pos_inds = gt.eq(1)
    neg_inds = gt.lt(1)
neg_weights = torch.pow(1 - gt[neg_inds], 4)
loss = 0
pos_pred = pred[pos_inds]
neg_pred = pred[neg_inds]
pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)
neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2) * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if pos_pred.nelement() == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _neg_loss(pred, gt):
''' Reimplemented focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
'''
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _only_neg_loss(pred, gt):
gt = torch.pow(1 - gt, 4)
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * gt
return neg_loss.sum()
class FastFocalLoss(nn.Module):
'''
Reimplemented focal loss, exactly the same as the CornerNet version.
Faster and costs much less memory.
'''
def __init__(self, opt=None):
super(FastFocalLoss, self).__init__()
self.only_neg_loss = _only_neg_loss
def forward(self, out, target, ind, mask, cat):
'''
Arguments:
out, target: B x C x H x W
ind, mask: B x M
cat (category id for peaks): B x M
'''
neg_loss = self.only_neg_loss(out, target)
pos_pred_pix = _tranpose_and_gather_feat(out, ind) # B x M x C
pos_pred = pos_pred_pix.gather(2, cat.unsqueeze(2)) # B x M
num_pos = mask.sum()
pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2) * \
mask.unsqueeze(2)
pos_loss = pos_loss.sum()
if num_pos == 0:
return - neg_loss
return - (pos_loss + neg_loss) / num_pos
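    # Illustrative call (shapes only, not a training loop from this file): with B
    # images, C classes and up to M annotated centers,
    #   crit = FastFocalLoss()
    #   loss = crit(pred_hm, gt_hm, batch['ind'], batch['mask'], batch['cat'])
    # where pred_hm/gt_hm are B x C x H x W (pred_hm already in (0, 1), since the
    # loss takes log(out) directly) and ind/mask/cat are B x M.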
# def forward(self, out, target, ind, mask, cat, tracking_mask):
# """
#
# :param out: Batch x Class x H x W, value=predict pos probability (0~1)
# :param target: Batch x Class x H x W, value=predict pos probability (0 or 1)
# :param ind: B x M, value = index
# :param mask: B x M, value = 1 or 0
# :param cat: B x M, value = class
# :param tracking_mask: B x M, value = mask of whether the target exist in previous frame
# :return:
# """
# # negative samples loss (non-one)
# neg_loss = self.only_neg_loss(out, target)
#
# # predicted position
# pos_pred_pix = _tranpose_and_gather_feat(out, ind) # B x M x C, value = prediction pos probability
#
# # cat.unsqueeze(2) : B x M -> B x M x 1, value = class, used to guide find probability of different class
# pos_pred = pos_pred_pix.gather(2, cat.unsqueeze(2)) # B x M, value = probability
# tracking_mask = tracking_mask[:, :, 0]
# mask_match = tracking_mask
# mask_new = (1 - tracking_mask) * mask
# num_pos = mask.sum()
# pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2) * mask_match.unsqueeze(2)
# pos_loss += 10 * torch.log(pos_pred) * torch.pow(1 - pos_pred, 2) * mask_new.unsqueeze(2)
# pos_loss = pos_loss.sum()
# if num_pos == 0:
# return - neg_loss
# return - (pos_loss + neg_loss) / num_pos
def _reg_loss(regr, gt_regr, mask):
''' L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
'''
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
regr = regr * mask
gt_regr = gt_regr * mask
regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, reduction='sum')
regr_loss = regr_loss / (num + 1e-4)
return regr_loss
class RegWeightedL1Loss(nn.Module):
def __init__(self):
super(RegWeightedL1Loss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _tranpose_and_gather_feat(output, ind)
# loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
loss = loss / (mask.sum() + 1e-4)
return loss
class WeightedBCELoss(nn.Module):
def __init__(self):
super(WeightedBCELoss, self).__init__()
self.bceloss = torch.nn.BCEWithLogitsLoss(reduction='none')
def forward(self, output, mask, ind, target):
# output: B x F x H x W
# ind: B x M
# mask: B x M x F
# target: B x M x F
pred = _tranpose_and_gather_feat(output, ind) # B x M x F
loss = mask * self.bceloss(pred, target)
loss = loss.sum() / (mask.sum() + 1e-4)
return loss
class BinRotLoss(nn.Module):
def __init__(self):
super(BinRotLoss, self).__init__()
def forward(self, output, mask, ind, rotbin, rotres):
pred = _tranpose_and_gather_feat(output, ind)
loss = compute_rot_loss(pred, rotbin, rotres, mask)
return loss
def compute_res_loss(output, target):
return F.smooth_l1_loss(output, target, reduction='elementwise_mean')
def compute_bin_loss(output, target, mask):
mask = mask.expand_as(output)
output = output * mask.float()
return F.cross_entropy(output, target, reduction='elementwise_mean')
def compute_rot_loss(output, target_bin, target_res, mask):
# output: (B, 128, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos,
# bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos]
# target_bin: (B, 128, 2) [bin1_cls, bin2_cls]
# target_res: (B, 128, 2) [bin1_res, bin2_res]
# mask: (B, 128, 1)
output = output.view(-1, 8)
target_bin = target_bin.view(-1, 2)
target_res = target_res.view(-1, 2)
mask = mask.view(-1, 1)
loss_bin1 = compute_bin_loss(output[:, 0:2], target_bin[:, 0], mask)
loss_bin2 = compute_bin_loss(output[:, 4:6], target_bin[:, 1], mask)
loss_res = torch.zeros_like(loss_bin1)
if target_bin[:, 0].nonzero().shape[0] > 0:
idx1 = target_bin[:, 0].nonzero()[:, 0]
valid_output1 = torch.index_select(output, 0, idx1.long())
valid_target_res1 = torch.index_select(target_res, 0, idx1.long())
loss_sin1 = compute_res_loss(
valid_output1[:, 2], torch.sin(valid_target_res1[:, 0]))
loss_cos1 = compute_res_loss(
valid_output1[:, 3], torch.cos(valid_target_res1[:, 0]))
loss_res += loss_sin1 + loss_cos1
if target_bin[:, 1].nonzero().shape[0] > 0:
idx2 = target_bin[:, 1].nonzero()[:, 0]
valid_output2 = torch.index_select(output, 0, idx2.long())
valid_target_res2 = torch.index_select(target_res, 0, idx2.long())
loss_sin2 = compute_res_loss(
valid_output2[:, 6], torch.sin(valid_target_res2[:, 1]))
loss_cos2 = compute_res_loss(
valid_output2[:, 7], torch.cos(valid_target_res2[:, 1]))
loss_res += loss_sin2 + loss_cos2
return loss_bin1 + loss_bin2 + loss_res
class IDloss(nn.Module):
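    """Focal-style re-identification loss: gathers embedding vectors from the
    current and previous re-ID maps at object-center indices (random positions
    are substituted where no matched track exists), builds their similarity
    matrix and supervises its diagonal with the tracking mask."""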
def __init__(self):
super(IDloss, self).__init__()
self.gamma = 2.0
self.alpha = 0.25
def forward(self, cur_reid, pre_reid, cur_inds, pre_inds, tracking_mask):
alpha = self.alpha
gamma = self.gamma
tracking_mask = tracking_mask[:, :, 0]
a = cur_reid.shape[2]*cur_reid.shape[3]
# circumstance index
pre_circum_ind = torch.randint_like(pre_inds, low=0, high=a, dtype=pre_inds.dtype)
cur_circum_ind = torch.randint_like(cur_inds, low=0, high=a, dtype=pre_inds.dtype)
# fusion index
cur_inds = cur_inds * tracking_mask.long() + cur_circum_ind * (1-tracking_mask.long())
pre_inds = pre_inds * tracking_mask.long() + pre_circum_ind * (1-tracking_mask.long())
# extract feature
cur_feat = _tranpose_and_gather_feat(cur_reid, cur_inds) # batch x max_obj x channel
pre_feat = _tranpose_and_gather_feat(pre_reid, pre_inds)
# cur_feat = F.normalize(cur_feat, p=2, dim=2)
# pre_feat = F.normalize(pre_feat, p=2, dim=2)
# calculate simiarity
pre_feat = pre_feat.permute(0, 2, 1).contiguous() # batch x channel x max_obj
similarity = torch.matmul(cur_feat, pre_feat)
similarity = F.sigmoid(similarity)
# ground_truth
groundtruth = torch.diag_embed(tracking_mask)
# groundtruth = groundtruth.type_as(similarity)
# get loss
groundtruth = groundtruth.view(-1, 1)
similarity = similarity.view(-1, 1)
similarity = torch.cat((1 - similarity, similarity), dim=1)
class_mask = torch.zeros(similarity.shape[0], similarity.shape[1]).cuda()
class_mask.scatter_(1, groundtruth.view(-1, 1).long(), 1.)
probs = (similarity * class_mask).sum(dim=1).view(-1, 1)
probs = probs.clamp(min=0.0001, max=1.0)
log_p = probs.log()
alpha = torch.ones(similarity.shape[0], similarity.shape[1]).cuda()
alpha[:, 0] = alpha[:, 0] * (1 - self.alpha)
alpha[:, 1] = alpha[:, 1] * self.alpha
alpha = (alpha * class_mask).sum(dim=1).view(-1, 1)
batch_loss = -alpha * (torch.pow((1 - probs), self.gamma)) * log_p
loss = batch_loss.mean()
#
# loss = F.binary_cross_entropy_with_logits(
# similarity, groundtruth, reduction='none') * focal_weight
# loss = torch.mean(loss)
return loss
# class IDloss(nn.Module):
# def __init__(self):
# super(IDloss, self).__init__()
# self.only_neg_loss = _only_neg_loss
# self.sigma = 1
#
# def forward(self, cur_reid, pre_reid, cts_int, tracking, tracking_mask, pre_cts_int, opt):
# # cts_int and pre_cts_int are ground truth
# sigma = self.sigma
# mask = tracking_mask[:, :, 0]
# # ret['tracking'][k] = pre_ct - ct_int
# # ret['ind'][k] = ct_int[1] * self.opt.output_w + ct_int[0]
# cur_inds = cts_int[:, :, 1]*opt.output_w + cts_int[:, :, 0]
# # tracking is predicted
# tracking = _tranpose_and_gather_feat(tracking, cur_inds)
# pre_cts = cts_int.float() + tracking
# pre_inds = pre_cts[:, :, 1]*opt.output_w + pre_cts[:, :, 0]
#
# pre_feats = _tranpose_and_gather_feat(pre_reid, pre_inds.long())
# pre_feats = F.normalize(pre_feats, p=2, dim=2, eps=1e-5)
# cur_feats = _tranpose_and_gather_feat(cur_reid, cur_inds)
# cur_feats = F.normalize(cur_feats, p=2, dim=2, eps=1e-5)
# out = torch.sum(pre_feats * cur_feats, dim=2)
#
# x = pre_cts[:, :, 0].float() - pre_cts_int[:, :, 0].float()
# y = pre_cts[:, :, 1].float() - pre_cts_int[:, :, 1].float()
# target = torch.pow(-(x * x + y * y) / (2 * sigma * sigma), 2)
# target = mask * target
#
# neg_loss = self.only_neg_loss(out, target)
#
# num_pos = mask.sum()
# pos_loss = torch.log(out) * torch.pow(1 - out, 2) * \
# mask
# pos_loss = pos_loss.sum()
# if num_pos == 0:
# return - neg_loss
# return - (pos_loss + neg_loss) / num_pos
|
the-stack_106_28465 | # -*- coding: utf-8 -*-
import numpy as np
from aesara_theano_fallback import aesara
from aesara_theano_fallback import tensor as aet
from rebound_pymc3.test_tools import InferShapeTester
from rebound_pymc3.python_impl import ReboundOp
class TestRebound(InferShapeTester):
def setup_method(self):
super().setup_method()
self.op_class = ReboundOp
self.op = ReboundOp()
def get_args(self):
m_val = np.array([1.3, 1e-3, 1e-5])
x_val = np.zeros((3, 6))
x_val[1, 0] = 15.0
x_val[1, 4] = 0.4
x_val[2, 0] = 100.0
x_val[2, 4] = 0.2
t = np.linspace(100, 1000, 12)
m = aet.dvector()
x = aet.dmatrix()
f = aesara.function([m, x], self.op(m, x, t)[0])
return t, f, [m, x], [m_val, x_val]
def test_basic(self):
_, f, _, in_args = self.get_args()
f(*in_args)
def test_infer_shape(self):
t, f, args, arg_vals = self.get_args()
self._compile_and_check(
args, self.op(*(list(args) + [t])), arg_vals, self.op_class
)
def test_grad(self):
t, _, _, in_args = self.get_args()
func = lambda *args: self.op(*(list(args) + [t]))[0] # NOQA
aesara.gradient.verify_grad(func, in_args, n_tests=1, rng=np.random)
|
the-stack_106_28467 | import math
import torch
import warnings
import numpy as np
import pandas as pd
from scipy.interpolate import pchip_interpolate
import itertools
from typing import Dict
from torchkbnufft import AdjKbNufft
from torchkbnufft.math import complex_mult, imag_exp, absolute
from torchio.transforms.augmentation.random_transform import RandomTransform
import torch.nn.functional as F
class TorchRandomMotionFromTimeCourse(RandomTransform):
def __init__(self, nT=200, maxDisp=(2,5), maxRot=(2,5), noiseBasePars=(5,15),
swallowFrequency=(0,5), swallowMagnitude=(2,6),
suddenFrequency=(0,5), suddenMagnitude=(2,6),
fitpars=None, read_func=lambda x: pd.read_csv(x, header=None).values,
displacement_shift=1, freq_encoding_dim=[0], tr=2.3, es=4E-3,
nufft=True, oversampling_pct=0.3, proba_to_augment: float = 1,
verbose=False, preserve_center_pct=0, correct_motion=False, res_dir=None, cuda=False, metrics: Dict = None):
"""
        parameters to simulate 3 types of displacement: random noise, swallowing, or sudden movement
:param nT (int): number of points of the time course
:param maxDisp (float, float): (min, max) value of displacement in the perlin noise (useless if noiseBasePars is 0)
:param maxRot (float, float): (min, max) value of rotation in the perlin noise (useless if noiseBasePars is 0)
:param noiseBasePars (float, float): (min, max) base value of the perlin noise to generate for the time course
            optional (float, float, float) where the third is the probability to perform this type of noise
:param swallowFrequency (int, int): (min, max) number of swallowing movements to generate in the time course
            optional (float, float, float) where the third is the probability to perform this type of noise
:param swallowMagnitude (float, float): (min, max) magnitude of the swallowing movements to generate
:param suddenFrequency (int, int): (min, max) number of sudden movements to generate in the time course
            optional (float, float, float) where the third is the probability to perform this type of noise
:param suddenMagnitude (float, float): (min, max) magnitude of the sudden movements to generate
        if fitpars is not None, the previous parameters are not used
        :param fitpars: movement parameters to use (if specified, will be applied as such, no movement is simulated)
        :param read_func (function): if fitpars is a string, function to use to read the data. Must return an array of shape (6, nT)
        :param displacement_shift (int): whether or not to subtract from the time course the values at the center of the kspace
:param freq_encoding_dim (tuple of ints): potential frequency encoding dims to use (one of them is randomly chosen)
:param tr (float): repetition time of the data acquisition (used for interpolating the time course movement)
:param es (float): echo spacing time of the data acquisition (used for interpolating the time course movement)
        :param nufft (bool): whether or not to apply nufft (if False, no rotation is applied!)
:param oversampling_pct (float): percentage with which the data will be oversampled in the image domain prior to applying the motion
:param verbose (bool): verbose
        Note: currently only freq_encoding_dim=0 gives the same ringing direction for rotation and translation; dims 1 and 2 are not coherent
        Note: for suddenFrequency and swallowFrequency, min and max must differ and the max is never reached, so to get 0 events use (0, 1)
"""
super(TorchRandomMotionFromTimeCourse, self).__init__(verbose=verbose, p=proba_to_augment, metrics=metrics)
self.tr = tr
self.es = es
self.nT = nT
self.maxDisp = maxDisp
self.maxRot = maxRot
self.noiseBasePars = noiseBasePars
self.swallowFrequency = swallowFrequency
self.swallowMagnitude = swallowMagnitude
self.suddenFrequency = suddenFrequency
self.suddenMagnitude = suddenMagnitude
self.displacement_shift = displacement_shift
self.preserve_center_frequency_pct = preserve_center_pct
self.freq_encoding_choice = freq_encoding_dim
self.frequency_encoding_dim = np.random.choice(self.freq_encoding_choice)
self.read_func = read_func
self.displacement_substract = np.zeros(6)
if fitpars is None:
self.fitpars = None
self.simulate_displacement = True
else:
self.fitpars = self.read_fitpars(fitpars)
self.simulate_displacement = False
self.nufft = nufft
self.cuda = cuda
self.oversampling_pct = oversampling_pct
self.proba_to_augment = proba_to_augment
self.preserve_center_pct = preserve_center_pct
self.correct_motion = correct_motion
self.res_dir = res_dir
self.nb_saved = 0
def apply_transform(self, sample):
parameters_motion = {}
for image_name, image_dict in sample.get_images_dict().items():
do_it = np.random.uniform() <= self.proba_to_augment
parameters_motion['simu_param'] = dict(noisPar=0.0, maxDisp=0.0, maxRot=0.0, swallowFrequency=0.0,
swallowMagnitude=[0.0,0.0], suddenFrequency=0.0, suddenMagnitude=[0.0,0.0])
if not do_it:
sample[image_name]['motion'] = False
return sample
else:
sample[image_name]['motion'] = True
#image_data = np.squeeze(image_dict['data'])[..., np.newaxis, np.newaxis]
image_data = image_dict['data'].unsqueeze(1)
original_image = torch.squeeze(image_data)
if self.oversampling_pct > 0.0:
original_image_shape = original_image.shape
original_image = self._oversample(original_image, self.oversampling_pct)
self._calc_dimensions(original_image.shape)
if self.simulate_displacement:
fitpars_interp = self._simulate_random_trajectory()
parameters_motion['simu_param'] = self.simu_param
else:
if self.fitpars.ndim==4:
fitpars_interp = self.fitpars
else:
fitpars_interp = self._interpolate_space_timing(self.fitpars)
fitpars_interp = self._tile_params_to_volume_dims(fitpars_interp)
if self.displacement_shift > 1:
fitpars_interp = self.demean_fitpar(fitpars_interp, original_image)[0]
fitpars_vox = fitpars_interp.reshape((6, -1))
self.translations, self.rotations = fitpars_vox[:3], np.radians(fitpars_vox[3:])
self.translations, self.rotations = torch.from_numpy(self.translations), torch.from_numpy(self.rotations)
if self.cuda:
original_image = original_image.cuda()
# fft
im_freq_domain = self._fft_im(original_image)
translated_im_freq_domain = self._translate_freq_domain(freq_domain=im_freq_domain)
# iNufft for rotations
if self.nufft:
corrupted_im = self._nufft(translated_im_freq_domain)
corrupted_im = corrupted_im/(corrupted_im.numel()/2) # normalize
else:
corrupted_im = self._ifft_im(translated_im_freq_domain)
if self.correct_motion:
corrected_im = self.do_correct_motion(corrupted_im)
image_dict["data_cor"] = corrected_im[np.newaxis, ...]
image_dict['data_cor'] = torch.from_numpy(image_dict['data_cor']).float()
# magnitude
corrupted_im = absolute(corrupted_im.squeeze(), -1)
if self.oversampling_pct > 0.0:
corrupted_im = self.crop_volume(corrupted_im, original_image_shape)
image_dict["data"] = corrupted_im.T.float()
#image_dict['data'] = torch.from_numpy(image_dict['data']).float()
#add extra field to follow what have been done
#sample[image_name]['fit_pars'] = self.fitpars
#sample[image_name]['fit_pars_interp'] = self.fitpars_interp
if self.res_dir is not None:
self.save_to_dir(image_dict)
metrics = dict()
if self.fitpars.ndim == 2:
metrics['mean_DispP'] = calculate_mean_Disp_P(self.fitpars)
metrics['rmse_Disp'] = calculate_mean_RMSE_displacment(self.fitpars)
metrics['mean_DispP_iterp'] = calculate_mean_Disp_P(fitpars_interp)
metrics['rmse_Disp_iterp'] = calculate_mean_RMSE_displacment(fitpars_interp)
ff_interp, to_substract = self.demean_fitpar(fitpars_interp, original_image)
metrics['TFsubstract'] = to_substract
metrics['rmse_DispTF'] = calculate_mean_RMSE_displacment(ff_interp, original_image)
parameters_motion['metrics_motion'] = metrics
#sample.add_transform(self, parameters_motion)
return sample
#output type is double, TODO where to cast in Float ?
def _fft_im(self, image, signal_ndim=3):
output = self._fftshift2d(torch.rfft(image, signal_ndim=signal_ndim, onesided=False, normalized=True))
return output.squeeze()
def _ifft_im(self, freq_domain, signal_ndim=3):
output = torch.ifft(self._ifftshift2d(freq_domain), signal_ndim=signal_ndim)
return output.squeeze()
def roll_n(self, X, axis, n):
'''
from https://github.com/tomrunia/PyTorchSteerablePyramid
'''
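        # Circularly roll X by n positions along `axis` (tail slice moved in front
        # of the head slice); used to emulate fftshift/ifftshift per component.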
f_idx = tuple(slice(None, None, None) if i != axis else slice(0, n, None) for i in range(X.dim()))
b_idx = tuple(slice(None, None, None) if i != axis else slice(n, None, None) for i in range(X.dim()))
front = X[f_idx]
back = X[b_idx]
return torch.cat([back, front], axis)
def _ifftshift2d(self, x):
'''
from https://github.com/tomrunia/PyTorchSteerablePyramid
'''
real, imag = torch.unbind(x, -1)
for dim in range(len(real.size()) - 1, -1, -1):
real = self.roll_n(real, axis=dim, n=real.size(dim) // 2)
imag = self.roll_n(imag, axis=dim, n=imag.size(dim) // 2)
return torch.stack((real, imag), -1) # last dim=2 (real&imag)
def _fftshift2d(self, x):
'''
from https://github.com/tomrunia/PyTorchSteerablePyramid
'''
real, imag = torch.unbind(x, -1)
for dim in range(0, len(real.size())):
n_shift = real.size(dim) // 2
if real.size(dim) % 2 != 0:
n_shift += 1 # for odd-sized images
real = self.roll_n(real, axis=dim, n=n_shift)
imag = self.roll_n(imag, axis=dim, n=n_shift)
return torch.stack((real, imag), -1)
@staticmethod
def _oversample(data, perc_oversampling=.10):
"""
Oversamples data with a zero padding. Adds perc_oversampling percentage values
:param data (ndarray): array to pad
:param perc_oversampling (float): percentage of oversampling to add to data (based on its current shape)
:return oversampled version of the data:
"""
data_shape = list(data.shape)
to_pad = np.ceil(np.asarray(data_shape) * perc_oversampling / 2) * 2
        # force an even padding total; an odd total would shift the volume when cropping back
        # print("Padding at {}".format(to_pad))
left_pad = np.floor(to_pad / 2).astype(int)
right_pad = np.ceil(to_pad / 2).astype(int)
to_pad = [x for x in itertools.chain.from_iterable(itertools.zip_longest(left_pad, right_pad)) ]
return F.pad(data, to_pad)
def save_to_dir(self, image_dict):
volume_path = image_dict['path']
dd = volume_path.split('/')
volume_name = dd[len(dd)-2] + '_' + image_dict['stem']
nb_saved = image_dict['index']
import os
resdir = self.res_dir + '/mvt_param/'
if not os.path.isdir(resdir): os.mkdir(resdir)
fname = resdir + 'ssim_{}_N{:05d}_suj_{}'.format(image_dict['metrics']['ssim'],
nb_saved, volume_name)
np.savetxt(fname + '_mvt.csv', self.fitpars, delimiter=',')
def do_correct_motion(self, image):
im_freq_domain = self._fft_im(image)
# print('translation')
translated_im_freq_domain = self._translate_freq_domain(freq_domain=im_freq_domain, inv_transfo=True)
# print('rotaion')
# iNufft for rotations
if self.nufft:
corrected_im = self._nufft(translated_im_freq_domain, inv_transfo=True)
corrected_im = corrected_im / corrected_im.size # normalize
else:
corrected_im = self._ifft_im(translated_im_freq_domain)
# magnitude
corrected_im = abs(corrected_im)
return corrected_im
@staticmethod
def get_params():
pass
def read_fitpars(self, fitpars):
'''
:param fitpars:
'''
fpars = None
if isinstance(fitpars, np.ndarray):
fpars = fitpars
elif isinstance(fitpars, list):
fpars = np.asarray(fitpars)
elif isinstance(fitpars, str):
try:
fpars = self.read_func(fitpars)
except:
warnings.warn("Could not read {} with given function. Motion parameters are set to None".format(fpars))
fpars = None
if fpars.shape[0] != 6:
warnings.warn("Given motion parameters has {} on the first dimension. "
"Expected 6 (3 translations and 3 rotations). Setting motions to None".format(fpars.shape[0]))
fpars = None
elif len(fpars.shape) != 2:
warnings.warn("Expected motion parameters to be of shape (6, N), found {}. Setting motions to None".format(fpars.shape))
fpars = None
if self.displacement_shift > 0:
to_substract = fpars[:, int(round(self.nT / 2))]
fpars = np.subtract(fpars, to_substract[..., np.newaxis])
#print('removing to fit_pars {}'.format(to_substract))
self.displacement_substract = to_substract
#print(fpars.shape)
if np.any(np.isnan(fpars)) :
            #assume it is the last column, as can happen if the csv line ends with ,
fpars = fpars[:, :-1]
if np.any(np.isnan(fpars)):
warnings.warn('There is still NaN in the fitpar, it will crash the nufft')
self.nT = fpars.shape[1]
return fpars
def _calc_dimensions(self, im_shape):
"""
calculate dimensions based on im_shape
:param im_shape (list/tuple) : image shape
- sets self.phase_encoding_dims, self.phase_encoding_shape, self.num_phase_encoding_steps, self.frequency_encoding_dim
"""
pe_dims = [0, 1, 2]
pe_dims.pop(self.frequency_encoding_dim)
self.phase_encoding_dims = pe_dims
im_shape = list(im_shape)
self.im_shape = im_shape.copy()
im_shape.pop(self.frequency_encoding_dim)
self.phase_encoding_shape = im_shape #[ im_shape[pp-1] for pp in pe_dims]
self.num_phase_encoding_steps = self.phase_encoding_shape[0] * self.phase_encoding_shape[1]
self.frequency_encoding_dim = len(self.im_shape) - 1 if self.frequency_encoding_dim == -1 \
else self.frequency_encoding_dim
# no more used
def _center_k_indices_to_preserve(self):
"""get center k indices of freq domain"""
mid_pts = [int(math.ceil(x / 2)) for x in self.phase_encoding_shape]
num_pts_preserve = [math.ceil(self.preserve_center_frequency_pct * x) for x in self.phase_encoding_shape]
ind_to_remove = {val + 1: slice(mid_pts[i] - num_pts_preserve[i], mid_pts[i] + num_pts_preserve[i])
for i, val in enumerate(self.phase_encoding_dims)}
ix_to_remove = [ind_to_remove.get(dim, slice(None)) for dim in range(4)]
return ix_to_remove
@staticmethod
def perlinNoise1D(npts, weights):
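        # Multi-octave 1D noise: octave i uses ~npts/2**i random control points,
        # pchip-interpolated to the full length and weighted by 2**i; the summed
        # curve is rescaled to [0, 1].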
if not isinstance(weights, list):
weights = range(int(round(weights)))
weights = np.power([2] * len(weights), weights)
n = len(weights)
xvals = np.linspace(0, 1, npts)
total = np.zeros((npts, 1))
for i in range(n):
frequency = 2**i
this_npts = round(npts / frequency)
if this_npts > 1:
total += weights[i] * pchip_interpolate(np.linspace(0, 1, this_npts), np.random.random((this_npts, 1)),
xvals)
# else:
# TODO does it matter print("Maxed out at octave {}".format(i))
total = total - np.min(total)
total = total / np.max(total)
return total.reshape(-1)
def _simulate_random_trajectory(self):
"""
Simulates the parameters of the transformation through the vector fitpars using 6 dimensions (3 translations and
3 rotations).
"""
maxDisp = np.random.uniform(low=self.maxDisp[0], high=self.maxDisp[1])
maxRot = np.random.uniform(low=self.maxRot[0], high=self.maxRot[1])
noiseBasePars = np.random.uniform(low=self.noiseBasePars[0], high=self.noiseBasePars[1])
swallowFrequency = np.random.randint(low=self.swallowFrequency[0], high=self.swallowFrequency[1])
swallowMagnitude = [np.random.uniform(low=self.swallowMagnitude[0], high=self.swallowMagnitude[1]),
np.random.uniform(low=self.swallowMagnitude[0], high=self.swallowMagnitude[1])]
suddenFrequency = np.random.randint(low=self.suddenFrequency[0], high=self.suddenFrequency[1])
suddenMagnitude = [np.random.uniform(low=self.suddenMagnitude[0], high=self.suddenMagnitude[1]),
np.random.uniform(low=self.suddenMagnitude[0], high=self.suddenMagnitude[1])]
        # probability to include each of the different types of noise
proba_noiseBase = self.noiseBasePars[2] if len(self.noiseBasePars) == 3 else 1
proba_swallow = self.swallowFrequency[2] if len(self.swallowFrequency) == 3 else 1
proba_sudden = self.suddenFrequency[2] if len(self.suddenFrequency) == 3 else 1
do_noise, do_swallow, do_sudden = False, False, False
while (do_noise or do_swallow or do_sudden) is False: #at least one is not false
do_noise = np.random.uniform() <= proba_noiseBase
do_swallow = np.random.uniform() <= proba_swallow
do_sudden = np.random.uniform() <= proba_sudden
if do_noise is False: noiseBasePars = 0
if do_swallow is False: swallowFrequency = 0
if do_sudden is False: suddenFrequency = 0
#print('simulate FITpars')
if noiseBasePars > 0:
fitpars = np.asarray([self.perlinNoise1D(self.nT, noiseBasePars) - 0.5 for _ in range(6)])
fitpars[:3] *= maxDisp
fitpars[3:] *= maxRot
else:
fitpars = np.zeros((6, self.nT))
# add in swallowing-like movements - just to z direction and pitch
if swallowFrequency > 0:
swallowTraceBase = np.exp(-np.linspace(0, 100, self.nT))
swallowTrace = np.zeros(self.nT)
for i in range(swallowFrequency):
rand_shifts = int(round(np.random.rand() * self.nT))
rolled = np.roll(swallowTraceBase, rand_shifts, axis=0)
swallowTrace += rolled
fitpars[2, :] += swallowMagnitude[0] * swallowTrace
fitpars[3, :] += swallowMagnitude[1] * swallowTrace
# add in random sudden movements in any direction
if suddenFrequency > 0:
suddenTrace = np.zeros(fitpars.shape)
for i in range(suddenFrequency):
iT_sudden = int(np.ceil(np.random.rand() * self.nT))
to_add = np.asarray([suddenMagnitude[0] * (2 * np.random.random(3) - 1),
suddenMagnitude[1] * (2 * np.random.random(3) - 1)]).reshape((-1, 1))
suddenTrace[:, iT_sudden:] = np.add(suddenTrace[:, iT_sudden:], to_add)
fitpars += suddenTrace
if self.displacement_shift > 0:
to_substract = fitpars[:, int(round(self.nT / 2))]
fitpars = np.subtract(fitpars, to_substract[..., np.newaxis])
self.displacement_substract = to_substract
if self.preserve_center_frequency_pct:
center = np.int(np.floor( fitpars.shape[1] /2 ))
nbpts = np.int(np.floor(fitpars.shape[1] * self.preserve_center_frequency_pct/2))
fitpars[:, center-nbpts:center+nbpts] = 0
self.fitpars = fitpars
#print(f' in _simul_motionfitpar shape fitpars {fitpars.shape}')
simu_param = dict(noisPar=noiseBasePars,maxDisp=maxDisp,maxRot=maxRot,
swallowFrequency=swallowFrequency, swallowMagnitudeT=swallowMagnitude[0], swallowMagnitudeR=swallowMagnitude[1],
suddenFrequency=suddenFrequency,suddenMagnitudeT=suddenMagnitude[0], suddenMagnitude=suddenMagnitude[1])
self.simu_param = simu_param
fitpars = self._interpolate_space_timing(fitpars)
fitpars = self._tile_params_to_volume_dims(fitpars)
return fitpars
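    # Note added for clarity (hedged, not from the original author): this method stores the
    # raw [6, nT] trajectory in self.fitpars (rows 0-2: translations scaled by maxDisp,
    # rows 3-5: rotations scaled by maxRot, plus optional swallowing/sudden traces), then
    # returns it interpolated to the acquisition timing and tiled to the volume:
    #     fitpars = self._simulate_random_trajectory()   # shape: [6] + self.im_shape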
def _interpolate_space_timing_1D(self, fitpars):
n_phase= self.phase_encoding_shape[0]
nT = self.nT
# Time steps
mg_total = np.linspace(0,1,n_phase)
# Equidistant time spacing
teq = np.linspace(0, 1, nT)
# Actual interpolation
fitpars_interp = np.asarray([np.interp(mg_total, teq, params) for params in fitpars])
# Reshaping to phase encoding dimensions
self.fitpars_interp = fitpars_interp
# Add missing dimension
fitpars_interp = np.expand_dims(fitpars_interp, axis= [self.frequency_encoding_dim + 1,self.phase_encoding_dims[1] + 1])
return fitpars_interp
def _interpolate_space_timing(self, fitpars):
n_phase, n_slice = self.phase_encoding_shape[0], self.phase_encoding_shape[1]
# Time steps
t_steps = n_phase * self.tr
# Echo spacing dimension
dim_es = np.cumsum(self.es * np.ones(n_slice)) - self.es
dim_tr = np.cumsum(self.tr * np.ones(n_phase)) - self.tr
# Build grid
mg_es, mg_tr = np.meshgrid(*[dim_es, dim_tr])
mg_total = mg_es + mg_tr # MP-rage timing
# Flatten grid and sort values
mg_total = np.sort(mg_total.reshape(-1))
# Equidistant time spacing
teq = np.linspace(0, t_steps, self.nT)
# Actual interpolation
fitpars_interp = np.asarray([np.interp(mg_total, teq, params) for params in fitpars])
# Reshaping to phase encoding dimensions
fitpars_interp = fitpars_interp.reshape([6] + self.phase_encoding_shape)
self.fitpars_interp = fitpars_interp
# Add missing dimension
fitpars_interp = np.expand_dims(fitpars_interp, axis=self.frequency_encoding_dim + 1)
return fitpars_interp
def _tile_params_to_volume_dims(self, params_to_reshape):
target_shape = [6] + self.im_shape
data_shape = params_to_reshape.shape
tiles = np.floor_divide(target_shape, data_shape, dtype=int)
return np.tile(params_to_reshape, reps=tiles)
def _translate_freq_domain(self, freq_domain, inv_transfo=False):
"""
image domain translation by adding phase shifts in frequency domain
:param freq_domain - frequency domain data 3d numpy array:
:return frequency domain array with phase shifts added according to self.translations:
"""
translations = -self.translations if inv_transfo else self.translations
lin_spaces = [torch.linspace(-.5, .5, x) for x in freq_domain.shape[:-1]]
meshgrids = torch.meshgrid(*lin_spaces)
grid_coords = torch.stack([mg.flatten() for mg in meshgrids], 0)
phase_shift = torch.mul(grid_coords, translations).sum(0)
exp_phase_shift = imag_exp(-2 * math.pi * phase_shift, -1)
if self.cuda:
exp_phase_shift = exp_phase_shift.cuda()
freq_domain_translated = complex_mult(exp_phase_shift, freq_domain.reshape((-1, 2)), -1)
return freq_domain_translated.reshape(freq_domain.shape)
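    # Worked note added for clarity (hedged sketch of the identity used above): by the
    # Fourier shift theorem, translating an image by t multiplies its k-space by a
    # linear phase ramp,
    #     F{ f(x - t) }(k) = exp(-2*pi*i * k . t) * F{ f }(k),
    # which is the `exp_phase_shift` factor built here from the normalised grid
    # coordinates in [-0.5, 0.5] and self.translations.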
def _rotate_coordinates(self, inv_transfo=False):
"""
:return: grid_coordinates after applying self.rotations
"""
rotations = -self.rotations if inv_transfo else self.rotations
center = [math.ceil((x - 1) / 2) for x in self.im_shape]
[i1, i2, i3] = np.meshgrid(2*(np.arange(self.im_shape[0]) - center[0])/self.im_shape[0],
2*(np.arange(self.im_shape[1]) - center[1])/self.im_shape[1],
2*(np.arange(self.im_shape[2]) - center[2])/self.im_shape[2], indexing='ij')
        # rotating coordinates scaled between -1 and 1 is not equivalent to computing them between -100 and 100 and then dividing by 100
#special thanks to the matlab code from Gallichan https://github.com/dgallichan/retroMoCoBox
grid_coordinates = np.array([i1.flatten('F'), i2.flatten('F'), i3.flatten('F')])
#print('rotation size is {}'.format(self.rotations.shape))
rotations = rotations.reshape([3] + self.im_shape)
ix = (len(self.im_shape) + 1) * [slice(None)]
        ix[self.frequency_encoding_dim + 1] = 0  # no need to rotate along the frequency-encoding dimension
rotations = rotations[tuple(ix)].reshape(3, -1)
rotation_matrices = np.apply_along_axis(create_rotation_matrix_3d, axis=0, arr=rotations).transpose([-1, 0, 1])
rotation_matrices = rotation_matrices.reshape(self.phase_encoding_shape + [3, 3])
rotation_matrices = np.expand_dims(rotation_matrices, self.frequency_encoding_dim)
rotation_matrices = np.tile(rotation_matrices,
reps=([self.im_shape[ self.frequency_encoding_dim] if i == self.frequency_encoding_dim else 1
for i in range(5)])) # tile in freq encoding dimension
        # bug fix: use the same Fortran ('F') order as grid_coordinates, which these matrices are multiplied with
rotation_matrices = rotation_matrices.reshape([-1, 3, 3], order='F')
# tile grid coordinates for vectorizing computation
grid_coordinates_tiled = np.tile(grid_coordinates, [3, 1])
grid_coordinates_tiled = grid_coordinates_tiled.reshape([3, -1], order='F').T
rotation_matrices = rotation_matrices.reshape([-1, 3]) #reshape for matrix multiplication, so no order F
new_grid_coords = (rotation_matrices * grid_coordinates_tiled).sum(axis=1)
# reshape new grid coords back to 3 x nvoxels
new_grid_coords = new_grid_coords.reshape([3, -1], order='F')
# scale data between -pi and pi
max_vals = [1, 1, 1]
new_grid_coordinates_scaled = [(new_grid_coords[i, :] / max_vals[i]) * math.pi for i in [0, 1, 2]]
# range(new_grid_coords.shape[0])]
#new_grid_coordinates_scaled = [np.asfortranarray(i) for i in new_grid_coordinates_scaled]
#rrr why already flat ... ?
#self.new_grid_coordinates_scaled = new_grid_coordinates_scaled
#self.grid_coordinates = grid_coordinates
#self.new_grid_coords = new_grid_coords
return new_grid_coordinates_scaled, [grid_coordinates, new_grid_coords]
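    # Note added for clarity (hedged): the rotated grid is returned scaled to [-pi, pi]
    # because the adjoint NUFFT in _nufft below expects non-uniform k-space locations
    # expressed in radians; the unscaled grids are returned alongside for inspection.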
def _nufft(self, freq_domain_data, iflag=1, eps=1E-7, inv_transfo=False):
"""
rotate coordinates and perform nufft
:param freq_domain_data:
:param iflag/eps: see finufftpy doc
:param eps: precision of nufft
:return: nufft of freq_domain_data after applying self.rotations
"""
new_grid_coords = torch.from_numpy(np.asarray(self._rotate_coordinates(inv_transfo=inv_transfo)[0])).unsqueeze(0)
adj_nufft = AdjKbNufft(im_size=freq_domain_data.shape[:-1], n_shift=(0, 0, 0))
freq_domain_data = freq_domain_data.permute(3, 0, 1, 2).view((1, 1, 2, -1))
if self.cuda:
adj_nufft = adj_nufft.cuda()
new_grid_coords = new_grid_coords.cuda()
im_out = adj_nufft(freq_domain_data, new_grid_coords)
im_out = torch.stack(torch.unbind(im_out.squeeze(), 0), -1)
del adj_nufft, new_grid_coords
return im_out
def demean_fitpar(self,fitpars_interp, original_image):
o_shape = original_image.shape
#original_image = np.moveaxis(original_image.numpy(), 1, 2)
tfi = np.abs(np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(original_image))).astype(np.complex128))
#tfi = np.real((np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(original_image)))).astype(np.complex128)) does not work (make a shift)
#ss = np.tile(tfi, (6, 1, 1, 1))
ss = tfi #np.moveaxis(tfi, 1,2) # to have the same order as previously
# mean around kspace center
#ss_center = np.zeros(ss.shape)
#nbpts = 2
#center = [np.floor((x - 1) / 2).astype(int) for x in o_shape]
#ss_center[center[0]-nbpts:center[0]+nbpts,center[1]-nbpts:center[1]+nbpts,center[2]-nbpts:center[2]+nbpts] = ss[center[0]-nbpts:center[0]+nbpts,center[1]-nbpts:center[1]+nbpts,center[2]-nbpts:center[2]+nbpts]
#ss = ss_center
ff = fitpars_interp
        #ff = np.moveaxis(fitpars_interp, 2, 3)  # because y is the slowest axis; useful for plotting but not necessary if ff and ss are consistent
to_substract = np.zeros(6)
for i in range(0, 6):
ffi = ff[i].reshape(-1)
ssi = ss.reshape(-1)
#xx = np.argwhere(ssi > (np.max(ssi) * 0.001)).reshape(-1)
#to_substract[i] = np.sum(ffi[xx] * ssi[xx]) / np.sum(ssi[xx])
to_substract[i] = np.sum(ffi * ssi) / np.sum(ssi)
#print('Removing TF mean {} '.format(to_substract))
#print('Removing {} OR {}'.format(to_substract, (to_substract+self.displacement_substract)))
to_substract_tile = np.tile(to_substract[..., np.newaxis, np.newaxis, np.newaxis],
(1, o_shape[0], o_shape[1], o_shape[2]))
fitpars_interp = np.subtract(fitpars_interp, to_substract_tile)
return fitpars_interp, to_substract
def create_rotation_matrix_3d(angles):
"""
given a list of 3 angles, create a 3x3 rotation matrix that describes rotation about the origin
    :param angles (list or numpy array): rotation angles about x, y and z, in radians
:return (numpy array) : rotation matrix 3x3
"""
mat1 = np.array([[1., 0., 0.],
[0., math.cos(angles[0]), math.sin(angles[0])],
[0., -math.sin(angles[0]), math.cos(angles[0])]],
dtype='float')
mat2 = np.array([[math.cos(angles[1]), 0., -math.sin(angles[1])],
[0., 1., 0.],
[math.sin(angles[1]), 0., math.cos(angles[1])]],
dtype='float')
mat3 = np.array([[math.cos(angles[2]), math.sin(angles[2]), 0.],
[-math.sin(angles[2]), math.cos(angles[2]), 0.],
[0., 0., 1.]],
dtype='float')
mat = (mat1 @ mat2) @ mat3
return mat
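# Hedged usage sketch added for illustration: `_demo_rotation_matrix` is not part of the
# original file and assumes `numpy as np` and `math` are imported at module level, as the
# surrounding code already requires.
def _demo_rotation_matrix():
    # A 90-degree rotation about z maps the x unit vector onto -y under this sign convention.
    rot = create_rotation_matrix_3d([0., 0., math.pi / 2])
    return rot @ np.array([1., 0., 0.])  # approximately [0., -1., 0.]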
def calculate_mean_FD_P(motion_params):
"""
Method to calculate Framewise Displacement (FD) as per Power et al., 2012
"""
translations = np.transpose(np.abs(np.diff(motion_params[0:3, :])))
rotations = np.transpose(np.abs(np.diff(motion_params[3:6, :])))
fd = np.sum(translations, axis=1) + (50 * np.pi / 180) * np.sum(rotations, axis=1)
#fd = np.insert(fd, 0, 0)
return np.mean(fd)
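# Hedged example added for illustration (`_demo_mean_fd_p` is not in the original source):
# the factor 50 * pi / 180 converts a rotation step in degrees into the arc length (in mm)
# swept on a 50 mm radius sphere, so FD sums translations and rotations in millimetres.
def _demo_mean_fd_p():
    params = np.zeros((6, 3))           # 6 motion parameters over 3 time points
    params[0, :] = [0., 1., 1.]         # one 1 mm translation step along x
    params[3, :] = [0., 0., 1.]         # one 1 degree rotation step about x
    return calculate_mean_FD_P(params)  # (1 + 50 * np.pi / 180) / 2, roughly 0.94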
def calculate_mean_Disp_P(motion_params):
"""
    Same as above, but without taking the difference between successive frames
"""
translations = np.transpose(np.abs(motion_params[0:3, :]))
rotations = np.transpose(np.abs(motion_params[3:6, :]))
fd = np.mean(translations, axis=1) + (50 * np.pi / 180) * np.mean(rotations, axis=1)
return np.mean(fd)
def calculate_mean_FD_J(motion_params):
"""
Method to calculate framewise displacement as per Jenkinson et al. 2002
"""
pm = np.zeros((motion_params.shape[1],16))
for tt in range(motion_params.shape[1]):
P = np.hstack((motion_params[:, tt], np.array([1, 1, 1, 0, 0, 0])))
pm[tt,:] = spm_matrix(P, order=0).reshape(-1)
    # The default radius (as in FSL) of the sphere that approximates the brain
rmax = 80.0
T_rb_prev = pm[0].reshape(4, 4)
fd = np.zeros(pm.shape[0])
for i in range(1, pm.shape[0]):
T_rb = pm[i].reshape(4, 4)
M = np.dot(T_rb, np.linalg.inv(T_rb_prev)) - np.eye(4)
A = M[0:3, 0:3]
b = M[0:3, 3]
fd[i] = np.sqrt( (rmax * rmax / 5) * np.trace(np.dot(A.T, A)) + np.dot(b.T, b) )
T_rb_prev = T_rb
return np.mean(fd)
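# Note added for clarity (hedged): the loop above implements Jenkinson's framewise
# displacement FD_i = sqrt((rmax**2 / 5) * trace(A.T @ A) + b.T @ b), where
# M = T_i @ inv(T_{i-1}) - I is split into its rotational block A (top-left 3x3) and
# translational part b (last column), and rmax = 80 mm approximates the head radius;
# spm_matrix() below builds each rigid-body transform T_i from the motion parameters.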
def calculate_mean_RMSE_displacment(fit_pars, image=None):
"""
    Very crude approximation in which rotations (in degrees) and translations are simply averaged together.
"""
if image is None:
r1 = np.sqrt(np.sum(fit_pars[0:3] * fit_pars[0:3], axis=0))
rms1 = np.sqrt(np.mean(r1 * r1))
r2 = np.sqrt(np.sum(fit_pars[3:6] * fit_pars[3:6], axis=0))
rms2 = np.sqrt(np.mean(r2 * r2))
res = (rms1 + rms2) / 2
else:
tfi = np.abs(np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(image))).astype(np.complex128))
ss = tfi
ff = fit_pars
to_substract = np.zeros(6)
for i in range(0, 6):
ffi = ff[i].reshape(-1)
ssi = ss.reshape(-1)
# xx = np.argwhere(ssi > (np.max(ssi) * 0.001)).reshape(-1)
# to_substract[i] = np.sum(ffi[xx] * ssi[xx]) / np.sum(ssi[xx])
to_substract[i] = np.sqrt( np.sum(ffi * ffi * ssi) / np.sum(ssi) )
res = np.mean(to_substract)
return res
def spm_matrix(P, order=0):
"""
    FORMAT [A] = spm_matrix(P)
    P(0)  - x translation
    P(1)  - y translation
    P(2)  - z translation
    P(3)  - rotation about x, in degrees
    P(4)  - rotation about y, in degrees
    P(5)  - rotation about z, in degrees
    P(6)  - x scaling
    P(7)  - y scaling
    P(8)  - z scaling
    P(9)  - x affine (shear)
    P(10) - y affine (shear)
    P(11) - z affine (shear)
    order - application order of the transformations: if order == 0 (the default), A = S*Z*R*T; otherwise A = T*R*Z*S
"""
convert_to_torch=False
if torch.is_tensor(P):
P = P.numpy()
convert_to_torch=True
[P[3], P[4], P[5]] = [P[3]*180/np.pi, P[4]*180/np.pi, P[5]*180/np.pi] #degre to radian
T = np.array([[1,0,0,P[0]],[0,1,0,P[1]],[0,0,1,P[2]],[0,0,0,1]])
R1 = np.array([[1,0,0,0],
                   [0,np.cos(P[3]),np.sin(P[3]),0],  # sign change compared to SPM, possibly due to neurological vs. radiological convention
[0,-np.sin(P[3]),np.cos(P[3]),0],
[0,0,0,1]])
R2 = np.array([[np.cos(P[4]),0,-np.sin(P[4]),0],
[0,1,0,0],
[np.sin(P[4]),0,np.cos(P[4]),0],
[0,0,0,1]])
    R3 = np.array([[np.cos(P[5]),np.sin(P[5]),0,0],  # sign change compared to SPM, possibly due to neurological vs. radiological convention
[-np.sin(P[5]),np.cos(P[5]),0,0],
[0,0,1,0],
[0,0,0,1]])
#R = R1.dot(R2.dot(R3))
R = (R1.dot(R2)).dot(R3)
Z = np.array([[P[6],0,0,0],[0,P[7],0,0],[0,0,P[8],0],[0,0,0,1]])
S = np.array([[1,P[9],P[10],0],[0,1,P[11],0],[0,0,1,0],[0,0,0,1]])
if order==0:
A = S.dot(Z.dot(R.dot(T)))
else:
A = T.dot(R.dot(Z.dot(S)))
if convert_to_torch:
A = torch.from_numpy(A).float()
return A
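# Hedged usage sketch added for illustration (`_demo_spm_matrix` is not part of the original file):
def _demo_spm_matrix():
    # A pure 2 mm translation along x, with unit scaling and no rotation or shear,
    # yields an affine whose last column is [2, 0, 0, 1].
    P = np.array([2., 0., 0., 0., 0., 0., 1., 1., 1., 0., 0., 0.])
    return spm_matrix(P, order=1)  # order != 0 applies T @ R @ Z @ S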
|
the-stack_106_28468 | matOne = []
print("Enter 9 Elements for First Matrix: ")
for i in range(3):
matOne.append([])
for j in range(3):
num = int(input())
matOne[i].append(num)
matTwo = []
print("Enter 9 Elements for Second Matrix: ")
for i in range(3):
matTwo.append([])
for j in range(3):
num = int(input())
matTwo[i].append(num)
matThree = []
for i in range(3):
matThree.append([])
for j in range(3):
matThree[i].append(matOne[i][j]+matTwo[i][j])
print("\nAddition Result of Two Given Matrix is:")
for i in range(3):
for j in range(3):
print(matThree[i][j], end=" ")
print()
|
the-stack_106_28469 | import turtle as t
def triangle(index, len):
for i in range(3):
t.fd(len)
t.right(120)
t.fd(len / 2)
def init():
t.setup(1200, 1200)
t.bgcolor("black")
t.color("white")
t.pensize(2)
t.speed(10)
def main():
init()
length = 100
triangle(2, length)
t.mainloop()
if __name__ == "__main__":
main()
|
the-stack_106_28471 | import logging
from typing import Callable, Optional, Tuple
import numpy as np
from numpy import ndarray
from scipy.sparse import coo_matrix
from skfem.element import DiscreteField, Element
from skfem.mapping import Mapping
from skfem.mesh import Mesh
from .abstract_basis import AbstractBasis
from ..dofs import Dofs
logger = logging.getLogger(__name__)
class CellBasis(AbstractBasis):
"""For fields defined inside the domain.
:class:`~skfem.assembly.CellBasis` object is a combination of
:class:`~skfem.mesh.Mesh` and :class:`~skfem.element.Element`.
>>> from skfem import *
>>> m = MeshTri.init_symmetric()
>>> e = ElementTriP1()
>>> basis = CellBasis(m, e)
The resulting objects are used in the assembly.
>>> from skfem.models.poisson import laplace
>>> K = asm(laplace, basis)
>>> K.shape
(5, 5)
"""
def __init__(self,
mesh: Mesh,
elem: Element,
mapping: Optional[Mapping] = None,
intorder: Optional[int] = None,
elements: Optional[ndarray] = None,
quadrature: Optional[Tuple[ndarray, ndarray]] = None,
dofs: Optional[Dofs] = None):
"""Combine :class:`~skfem.mesh.Mesh` and :class:`~skfem.element.Element`
into a set of precomputed global basis functions.
Parameters
----------
mesh
An object of type :class:`~skfem.mesh.Mesh`.
elem
An object of type :class:`~skfem.element.Element`.
mapping
An object of type :class:`skfem.mapping.Mapping`. If `None`, uses
`mesh.mapping`.
intorder
Optional integration order, i.e. the degree of polynomials that are
integrated exactly by the used quadrature. Not used if `quadrature`
is specified.
elements
Optional subset of element indices.
quadrature
Optional tuple of quadrature points and weights.
dofs
Optional :class:`~skfem.assembly.Dofs` object.
"""
logger.info("Initializing {}({}, {})".format(type(self).__name__,
type(mesh).__name__,
type(elem).__name__))
super(CellBasis, self).__init__(mesh,
elem,
mapping,
intorder,
quadrature,
mesh.refdom,
dofs)
self.basis = [self.elem.gbasis(self.mapping, self.X, j, tind=elements)
for j in range(self.Nbfun)]
if elements is None:
self.nelems = mesh.nelements
self.tind = None
else:
self.nelems = len(elements)
self.tind = self._normalize_elements(elements)
self.dx = (np.abs(self.mapping.detDF(self.X, tind=elements))
* np.tile(self.W, (self.nelems, 1)))
logger.info("Initializing finished.")
def default_parameters(self):
"""Return default parameters for `~skfem.assembly.asm`."""
return {'x': self.global_coordinates(),
'h': self.mesh_parameters()}
def global_coordinates(self) -> DiscreteField:
return DiscreteField(self.mapping.F(self.X, tind=self.tind))
def mesh_parameters(self) -> DiscreteField:
return DiscreteField(np.abs(self.mapping.detDF(self.X, self.tind))
** (1. / self.mesh.dim()))
def refinterp(self,
y: ndarray,
nrefs: int = 1,
Nrefs: Optional[int] = None) -> Tuple[Mesh, ndarray]:
"""Refine and interpolate (for plotting)."""
if Nrefs is not None:
nrefs = Nrefs # for backwards compatibility
# mesh reference domain, refine and take the vertices
meshclass = type(self.mesh)
m = meshclass.init_refdom().refined(nrefs)
X = m.p
# map vertices to global elements
x = self.mapping.F(X)
# interpolate some previous discrete function at the vertices
# of the refined mesh
w = 0. * x[0]
for j in range(self.Nbfun):
basis = self.elem.gbasis(self.mapping, X, j)
w += y[self.element_dofs[j]][:, None] * basis[0]
# create connectivity for the new mesh
nt = self.nelems
t = np.tile(m.t, (1, nt))
dt = np.max(t)
t += (dt + 1) *\
(np.tile(np.arange(nt), (m.t.shape[0] * m.t.shape[1], 1))
.flatten('F')
.reshape((-1, m.t.shape[0])).T)
if X.shape[0] == 1:
p = np.array([x.flatten()])
else:
p = x[0].flatten()
for itr in range(len(x) - 1):
p = np.vstack((p, x[itr + 1].flatten()))
M = meshclass(p, t)
return M, w.flatten()
def probes(self, x: ndarray) -> coo_matrix:
"""Return matrix which acts on a solution vector to find its values
on points `x`.
        The product of this matrix with a finite element function vector is like the
        result of assembling a `Functional`: it can be thought of as the matrix of
        inner products of the basis test functions with Dirac deltas at `x`. Because
        its action is concentrated at points, it is not assembled with the usual
        quadratures.
"""
cells = self.mesh.element_finder(mapping=self.mapping)(*x)
pts = self.mapping.invF(x[:, :, np.newaxis], tind=cells)
phis = np.array(
[
self.elem.gbasis(self.mapping, pts, k, tind=cells)[0][0]
for k in range(self.Nbfun)
]
).flatten()
return coo_matrix(
(
phis,
(
np.tile(np.arange(x.shape[1]), self.Nbfun),
self.element_dofs[:, cells].flatten(),
),
),
shape=(x.shape[1], self.N),
)
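    # Illustrative usage added for clarity (hedged; consistent with `interpolator` below):
    #     pts = np.array([[0.25, 0.75], [0.5, 0.5]])  # shape (dim, n_points)
    #     P = basis.probes(pts)                       # sparse, shape (n_points, basis.N)
    #     values = P @ y                              # FE function y evaluated at pts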
def point_source(self, x: ndarray) -> ndarray:
"""Return right-hand side vector for unit source at `x`,
i.e. the vector of inner products of a Dirac delta at `x`
with the test functions of the basis.
This is like what is obtained by assembling a `LinearForm`
but because its action is concentrated at points it is not
assembled with the usual quadratures.
"""
return self.probes(x[:, None]).toarray()[0]
def interpolator(self, y: ndarray) -> Callable[[ndarray], ndarray]:
"""Return a function handle, which can be used for finding
values of the given solution vector `y` on given points."""
def interpfun(x: ndarray) -> ndarray:
return self.probes(x) @ y
return interpfun
def with_element(self, elem: Element) -> 'CellBasis':
"""Return a similar basis using a different element."""
return type(self)(
self.mesh,
elem,
mapping=self.mapping,
quadrature=self.quadrature,
elements=self.tind,
)
def project(self, interp, elements=None):
"""Perform :math:`L^2` projection onto the basis.
See :ref:`l2proj` for more information.
Parameters
----------
interp
An object of type :class:`~skfem.element.DiscreteField` which is a
function (to be projected) evaluated at global quadrature points.
If a function is given, then :class:`~skfem.element.DiscreteField`
is created by passing an array of global quadrature point locations
to the function.
elements
Optionally perform the projection on a subset of elements. The
values of the remaining DOFs are zero.
"""
from skfem.utils import solve, condense
M, f = self._projection(interp)
if elements is not None:
return solve(*condense(M, f, I=self.get_dofs(elements=elements)))
elif self.tind is not None:
return solve(*condense(M, f, I=self.get_dofs(elements=self.tind)))
return solve(M, f)
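    # Illustrative usage added for clarity (hedged sketch mirroring the docstring above):
    #     basis = CellBasis(MeshTri(), ElementTriP1())
    #     y = basis.project(lambda x: x[0] ** 2)   # DOF values of the L2 projection of x**2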
|
the-stack_106_28474 | from bytecode import Instr, Bytecode, Label
from boa.code.vmtoken import VMTokenizer
from boa.code.expression import Expression
from boa.code import pyop
from uuid import uuid4
class method(object):
code = None
bytecode = None
block = None
blocks = []
stack_size = 0
tokens = []
tokenizer = None
address = 0
module = None
name = None
module_name = None
_blocks = None
_expressions = None
_scope = None
_forloop_counter = 0
_extra = None
_id = None
@property
def id(self):
return self._id
@property
def forloop_counter(self):
self._forloop_counter += 1
return self._forloop_counter
@property
def vm_tokens(self):
"""
Returns a list of all vm tokens in this method.
:return: a list of vm tokens in this method
:rtype: list
"""
return self.tokenizer.vm_tokens
@property
def is_interop(self):
if 'boa.interop' in self.full_name:
return True
if 'boa.builtins' in self.full_name and self.full_name != 'boa.builtins.range':
return True
return False
@property
def full_name(self):
if len(self.module_name):
return '%s.%s' % (self.module_name, self.name)
return self.name
@property
def scope(self):
return self._scope
@property
def args(self):
return self.bytecode.argnames
@property
def stacksize(self):
return self.bytecode.argcount + len(self._blocks) + 2
def __init__(self, module, block, module_name, extra):
self.module = module
self.block = block
self.module_name = module_name
self._extra = extra
self._id = uuid4()
try:
self.code = self.block[0].arg
self.name = self.block[1].arg
except Exception as e:
print("Colud not get code or name %s " % e)
# print("**********************")
# print("**********************")
# print("METHOD %s " % self.name)
# import dis
# dis.dis(self.code)
self.bytecode = Bytecode.from_code(self.code)
self.setup()
def setup(self):
self._scope = {}
for index, name in enumerate(self.bytecode.argnames):
self._scope[name] = index
blocks = []
# find LOAD_GLOBALS
gbl = []
for instr in self.bytecode:
if isinstance(instr, Instr) and instr.opcode == pyop.LOAD_GLOBAL:
gbl.append(instr.arg)
# if there are global things passed in
# we want to check if they are used in the method
# and if so, load them in
global_blocks = []
if len(self._extra):
for item in self._extra:
if item[-1].opcode == pyop.STORE_NAME:
if item[-1].arg in gbl:
global_blocks.append(item)
self.add_to_scope(item[-1].arg)
if item[0].opcode == pyop.LOAD_NAME:
item[0].opcode = pyop.LOAD_GLOBAL
blocks = global_blocks
instructions = []
last_ln = self.bytecode[0].lineno
for instr in self.bytecode:
if not isinstance(instr, Label) and instr.lineno != last_ln:
last_ln = instr.lineno
if len(instructions):
blocks.append(instructions)
instructions = []
if not isinstance(instr, Label) and instr.opcode == pyop.STORE_FAST:
self.add_to_scope(instr.arg)
instructions.append(instr)
if len(instructions):
blocks.append(instructions)
self._blocks = blocks
self.tokenizer = VMTokenizer(self)
self._expressions = []
def add_to_scope(self, argname):
if argname not in self.scope.keys():
current_total = len(self._scope)
self._scope[argname] = current_total
def prepare(self):
last_exp = None
for block in self._blocks:
exp = Expression(block, self.tokenizer, self)
self._expressions.append(exp)
if last_exp:
last_exp.next = exp
last_exp = exp
for exp in self._expressions:
exp.tokenize()
self.convert_breaks()
self.convert_jumps()
def convert_jumps(self):
filtered = []
for vmtoken in self.tokenizer.vm_tokens.values():
if vmtoken.pytoken:
filtered.append(vmtoken)
for vmtoken in filtered:
if vmtoken.pytoken.jump_from and not vmtoken.pytoken.jump_found:
for vmtoken2 in filtered:
if vmtoken2.pytoken.jump_target == vmtoken.pytoken.jump_from:
diff = vmtoken2.addr - vmtoken.addr
vmtoken.data = diff.to_bytes(2, 'little', signed=True)
vmtoken2.pytoken.jump_from_addr = vmtoken.addr
vmtoken.pytoken.jump_to_addr = vmtoken2.addr
def convert_breaks(self):
tokens = list(self.tokenizer.vm_tokens.values())
setup_token_label = None
for tkn in tokens:
if tkn.pytoken:
if tkn.pytoken.pyop == pyop.SETUP_LOOP:
setup_token_label = tkn.pytoken.jump_from
if tkn.pytoken.pyop == pyop.BREAK_LOOP:
if not setup_token_label:
raise Exception("No loopsetup for break")
tkn.pytoken.jump_from = setup_token_label
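    # Note added for clarity (hedged summary of the two passes above): convert_breaks
    # rewrites each BREAK_LOOP so it jumps to the label recorded by the enclosing
    # SETUP_LOOP, and convert_jumps then resolves every label pair into a signed 2-byte
    # little-endian relative offset (vmtoken2.addr - vmtoken.addr) stored in the jump
    # token's data.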
|
the-stack_106_28476 | import matplotlib.pyplot as plt
import numpy as np
import sampler as randc
############################################################################
# RANDOM COUNTERS
############################################################################
bins = 17
rc = randc.Sampler(np.ones(bins))
rc.update(.3, 6)
rc.update(.3, 7)
rc.update(.5, 1)
rc.update(.1, bins - 1)
sampling = []
for i in range(10000):
sampling.append(rc.sample())
print(rc.score_tree)
plt.hist(sampling, bins=bins)
plt.show()
|