"""
02/13/2013 copied from goto6mcpV4-5
10/25/2012 by EM
move telescope to desAlt using MCP
11/18/2012 - changed timeout from 40 sec to 80 sec
01/04/2013 - changed time rate from 1 sec to 0.4 sec; print output every second iteration;
check position and velocity faster by reorganizing the check block;
predict the new position as (velocity)*(time interval) and stop if out of range.
01/08/2013 - call say via subprocess rather than os; calculate the predicted value
of alt and stop if the next linear step would go below the destination; output predicted alt;
changed log style
01/09/2013 - add +-0.5 degrees beyond the destination, to give the move room to finish naturally
01/23/2013 1) removed tcc 'axis init' from the end;
2) changed "mcp alt move 6" to
"mcp alt goto_pos_va %s 300000 20000" % (self.altDes*3600/0.01400002855);
02/12/2013 changed the stop condition: stop if going down and
elif pos<(self.altDes+(self.alt-self.altDes)*0.2) and abs(vel)>=abs(velold):
05/17/2013 EM check host name and raise an error if not the telescope laptop
06/17/2014 EM changed getBit(self, key, name, val) function for new stui 1.4
08/19/2014 EM changed low limit from 6 to 5 degrees after summer shakedown
2015-11-03 ROwen Replace "== None" with "is None" and "!= None" with "is not None" to modernize the code.
"""
import RO.Astro.Tm
import RO.Wdg
import Tkinter
import TUI.Models
import os
import time
import subprocess
import socket
class ScriptClass(object):
def __init__(self, sr):
sr.debug = False # if False, real time run
# sr.debug = True # if True, run in debug-only mode
self.name="goto5mcp "
self.sr = sr
sr.master.winfo_toplevel().wm_resizable(True, True)
F1 = Tkinter.Frame(sr.master)
gr1a = RO.Wdg.Gridder(F1)
self.lowLimit=5
self.altWdg = RO.Wdg.IntEntry(master =F1, defValue = 30,
minValue = self.lowLimit, maxValue = 90, helpText = "Destination altitude ",)
gr1a.gridWdg("Destination altitude: ", self.altWdg,)
F1.grid(row=0, column=0, sticky="w")
self.logWdg = RO.Wdg.LogWdg(master=sr.master, width=35, height =20,)
self.logWdg.grid(row=1, column=0, sticky="news")
sr.master.rowconfigure(1, weight=1)
sr.master.columnconfigure(0, weight=1)
self.tccModel = TUI.Models.getModel("tcc")
self.mcpModel = TUI.Models.getModel("mcp")
self.ab_I6_L0=['alt_mtr_up_perm_in', 'alt_mtr_dn_perm_in', 'alt_mtr1_perm_in', 'alt_mtr2_perm_in', 'wind_alt_mtr_perm_in', 'alt_plc_perm_in', 'wind_alt_plc_perm_in', 'az_stow_3a', 'az_mtr_cw_perm_in', 'az_mtr_ccw_perm_in', 'az_mtr1_perm_in', 'az_mtr2_perm_in', 'wind_az_mtr_perm_in', 'az_plc_perm_in', 'wind_az_plc_perm_in', 'az_stow_3b', 'tcc_stop', 'cr_stop', 'spare_s1_c2', 'fiber_signal_loss', 'n_wind_stop', 'n_fork_stop', 'n_rail_stop', 's_rail_stop', 'w_rail_stop', 'n_lower_stop', 's_lower_stop', 'e_lower_stop', 'w_lower_stop', 's_wind_stop', 'nw_fork_stop', 'mcp_watchdog_timer', ]
self.ab_I7_L0=['alt_grt_83_limit_1', 'bldg_clear_alt', 'az_stow_2a', 'az_stow_2b', 'deg_15_stop_ext', 'alt_grt_18d6_limit_2', 'alt_slip', 'alt_velocity_limit', 'az_dir_cw', 'az_dir_ccw', 'az_neg_201a_cw', 'az_pos_445a_ccw', 'az_neg_201b_cw', 'az_pos_445b_ccw', 'spare_s8_c6', 'spare_s8_c7', 'rot_mtr_cw_perm_in', 'rot_mtr_ccw_perm_in', 'rot_mtr_perm_in', 'spare_s5_c3', 'bldg_perm_in', 'rot_plc_perm_in', 'hatch_cls', 'alt_les_2d5_limit', 'alt_grt_0d3_limit', 'alt_locking_pin_out', 'alt_les_90d5_limit', 'bldg_on_alt', 'az_109_131_limit_1', 'alt_grt_18d6_limit_1', 'az_stow_1a', 'az_stow_1b', ]
self.ab_I9_L0=['s1_c0_bypass_sw', 's1_c1_bypass_sw', 's1_c2_bypass_sw', 's1_c3_bypass_sw', 's1_c4_bypass_sw', 's1_c5_bypass_sw', 's1_c6_bypass_sw', 's1_c7_bypass_sw', 's2_c0_bypass_sw', 's2_c1_bypass_sw', 's2_c2_bypass_sw', 's2_c3_bypass_sw', 's2_c4_bypass_sw', 's2_c5_bypass_sw', 's2_c6_bypass_sw', 's2_c7_mcp_wtchdg_byp', 't_bar_tel_stat', 'clamp_en_stat', 'clamp_dis_stat', 'az_brake_en_stat', 'az_brake_dis_stat', 'alt_brake_en_stat', 'alt_brake_dis_stat', 'low_lvl_lighting_req', 'solenoid_engage_sw', 'alt_locking_pin_in', 'in_9_bit_10_spare', 'in_9_bit_11_spare', 'in_9_bit_12_spare', 'in_9_bit_13_spare', 'in_9_bit_14_spare', 'in_9_bit_15_spare',]
self.MaxPosErr = 0.01 # alt maximum position error (deg)
self.azErr=0.2
self.timeInt= 0.4*1000 # 0.4 sec, expressed in ms for sr.waitMS()
self.TimeLimit = 80 # time limit for move to final altitude (80 sec)
self.owMcp="(telnet):-1:"
def getTAITimeStr(self,):
''' get TAI time for time-stamps'''
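# TAI = UTC - (UTC - TAI); RO.Astro.Tm.getUTCMinusTAI() presumably returns that offset in seconds.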
return time.strftime("%H:%M:%S",
time.gmtime(time.time() - RO.Astro.Tm.getUTCMinusTAI()))
def prnMsg (self, ss):
''' output time-stamp and message'''
self.logWdg.addMsg(ss)
def semOwner(self,):
''' get semaphoreOwner from mcp'''
sr=self.sr
ow = sr.getKeyVar(self.mcpModel.semaphoreOwner, ind=0, defVal=None)
# ow= self.mcpModel.semaphoreOwner[0]
return ow
def ifBrakesOn(self,):
''' check whether the altitude brakes are on '''
alt_brake=self.getBit(self.ab_I9_L0,"alt_brake_en_stat",self.mcpModel.ab_I9_L0[0])
return alt_brake
def ifAzStow1(self,):
''' check whether az is at the stow position (121 deg) '''
az_stow1a=self.getBit(self.ab_I7_L0,"az_stow_1a",self.mcpModel.ab_I7_L0[0])
az_stow1b=self.getBit(self.ab_I7_L0,"az_stow_1b",self.mcpModel.ab_I7_L0[0])
return (az_stow1a and az_stow1b)
def getBit1(self, key, name, val):
''' get plc bit, my old version, do not use now '''
ind=key.index(name)
mask=hex( int("1"+"0"*ind,2) )
if val & int(mask,16) !=0:
rr=1
else:
rr=0
return rr
def getBit(self, key, name, val):
''' get plc bit, new version suggested by RO'''
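# Example (hypothetical PLC word): "alt_brake_en_stat" is at index 21 of self.ab_I9_L0,
# so getBit(self.ab_I9_L0, "alt_brake_en_stat", word) tests bit 21 (mask 1 << 21) of word.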
ind=key.index(name)
mask = 1 << ind
if val & mask !=0:
rr=1
else:
rr=0
return rr
def run(self, sr, sel=0):
''' main program to goto5 '''
# check settings
# is telescope laptop?
host=socket.gethostname()
if '25m-macbook' not in host:
self.prnMsg("goto5mcp should run on the telescope laptop only")
raise sr.ScriptError("not right computer")
tm= self.getTAITimeStr()
self.altDes=self.altWdg.getNum() # destination altDes from self.altWdg
self.prnMsg("%s Start the move to %s " % (tm, self.altDes))
# is alt brakes?
if self.ifBrakesOn():
mes="clear altitude brake and run again"
os.popen('say %s ' % (mes) ) # say mes
raise sr.ScriptError(mes)
# my debug set: True - run, False - skip the command
self.run1=True # tcc axis stop
self.run2=True # mcp sem_take
self.run3=True # mcp alt move
self.run5=True # mcp alt brake.on
self.run6=True # mcp sem_give
# sem owners
owTcc="TCC:0:0"
owNone="None"
owMcpGui="[email protected]"
ow=self.semOwner()
if owMcpGui in ow:
raise sr.ScriptError(" please release MCP GUI semaphore and run again")
if not ( (ow==owTcc) or (ow==owNone) or (self.owMcp in ow) ):
raise sr.ScriptError("unknown semaphore owner, exit")
self.prnMsg("semaphoreOwner = %s" % ow)
# check axis status
yield sr.waitCmd(actor="tcc", cmdStr="axis status",
keyVars=[self.tccModel.altStat, self.tccModel.azStat, self.tccModel.rotStat,
self.tccModel.axePos],)
# self.az, self.alt, self.rot = self.tccModel.axePos[0:3]
self.alt=sr.value.getLastKeyVarData(self.tccModel.altStat)[0]
self.az =sr.value.getLastKeyVarData(self.tccModel.azStat)[0]
self.rot=sr.value.getLastKeyVarData(self.tccModel.rotStat)[0]
self.prnMsg("az=%6.2f, alt=%5.2f, rot=%6.2f" % (self.az, self.alt,self.rot))
if (self.az is None) or (self.alt is None) or (self.rot is None):
raise sr.ScriptError("some axes are not available, exit")
if self.ifAzStow1() != 1:
raise sr.ScriptError("plc: az is not at stow, exit")
if abs(self.az - 121.0 ) >= self.azErr:
raise sr.ScriptError("tcc: az is not 121, exit")
# get the direction of the move, direct=Up,Down, or None
# from init section - self.MaxPosErr = 0.01 # alt maximum position error (deg)
if abs(self.alt - self.altDes) < self.MaxPosErr:
self.prnMsg(" alt == altDes, exit")
os.popen('say %s ' % "telescope at destination, exit") # say mes
direct="None"
#return
raise sr.ScriptError()
elif self.altDes > self.alt:
direct="Up"
elif self.altDes < self.alt:
direct="Down"
else:
raise sr.ScriptError("where to go? exit")
self.prnMsg("alt=%s --> altDes=%s, %s" % (self.alt, self.altDes, direct))
os.popen('say %s ' % ("goto " + str(self.altDes)) ) # say mes
# Action section
# if owner == TCC, "tcc axis stop"
if self.semOwner()==owTcc:
act="tcc"; cmd="axis stop";
self.prnMsg("%s %s .." % (act, cmd))
if self.run1:
yield sr.waitCmd(actor=act, cmdStr=cmd)
yield sr.waitMS(500)
it=0
while self.semOwner() != owNone:
yield sr.waitMS(500)
it=it+1
if it > 10:
raise sr.ScriptError("tcc axis stop - failed, exit")
if self.semOwner() != owNone:
self.prnMsg("%s %s .." % (self.semOwner(), owNone))
raise sr.ScriptError("tcc axis stop - failed, exit")
# if owner is None, "mcp sem_take"
if self.semOwner()=="None":
act="mcp"; cmd="sem_take";
self.prnMsg("%s %s .." % (act, cmd))
if self.run2:
yield sr.waitCmd(actor=act, cmdStr=cmd, keyVars=[self.mcpModel.semaphoreOwner],)
it=0
while not (self.owMcp in self.semOwner()):
yield sr.waitMS(500)
it=it+1
if it > 10:
raise sr.ScriptError("mcp sem_take - failed, exit")
# check, is semOwner in owMcp="(telnet):-1:" ?
ow= self.semOwner()
if not (self.owMcp in ow):
raise sr.ScriptError("mcp did not get semaphore - failed")
# move "mcp alt move %s" % (altDes)
dtold=0; velold=0;
startTime = time.time()
act="mcp";
# cmd="alt move %s" % (self.altDes);
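# The destination is converted from degrees to MCP encoder units: 3600 arcsec/deg divided by
# 0.01400002855, presumably the encoder scale in arcsec per count; 300000 and 20000 appear to be
# the velocity and acceleration arguments of goto_pos_va.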
cmd="alt goto_pos_va %s 300000 20000" % (self.altDes*3600./0.01400002855)
self.prnMsg("%s %s .." % (act, cmd))
if self.run3:
yield sr.waitCmd(actor=act, cmdStr=cmd)
# watch for moving progress
i=0
while True:
yield sr.waitMS(self.timeInt)
yield sr.waitCmd(actor="tcc", cmdStr="axis status", \
keyVars=[self.tccModel.altStat, self.tccModel.axePos],)
# pos = self.tccModel.axePos[1]
# pos, vel = self.tccModel.altStat[0:2]
pos, vel = sr.value.getLastKeyVarData(self.tccModel.altStat)[0:2]
dt=time.time() - startTime
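# Linearly predict the altitude at the next check: current position plus velocity times
# the interval since the last sample, so the loop can brake before overshooting.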
nextAlt=pos+vel*(dt-dtold)
ssPos="%s, %5.2f sec, alt =%5.2f --> %5.2f, vel=%5.2f" % (i, dt, pos, nextAlt, vel)
if i%2==0:
ssPos1="alt =%5.2f vel=%5.2f" % (pos, vel)
self.prnMsg(ssPos1)
subprocess.Popen(['say',str(int(round(pos)))])
else:
tm= self.getTAITimeStr()
mes="" # request to break
if abs(pos - self.altDes) < self.MaxPosErr:
mes="moved to destination, brake"
self.prnMsg(mes)
break
if direct=="Down":
if nextAlt < (self.altDes-0.5):
self.prnMsg(ssPos)
mes="next move too low - brake"
self.prnMsg(mes)
break
elif pos < self.altDes:
self.prnMsg(ssPos)
mes="moved too low - brake"
self.prnMsg(mes)
break
elif pos < self.lowLimit:
self.prnMsg(ssPos)
mes="alt below %s - brake" % self.lowLimit
self.prnMsg(mes)
break
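# Within roughly the last 20% of the travel the move should be decelerating; if the speed
# is still not dropping, assume the move will overshoot and brake now (see 02/12/2013 note).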
elif pos<(self.altDes+(self.alt-self.altDes)*0.2) and abs(vel)>=abs(velold):
self.prnMsg(ssPos)
mes="move did not decelerate, brake"
self.prnMsg(mes)
break
if self.ifBrakesOn():
mes="alt brake detected, stop"
self.prnMsg(mes); break
if direct == "Up":
if nextAlt > (self.altDes+0.5):
self.prnMsg(ssPos)
mes="next move too high - brake"
self.prnMsg(mes)
break
elif pos > self.altDes:
self.prnMsg(ssPos)
mes="moved too high - brake"
self.prnMsg(mes)
break
if dt > self.TimeLimit:
mes="timeout, brake"
self.prnMsg(mes)
break
i = i+1
dtold = dt
velold = vel
# if semOwn=mcp but alt brake.off, call alt brake.on
if (self.owMcp in self.semOwner()) and not self.ifBrakesOn():
act="mcp"
cmd="alt brake.on"
self.prnMsg("%s %s .." % (act, cmd))
if self.run5:
yield sr.waitCmd(actor=act, cmdStr=cmd, checkFail=False)
os.popen('say %s ' % (mes) ) # say mes
# if semOwn = mcp, release sem to None
if self.owMcp in self.semOwner():
act="mcp"
cmd="sem_give"
self.prnMsg("%s %s .." % (act, cmd))
if self.run6:
yield sr.waitCmd(actor=act, cmdStr=cmd, checkFail=False)
yield sr.waitCmd(actor="tcc", cmdStr="axis status", keyVars=[self.tccModel.altStat])
pos, vel = sr.value.getLastKeyVarData(self.tccModel.altStat)[0:2]
self.prnMsg("final alt = %s, velocity = %s .." % (pos, vel))
yield sr.waitCmd(actor="tcc", cmdStr="axis stop")
def end(self, sr):
"""Clean up"""
if self.owMcp in self.semOwner():
act="mcp"; cmd="sem_give";
sr.startCmd(actor=act, cmdStr=cmd, checkFail=False)
self.logWdg.addMsg("="*20)
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
# to overwrite in feature-extractor-specific tests
feat_extract_tester = None
feature_extraction_class = None
@property
def feat_extract_dict(self):
return self.feat_extract_tester.prepare_feat_extract_dict()
def test_feat_extract_common_properties(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(feat_extract, "feature_size"))
self.assertTrue(hasattr(feat_extract, "sampling_rate"))
self.assertTrue(hasattr(feat_extract, "padding_value"))
def test_batch_feature(self):
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")
batch_features_input = processed_features[input_name]
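# Extractors with feature_size == 1 may return 2-D (batch, seq_len) arrays; add a trailing
# singleton axis so the 3-D shape check below applies in both cases.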
if len(batch_features_input.shape) < 3:
batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
)
@require_torch
def test_batch_feature_pt(self):
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")
batch_features_input = processed_features[input_name]
if len(batch_features_input.shape) < 3:
batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
)
@require_tf
def test_batch_feature_tf(self):
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")
batch_features_input = processed_features[input_name]
if len(batch_features_input.shape) < 3:
batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
)
def _check_padding(self, numpify=False):
def _inputs_have_equal_length(input):
length = len(input[0])
for input_slice in input[1:]:
if len(input_slice) != length:
return False
return True
def _inputs_are_equal(input_1, input_2):
if len(input_1) != len(input_2):
return False
for input_slice_1, input_slice_2 in zip(input_1, input_2):
if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
return False
return True
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
pad_diff = self.feat_extract_tester.seq_length_diff
pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
pad_min_length = self.feat_extract_tester.min_seq_length
batch_size = self.feat_extract_tester.batch_size
feature_size = self.feat_extract_tester.feature_size
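# The tester's inputs grow by seq_length_diff per sample starting at min_seq_length (the
# length assertions on input_1 below rely on this); pad_max_length is one step beyond the longest input.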
# test padding for List[int] + numpy
input_1 = feat_extract.pad(processed_features, padding=False)
input_1 = input_1[input_name]
input_2 = feat_extract.pad(processed_features, padding="longest")
input_2 = input_2[input_name]
input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
input_3 = input_3[input_name]
input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
input_4 = input_4[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(ValueError):
feat_extract.pad(processed_features, padding="max_length")[input_name]
input_5 = feat_extract.pad(
processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
)
input_5 = input_5[input_name]
self.assertFalse(_inputs_have_equal_length(input_1))
self.assertTrue(_inputs_have_equal_length(input_2))
self.assertTrue(_inputs_have_equal_length(input_3))
self.assertTrue(_inputs_are_equal(input_2, input_3))
self.assertTrue(len(input_1[0]) == pad_min_length)
self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
input_6 = input_6[input_name]
input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
input_7 = input_7[input_name]
input_8 = feat_extract.pad(
processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
)
input_8 = input_8[input_name]
input_9 = feat_extract.pad(
processed_features,
padding="max_length",
pad_to_multiple_of=10,
max_length=pad_max_length,
return_tensors="np",
)
input_9 = input_9[input_name]
self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
self.assertTrue(_inputs_are_equal(input_6, input_7))
expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
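# e.g. a (hypothetical) pad_max_length of 37 rounds up to 40, the next multiple of 10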
self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
self.assertTrue(input_9.shape[:2] == (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_9.shape[2] == feature_size)
# Check padding value is correct
padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1e-3
)
self.assertTrue(
abs(
np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
)
< 1e-3
)
self.assertTrue(
abs(
np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
)
< 1e-3
)
self.assertTrue(
abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
)
self.assertTrue(
abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1e-3
)
def _check_truncation(self, numpify=False):
def _inputs_have_equal_length(input):
length = len(input[0])
for input_slice in input[1:]:
if len(input_slice) != length:
return False
return True
def _inputs_are_equal(input_1, input_2):
if len(input_1) != len(input_2):
return False
for input_slice_1, input_slice_2 in zip(input_1, input_2):
if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
return False
return True
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
# truncate to smallest
input_1 = feat_extract.pad(
processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
)
input_1 = input_1[input_name]
input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
input_2 = input_2[input_name]
self.assertTrue(_inputs_have_equal_length(input_1))
self.assertFalse(_inputs_have_equal_length(input_2))
# truncate to smallest with np
input_3 = feat_extract.pad(
processed_features,
padding="max_length",
max_length=len(speech_inputs[0]),
return_tensors="np",
truncation=True,
)
input_3 = input_3[input_name]
input_4 = feat_extract.pad(
processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
)
input_4 = input_4[input_name]
self.assertTrue(_inputs_have_equal_length(input_3))
self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(input_4))
# truncate to middle
input_5 = feat_extract.pad(
processed_features,
padding="max_length",
max_length=len(speech_inputs[1]),
truncation=True,
return_tensors="np",
)
input_5 = input_5[input_name]
input_6 = feat_extract.pad(
processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
)
input_6 = input_6[input_name]
input_7 = feat_extract.pad(
processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
)
input_7 = input_7[input_name]
self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(input_5))
self.assertTrue(_inputs_have_equal_length(input_6))
self.assertTrue(_inputs_are_equal(input_5, input_6))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(input_7))
self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(ValueError):
feat_extract.pad(processed_features, truncation=True)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(ValueError):
feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(ValueError):
feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
pad_to_multiple_of = 12
input_8 = feat_extract.pad(
processed_features,
padding="max_length",
max_length=len(speech_inputs[0]),
pad_to_multiple_of=pad_to_multiple_of,
truncation=True,
)
input_8 = input_8[input_name]
input_9 = feat_extract.pad(
processed_features,
padding="max_length",
max_length=len(speech_inputs[0]),
pad_to_multiple_of=pad_to_multiple_of,
)
input_9 = input_9[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
expected_length = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_8[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(input_8))
self.assertFalse(_inputs_have_equal_length(input_9))
def test_padding_from_list(self):
self._check_padding(numpify=False)
def test_padding_from_array(self):
self._check_padding(numpify=True)
def test_truncation_from_list(self):
self._check_truncation(numpify=False)
def test_truncation_from_array(self):
self._check_truncation(numpify=True)
@require_torch
def test_padding_accepts_tensors_pt(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
def test_padding_accepts_tensors_tf(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
input_name = feat_extract.model_input_names[0]
processed_features = BatchFeature({input_name: speech_inputs})
input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]
self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
def test_attention_mask(self):
feat_dict = self.feat_extract_dict
feat_dict["return_attention_mask"] = True
feat_extract = self.feature_extraction_class(**feat_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
input_lengths = [len(x) for x in speech_inputs]
input_name = feat_extract.model_input_names[0]
processed = BatchFeature({input_name: speech_inputs})
processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
self.assertIn("attention_mask", processed)
self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
def test_attention_mask_with_truncation(self):
feat_dict = self.feat_extract_dict
feat_dict["return_attention_mask"] = True
feat_extract = self.feature_extraction_class(**feat_dict)
speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
input_lengths = [len(x) for x in speech_inputs]
input_name = feat_extract.model_input_names[0]
processed = BatchFeature({input_name: speech_inputs})
max_length = min(input_lengths)
processed_pad = feat_extract.pad(
processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
)
self.assertIn("attention_mask", processed_pad)
self.assertListEqual(
list(processed_pad.attention_mask.shape), list((processed_pad[input_name].shape[0], max_length))
)
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Volume Code.
"""
import cStringIO
import mox
import shutil
import tempfile
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common.notifier import test_notifier
from nova.openstack.common import rpc
import nova.policy
from nova import quota
from nova import test
import nova.volume.api
QUOTAS = quota.QUOTAS
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class VolumeTestCase(test.TestCase):
"""Test Case for volumes."""
def setUp(self):
super(VolumeTestCase, self).setUp()
self.compute = importutils.import_object(FLAGS.compute_manager)
vol_tmpdir = tempfile.mkdtemp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
volumes_dir=vol_tmpdir)
self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
['nova.openstack.common.notifier.test_notifier'])
self.volume = importutils.import_object(FLAGS.volume_manager)
self.context = context.get_admin_context()
instance = db.instance_create(self.context, {})
self.instance_id = instance['id']
self.instance_uuid = instance['uuid']
test_notifier.NOTIFICATIONS = []
def tearDown(self):
try:
shutil.rmtree(FLAGS.volumes_dir)
except OSError, e:
pass
db.instance_destroy(self.context, self.instance_uuid)
notifier_api._reset_drivers()
super(VolumeTestCase, self).tearDown()
@staticmethod
def _create_volume(size=0, snapshot_id=None):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['snapshot_id'] = snapshot_id
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(context.get_admin_context(), vol)
def test_ec2_uuid_mapping(self):
ec2_vol = db.ec2_volume_create(context.get_admin_context(),
'aaaaaaaa-bbbb-bbbb-bbbb-aaaaaaaaaaaa', 5)
self.assertEqual(5, ec2_vol['id'])
self.assertEqual('aaaaaaaa-bbbb-bbbb-bbbb-aaaaaaaaaaaa',
db.get_volume_uuid_by_ec2_id(context.get_admin_context(), 5))
ec2_vol = db.ec2_volume_create(context.get_admin_context(),
'aaaaaaaa-bbbb-bbbb-bbbb-aaaaaaaaaaaa', 1)
self.assertEqual(1, ec2_vol['id'])
ec2_vol = db.ec2_volume_create(context.get_admin_context(),
'aaaaaaaa-bbbb-bbbb-bbbb-aaaaaaaaazzz')
self.assertEqual(6, ec2_vol['id'])
def test_create_delete_volume(self):
"""Test volume can be created and deleted."""
# Need to stub out reserve, commit, and rollback
def fake_reserve(context, expire=None, **deltas):
return ["RESERVATION"]
def fake_commit(context, reservations):
pass
def fake_rollback(context, reservations):
pass
self.stubs.Set(QUOTAS, "reserve", fake_reserve)
self.stubs.Set(QUOTAS, "commit", fake_commit)
self.stubs.Set(QUOTAS, "rollback", fake_rollback)
volume = self._create_volume()
volume_id = volume['id']
self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
self.volume.create_volume(self.context, volume_id)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
volume_id).id)
self.volume.delete_volume(self.context, volume_id)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 4)
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume_id)
def _do_test_create_over_quota(self, resource, expected):
"""Test volume creation over quota."""
def fake_reserve(context, **deltas):
kwargs = dict(overs=[resource],
quotas=dict(gigabytes=1000, volumes=10),
usages=dict(gigabytes=dict(reserved=1, in_use=999),
volumes=dict(reserved=1, in_use=9)))
raise exception.OverQuota(**kwargs)
def fake_commit(context, reservations):
self.fail('should not commit over quota')
self.stubs.Set(QUOTAS, 'reserve', fake_reserve)
self.stubs.Set(QUOTAS, 'commit', fake_commit)
volume_api = nova.volume.api.API()
self.assertRaises(expected,
volume_api.create,
self.context,
2,
'name',
'description')
def test_create_volumes_over_quota(self):
self._do_test_create_over_quota('volumes',
exception.VolumeLimitExceeded)
def test_create_gigabytes_over_quota(self):
self._do_test_create_over_quota('gigabytes',
exception.VolumeSizeTooLarge)
def test_delete_busy_volume(self):
"""Test volume survives deletion if driver reports it as busy."""
volume = self._create_volume()
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
self.volume.driver.delete_volume(mox.IgnoreArg()).AndRaise(
exception.VolumeIsBusy)
self.mox.ReplayAll()
res = self.volume.delete_volume(self.context, volume_id)
self.assertEqual(True, res)
volume_ref = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(volume_id, volume_ref.id)
self.assertEqual("available", volume_ref.status)
self.mox.UnsetStubs()
self.volume.delete_volume(self.context, volume_id)
def test_create_volume_from_snapshot(self):
"""Test volume can be created from a snapshot."""
volume_src = self._create_volume()
self.volume.create_volume(self.context, volume_src['id'])
snapshot_id = self._create_snapshot(volume_src['id'])
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot_id)
volume_dst = self._create_volume(0, snapshot_id)
self.volume.create_volume(self.context, volume_dst['id'], snapshot_id)
self.assertEqual(volume_dst['id'],
db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(snapshot_id, db.volume_get(
context.get_admin_context(),
volume_dst['id']).snapshot_id)
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_src['id'])
def test_too_big_volume(self):
"""Ensure failure if too large a volume is requested."""
# FIXME(vish): validation needs to move into the data layer in
# volume_create
return True
try:
volume = self._create_volume('1001')
self.volume.create_volume(self.context, volume)
self.fail("Should have thrown TypeError")
except TypeError:
pass
def test_too_many_volumes(self):
"""Ensure that NoMoreTargets is raised when we run out of iSCSI targets."""
vols = []
total_slots = FLAGS.iscsi_num_targets
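# Fill every available iSCSI target so the extra create below has nowhere to go.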
for _index in xrange(total_slots):
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
vols.append(volume['id'])
volume = self._create_volume()
self.assertRaises(db.NoMoreTargets,
self.volume.create_volume,
self.context,
volume['id'])
db.volume_destroy(context.get_admin_context(), volume['id'])
for volume_id in vols:
self.volume.delete_volume(self.context, volume_id)
def test_run_attach_detach_volume(self):
"""Make sure volume can be attached and detached from instance."""
inst = {}
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type_id'] = '2' # m1.tiny
inst['ami_launch_index'] = 0
instance = db.instance_create(self.context, {})
instance_id = instance['id']
instance_uuid = instance['uuid']
mountpoint = "/dev/sdf"
volume = self._create_volume()
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
if FLAGS.fake_tests:
db.volume_attached(self.context, volume_id, instance_uuid,
mountpoint)
else:
self.compute.attach_volume(self.context,
instance_uuid,
volume_id,
mountpoint)
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertRaises(exception.NovaException,
self.volume.delete_volume,
self.context,
volume_id)
if FLAGS.fake_tests:
db.volume_detached(self.context, volume_id)
else:
self.compute.detach_volume(self.context,
instance_uuid,
volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual(vol['status'], "available")
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
db.instance_destroy(self.context, instance_uuid)
def test_concurrent_volumes_get_different_targets(self):
"""Ensure multiple concurrent volumes get different targets."""
volume_ids = []
targets = []
def _check(volume_id):
"""Make sure targets aren't duplicated."""
volume_ids.append(volume_id)
admin_context = context.get_admin_context()
iscsi_target = db.volume_get_iscsi_target_num(admin_context,
volume_id)
self.assert_(iscsi_target not in targets)
targets.append(iscsi_target)
LOG.debug(_("Target %s allocated"), iscsi_target)
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume = self._create_volume()
d = self.volume.create_volume(self.context, volume['id'])
_check(d)
for volume_id in volume_ids:
self.volume.delete_volume(self.context, volume_id)
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
@staticmethod
def _create_snapshot(volume_id, size='0'):
"""Create a snapshot object."""
snap = {}
snap['volume_size'] = size
snap['user_id'] = 'fake'
snap['project_id'] = 'fake'
snap['volume_id'] = volume_id
snap['status'] = "creating"
return db.snapshot_create(context.get_admin_context(), snap)['id']
def test_create_delete_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
snapshot_id = self._create_snapshot(volume['id'])
self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
self.assertEqual(snapshot_id,
db.snapshot_get(context.get_admin_context(),
snapshot_id).id)
self.volume.delete_snapshot(self.context, snapshot_id)
self.assertRaises(exception.NotFound,
db.snapshot_get,
self.context,
snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
def test_cant_delete_volume_with_snapshots(self):
"""Test that a volume with snapshots cannot be deleted."""
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
snapshot_id = self._create_snapshot(volume['id'])
self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
self.assertEqual(snapshot_id,
db.snapshot_get(context.get_admin_context(),
snapshot_id).id)
volume['status'] = 'available'
volume['host'] = 'fakehost'
volume_api = nova.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.delete,
self.context,
volume)
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
def test_can_delete_errored_snapshot(self):
"""Test that a snapshot in error state can be deleted."""
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
snapshot_id = self._create_snapshot(volume['id'])
self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
snapshot = db.snapshot_get(context.get_admin_context(),
snapshot_id)
volume_api = nova.volume.api.API()
snapshot['status'] = 'badstatus'
self.assertRaises(exception.InvalidVolume,
volume_api.delete_snapshot,
self.context,
snapshot)
snapshot['status'] = 'error'
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
def test_create_snapshot_force(self):
"""Test snapshot in use can be created forcibly."""
def fake_cast(ctxt, topic, msg):
pass
self.stubs.Set(rpc, 'cast', fake_cast)
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
db.volume_attached(self.context, volume['id'], self.instance_uuid,
'/dev/sda1')
volume_api = nova.volume.api.API()
volume = volume_api.get(self.context, volume['id'])
self.assertRaises(exception.InvalidVolume,
volume_api.create_snapshot,
self.context, volume,
'fake_name', 'fake_description')
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume,
'fake_name',
'fake_description')
db.snapshot_destroy(self.context, snapshot_ref['id'])
db.volume_destroy(self.context, volume['id'])
def test_delete_busy_snapshot(self):
"""Test snapshot survives deletion if the driver reports it as busy."""
volume = self._create_volume()
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
snapshot_id = self._create_snapshot(volume_id)
self.volume.create_snapshot(self.context, volume_id, snapshot_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
self.volume.driver.delete_snapshot(mox.IgnoreArg()).AndRaise(
exception.SnapshotIsBusy)
self.mox.ReplayAll()
self.volume.delete_snapshot(self.context, snapshot_id)
snapshot_ref = db.snapshot_get(self.context, snapshot_id)
self.assertEqual(snapshot_id, snapshot_ref.id)
self.assertEqual("available", snapshot_ref.status)
self.mox.UnsetStubs()
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_id)
def test_create_volume_usage_notification(self):
"""Ensure create volume generates appropriate usage notification"""
volume = self._create_volume()
volume_id = volume['id']
self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
self.volume.create_volume(self.context, volume_id)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['event_type'], 'volume.create.start')
payload = msg['payload']
self.assertEquals(payload['status'], 'creating')
msg = test_notifier.NOTIFICATIONS[1]
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'volume.create.end')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], volume['project_id'])
self.assertEquals(payload['user_id'], volume['user_id'])
self.assertEquals(payload['volume_id'], volume['id'])
self.assertEquals(payload['status'], 'available')
self.assertEquals(payload['size'], volume['size'])
self.assertTrue('display_name' in payload)
self.assertTrue('snapshot_id' in payload)
self.assertTrue('launched_at' in payload)
self.assertTrue('created_at' in payload)
self.volume.delete_volume(self.context, volume_id)
class DriverTestCase(test.TestCase):
"""Base Test class for Drivers."""
driver_name = "nova.volume.driver.FakeBaseDriver"
def setUp(self):
super(DriverTestCase, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(volume_driver=self.driver_name,
volumes_dir=vol_tmpdir,
logging_default_format_string="%(message)s")
self.volume = importutils.import_object(FLAGS.volume_manager)
self.context = context.get_admin_context()
self.output = ""
def _fake_execute(_command, *_args, **_kwargs):
"""Fake _execute."""
return self.output, None
self.volume.driver.set_execute(_fake_execute)
log = logging.getLogger('nova')
self.stream = cStringIO.StringIO()
log.logger.addHandler(logging.logging.StreamHandler(self.stream))
inst = {}
instance = db.instance_create(self.context, {})
self.instance_id = instance['id']
self.instance_uuid = instance['uuid']
def tearDown(self):
try:
shutil.rmtree(FLAGS.volumes_dir)
except OSError, e:
pass
super(DriverTestCase, self).tearDown()
def _attach_volume(self):
"""Attach volumes to an instance. This function also sets
a fake log message."""
return []
def _detach_volume(self, volume_id_list):
"""Detach volumes from an instance."""
for volume_id in volume_id_list:
db.volume_detached(self.context, volume_id)
self.volume.delete_volume(self.context, volume_id)
class VolumeDriverTestCase(DriverTestCase):
"""Test case for VolumeDriver"""
driver_name = "nova.volume.driver.VolumeDriver"
def test_delete_busy_volume(self):
"""Test deleting a busy volume."""
self.stubs.Set(self.volume.driver, '_volume_not_present',
lambda x: False)
self.stubs.Set(self.volume.driver, '_delete_volume',
lambda x, y: False)
# Want DriverTestCase._fake_execute to return 'o' so that
# volume.driver.delete_volume() raises the VolumeIsBusy exception.
self.output = 'o'
self.assertRaises(exception.VolumeIsBusy,
self.volume.driver.delete_volume,
{'name': 'test1', 'size': 1024})
# when DriverTestCase._fake_execute returns something other than
# 'o' volume.driver.delete_volume() does not raise an exception.
self.output = 'x'
self.volume.driver.delete_volume({'name': 'test1', 'size': 1024})
class ISCSITestCase(DriverTestCase):
"""Test Case for ISCSIDriver"""
driver_name = "nova.volume.driver.ISCSIDriver"
def _attach_volume(self):
"""Attach volumes to an instance. This function also sets
a fake log message."""
volume_id_list = []
for index in xrange(3):
vol = {}
vol['size'] = 0
vol_ref = db.volume_create(self.context, vol)
self.volume.create_volume(self.context, vol_ref['id'])
vol_ref = db.volume_get(self.context, vol_ref['id'])
# each volume has a different mountpoint
mountpoint = "/dev/sd" + chr((ord('b') + index))
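# i.e. /dev/sdb, /dev/sdc, /dev/sdd for index 0..2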
db.volume_attached(self.context, vol_ref['id'], self.instance_uuid,
mountpoint)
volume_id_list.append(vol_ref['id'])
return volume_id_list
def test_check_for_export_with_no_volume(self):
"""No log message when no volume is attached to an instance."""
self.stream.truncate(0)
self.volume.check_for_export(self.context, self.instance_id)
self.assertEqual(self.stream.getvalue(), '')
def test_check_for_export_with_all_volume_exported(self):
"""No log message when all the processes are running."""
volume_id_list = self._attach_volume()
self.mox.StubOutWithMock(self.volume.driver.tgtadm, 'show_target')
for i in volume_id_list:
tid = db.volume_get_iscsi_target_num(self.context, i)
self.volume.driver.tgtadm.show_target(tid)
self.stream.truncate(0)
self.mox.ReplayAll()
self.volume.check_for_export(self.context, self.instance_id)
self.assertEqual(self.stream.getvalue(), '')
self.mox.UnsetStubs()
self._detach_volume(volume_id_list)
def test_check_for_export_with_some_volume_missing(self):
"""Output a warning message when some volumes are not recognized
by ietd."""
volume_id_list = self._attach_volume()
tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
self.mox.StubOutWithMock(self.volume.driver.tgtadm, 'show_target')
self.volume.driver.tgtadm.show_target(tid).AndRaise(
exception.ProcessExecutionError())
self.mox.ReplayAll()
self.assertRaises(exception.ProcessExecutionError,
self.volume.check_for_export,
self.context,
self.instance_id)
msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
self.assertTrue(0 <= self.stream.getvalue().find(msg))
self.mox.UnsetStubs()
self._detach_volume(volume_id_list)
class VolumePolicyTestCase(test.TestCase):
def setUp(self):
super(VolumePolicyTestCase, self).setUp()
nova.policy.reset()
nova.policy.init()
self.context = context.get_admin_context()
def tearDown(self):
super(VolumePolicyTestCase, self).tearDown()
nova.policy.reset()
def _set_rules(self, rules):
nova.common.policy.set_brain(nova.common.policy.HttpBrain(rules))
def test_check_policy(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
nova.policy.enforce(self.context, 'volume:attach', target)
self.mox.ReplayAll()
nova.volume.api.check_policy(self.context, 'attach')
def test_check_policy_with_target(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'id': 2,
}
nova.policy.enforce(self.context, 'volume:attach', target)
self.mox.ReplayAll()
nova.volume.api.check_policy(self.context, 'attach', {'id': 2})
## dispatcher.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: dispatcher.py,v 1.42 2007/05/18 23:18:36 normanr Exp $
"""
Main xmpppy mechanism. Provides library with methods to assign different handlers
to different XMPP stanzas.
Contains one tunable attribute: DefaultTimeout (25 seconds by default). It defines the time that the
Dispatcher.SendAndWaitForResponse method will wait for a reply stanza before giving up.
"""
import simplexml,time,sys,re
from protocol import *
from client import PlugIn
from xml.parsers.expat import ExpatError
DefaultTimeout=25
ID=0
class Dispatcher(PlugIn):
""" Subclass of PlugIn. Handles the XMPP stream, i.e. is aware of stream headers.
Can be plugged out/in to restart these headers (used for SASL, for example). """
def __init__(self):
PlugIn.__init__(self)
DBG_LINE='dispatcher'
self.handlers={}
self._expected={}
self._defaultHandler=None
self._pendingExceptions=[]
self._eventHandler=None
self._cycleHandlers=[]
self._exported_methods=[self.Process,self.RegisterHandler,self.RegisterDefaultHandler,\
self.RegisterEventHandler,self.UnregisterCycleHandler,self.RegisterCycleHandler,\
self.RegisterHandlerOnce,self.UnregisterHandler,self.RegisterProtocol,\
self.WaitForResponse,self.SendAndWaitForResponse,self.send,self.disconnect,\
self.SendAndCallForResponse, ]
def dumpHandlers(self):
""" Return the set of user-registered callbacks in its internal format.
Used within the library to carry user handlers set over Dispatcher replugins. """
return self.handlers
def restoreHandlers(self,handlers):
""" Restores user-registered callbacks structure from dump previously obtained via dumpHandlers.
Used within the library to carry user handlers set over Dispatcher replugins. """
self.handlers=handlers
def _init(self):
""" Registers default namespaces/protocols/handlers. Used internally. """
self.RegisterNamespace('unknown')
self.RegisterNamespace(NS_STREAMS)
self.RegisterNamespace(self._owner.defaultNamespace)
self.RegisterProtocol('iq',Iq)
self.RegisterProtocol('presence',Presence)
self.RegisterProtocol('message',Message)
self.RegisterDefaultHandler(self.returnStanzaHandler)
self.RegisterHandler('error',self.streamErrorHandler,xmlns=NS_STREAMS)
def plugin(self, owner):
""" Plug the Dispatcher instance into Client class instance and send initial stream header. Used internally."""
self._init()
for method in self._old_owners_methods:
if method.__name__=='send': self._owner_send=method; break
self._owner.lastErrNode=None
self._owner.lastErr=None
self._owner.lastErrCode=None
self.StreamInit()
def plugout(self):
""" Prepares instance to be destructed. """
self.Stream.dispatch=None
self.Stream.DEBUG=None
self.Stream.features=None
self.Stream.destroy()
def StreamInit(self):
""" Send an initial stream header. """
self.Stream=simplexml.NodeBuilder()
self.Stream._dispatch_depth=2
self.Stream.dispatch=self.dispatch
self.Stream.stream_header_received=self._check_stream_start
self._owner.debug_flags.append(simplexml.DBG_NODEBUILDER)
self.Stream.DEBUG=self._owner.DEBUG
self.Stream.features=None
self._metastream=Node('stream:stream')
self._metastream.setNamespace(self._owner.Namespace)
self._metastream.setAttr('version','1.0')
self._metastream.setAttr('xmlns:stream',NS_STREAMS)
self._metastream.setAttr('to',self._owner.Server)
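# str(self._metastream) renders a self-closed element; dropping the trailing "/>" (the last
# two characters) and re-adding ">" sends the stream header as an open tag.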
self._owner.send("<?xml version='1.0'?>%s>"%str(self._metastream)[:-2])
def _check_stream_start(self,ns,tag,attrs):
if ns<>NS_STREAMS or tag<>'stream':
raise ValueError('Incorrect stream start: (%s,%s). Terminating.'%(tag,ns))
def Process(self, timeout=0):
""" Check the incoming stream for waiting data. If "timeout" is positive, block for at most this time.
Returns:
1) length of processed data if some data were processed;
2) '0' string if no data were processed but link is alive;
3) 0 (zero) if underlying connection is closed.
Take note that if a disconnection is detected during a Process() call,
disconnect handlers are called automatically.
"""
for handler in self._cycleHandlers: handler(self)
if len(self._pendingExceptions) > 0:
_pendingException = self._pendingExceptions.pop()
raise _pendingException[0], _pendingException[1], _pendingException[2]
if self._owner.Connection.pending_data(timeout):
try: data=self._owner.Connection.receive()
except IOError: return
# # Begin gh0st addition
# # Temporary workaround
# # Todo: fix some false positives.
# # 2007-12-25, Added stanza size checking.
# is_ok = 1
# if not (re.search('stream:',data) or re.search('item',data)):
# try:
# simplexml.NodeBuilder(data)
# except ExpatError:
# self.DEBUG(10*'=' + '<< MALFORMED XML >>' + 10*'=')
# self.DEBUG(data)
# self.DEBUG(10*'=' + '<< END MALFORMED XML >>' + 10*'=')
# is_ok = 0
# if is_ok == 1:
# self.Stream.Parse(data)
# # End of gh0st addition
self.Stream.Parse(data)
if len(self._pendingExceptions) > 0:
_pendingException = self._pendingExceptions.pop()
raise _pendingException[0], _pendingException[1], _pendingException[2]
if data: return len(data)
return '0' # It means that nothing is received but link is alive.
def RegisterNamespace(self,xmlns,order='info'):
""" Creates internal structures for newly registered namespace.
You can register handlers for this namespace afterwards. By default one namespace is
already registered (jabber:client or jabber:component:accept, depending on context). """
self.DEBUG('Registering namespace "%s"'%xmlns,order)
self.handlers[xmlns]={}
self.RegisterProtocol('unknown',Protocol,xmlns=xmlns)
self.RegisterProtocol('default',Protocol,xmlns=xmlns)
def RegisterProtocol(self,tag_name,Proto,xmlns=None,order='info'):
""" Used to declare some top-level stanza name to dispatcher.
Needed to start registering handlers for such stanzas.
Iq, message and presence protocols are registered by default. """
if not xmlns: xmlns=self._owner.defaultNamespace
self.DEBUG('Registering protocol "%s" as %s(%s)'%(tag_name,Proto,xmlns), order)
self.handlers[xmlns][tag_name]={type:Proto, 'default':[]}
def RegisterNamespaceHandler(self,xmlns,handler,typ='',ns='', makefirst=0, system=0):
""" Register handler for processing all stanzas for specified namespace. """
self.RegisterHandler('default', handler, typ, ns, xmlns, makefirst, system)
def RegisterHandler(self,name,handler,typ='',ns='',xmlns=None, makefirst=0, system=0):
"""Register user callback as stanzas handler of declared type. Callback must take
(if chained, see later) arguments: dispatcher instance (for replying), the incoming
stanza and the return of previous handlers.
The callback must raise xmpp.NodeProcessed just before returning if it wants to prevent
further callbacks from being called with the same stanza as argument _and_, more importantly,
to prevent the library from returning the stanza to the sender with an error set.
Arguments:
"name" - name of stanza. F.e. "iq".
"handler" - user callback.
"typ" - value of stanza's "type" attribute. If not specified, any value matches.
"ns" - namespace of child that the stanza must contain.
"chained" - chain together output of several handlers.
"makefirst" - insert handler at the beginning of the handlers list instead of
adding it to the end. Note that more common handlers (i.e. without "typ" and "ns")
will be called first nevertheless.
"system" - call handler even if a NodeProcessed exception was raised already.
"""
if not xmlns: xmlns=self._owner.defaultNamespace
self.DEBUG('Registering handler %s for "%s" type->%s ns->%s(%s)'%(handler,name,typ,ns,xmlns), 'info')
if not typ and not ns: typ='default'
if not self.handlers.has_key(xmlns): self.RegisterNamespace(xmlns,'warn')
if not self.handlers[xmlns].has_key(name): self.RegisterProtocol(name,Protocol,xmlns,'warn')
if not self.handlers[xmlns][name].has_key(typ+ns): self.handlers[xmlns][name][typ+ns]=[]
if makefirst: self.handlers[xmlns][name][typ+ns].insert(0,{'func':handler,'system':system})
else: self.handlers[xmlns][name][typ+ns].append({'func':handler,'system':system})
def RegisterHandlerOnce(self,name,handler,typ='',ns='',xmlns=None,makefirst=0, system=0):
""" Unregister handler after first call (not implemented yet). """
if not xmlns: xmlns=self._owner.defaultNamespace
self.RegisterHandler(name, handler, typ, ns, xmlns, makefirst, system)
def UnregisterHandler(self,name,handler,typ='',ns='',xmlns=None):
""" Unregister handler. "typ" and "ns" must be specified exactly the same as with registering."""
if not xmlns: xmlns=self._owner.defaultNamespace
if not self.handlers.has_key(xmlns): return
if not typ and not ns: typ='default'
for pack in self.handlers[xmlns][name][typ+ns]:
if handler==pack['func']: break
else: pack=None
try: self.handlers[xmlns][name][typ+ns].remove(pack)
except ValueError: pass
def RegisterDefaultHandler(self,handler):
""" Specify the handler that will be used if no NodeProcessed exception were raised.
This is returnStanzaHandler by default. """
self._defaultHandler=handler
def RegisterEventHandler(self,handler):
""" Register handler that will process events. F.e. "FILERECEIVED" event. """
self._eventHandler=handler
def returnStanzaHandler(self,conn,stanza):
""" Return stanza back to the sender with <feature-not-implemented/> error set. """
if stanza.getType() in ['get','set']:
conn.send(Error(stanza,ERR_FEATURE_NOT_IMPLEMENTED))
def streamErrorHandler(self,conn,error):
name,text='error',error.getData()
for tag in error.getChildren():
if tag.getNamespace()==NS_XMPP_STREAMS:
if tag.getName()=='text': text=tag.getData()
else: name=tag.getName()
if name in stream_exceptions.keys(): exc=stream_exceptions[name]
else: exc=StreamError
raise exc((name,text))
def RegisterCycleHandler(self,handler):
""" Register handler that will be called on every Dispatcher.Process() call. """
if handler not in self._cycleHandlers: self._cycleHandlers.append(handler)
def UnregisterCycleHandler(self,handler):
""" Unregister handler that will is called on every Dispatcher.Process() call."""
if handler in self._cycleHandlers: self._cycleHandlers.remove(handler)
def Event(self,realm,event,data):
""" Raise some event. Takes three arguments:
1) "realm" - scope of event. Usually a namespace.
2) "event" - the event itself. F.e. "SUCESSFULL SEND".
3) data that comes along with event. Depends on event."""
if self._eventHandler: self._eventHandler(realm,event,data)
def dispatch(self,stanza,session=None,direct=0):
""" Main procedure that performs XMPP stanza recognition and calling apppropriate handlers for it.
Called internally. """
if not session: session=self
session.Stream._mini_dom=None
name=stanza.getName()
if not direct and self._owner._route:
if name == 'route':
if stanza.getAttr('error') == None:
if len(stanza.getChildren()) == 1:
stanza = stanza.getChildren()[0]
name=stanza.getName()
else:
for each in stanza.getChildren():
self.dispatch(each,session,direct=1)
return
elif name == 'presence':
return
elif name in ('features','bind'):
pass
else:
raise UnsupportedStanzaType(name)
if name=='features': session.Stream.features=stanza
xmlns=stanza.getNamespace()
if not self.handlers.has_key(xmlns):
self.DEBUG("Unknown namespace: " + xmlns,'warn')
xmlns='unknown'
if not self.handlers[xmlns].has_key(name):
self.DEBUG("Unknown stanza: " + name,'warn')
name='unknown'
else:
self.DEBUG("Got %s/%s stanza"%(xmlns,name), 'ok')
if stanza.__class__.__name__=='Node': stanza=self.handlers[xmlns][name][type](node=stanza)
typ=stanza.getType()
if not typ: typ=''
stanza.props=stanza.getProperties()
ID=stanza.getID()
session.DEBUG("Dispatching %s stanza with type->%s props->%s id->%s"%(name,typ,stanza.props,ID),'ok')
list=['default'] # we will use all handlers:
if self.handlers[xmlns][name].has_key(typ): list.append(typ) # from very common...
for prop in stanza.props:
if self.handlers[xmlns][name].has_key(prop): list.append(prop)
if typ and self.handlers[xmlns][name].has_key(typ+prop): list.append(typ+prop) # ...to very particular
chain=self.handlers[xmlns]['default']['default']
for key in list:
if key: chain = chain + self.handlers[xmlns][name][key]
output=''
if session._expected.has_key(ID):
user=0
if type(session._expected[ID])==type(()):
cb,args=session._expected[ID]
session.DEBUG("Expected stanza arrived. Callback %s(%s) found!"%(cb,args),'ok')
try: cb(session,stanza,**args)
except Exception, typ:
if typ.__class__.__name__<>'NodeProcessed': raise
else:
session.DEBUG("Expected stanza arrived!",'ok')
session._expected[ID]=stanza
else: user=1
for handler in chain:
if user or handler['system']:
try:
handler['func'](session,stanza)
except Exception, typ:
if typ.__class__.__name__<>'NodeProcessed':
self._pendingExceptions.insert(0, sys.exc_info())
return
user=0
if user and self._defaultHandler: self._defaultHandler(session,stanza)
def WaitForResponse(self, ID, timeout=DefaultTimeout):
""" Block and wait until stanza with specific "id" attribute will come.
            If no such stanza arrives within the timeout, None is returned.
            If the operation fails for some reason, the owner's attributes
            lastErrNode, lastErr and lastErrCode are set accordingly. """
self._expected[ID]=None
has_timed_out=0
abort_time=time.time() + timeout
self.DEBUG("Waiting for ID:%s with timeout %s..." % (ID,timeout),'wait')
while not self._expected[ID]:
if not self.Process(0.04):
self._owner.lastErr="Disconnect"
return None
if time.time() > abort_time:
self._owner.lastErr="Timeout"
return None
response=self._expected[ID]
del self._expected[ID]
if response.getErrorCode():
self._owner.lastErrNode=response
self._owner.lastErr=response.getError()
self._owner.lastErrCode=response.getErrorCode()
return response
def SendAndWaitForResponse(self, stanza, timeout=DefaultTimeout):
""" Put stanza on the wire and wait for recipient's response to it. """
return self.WaitForResponse(self.send(stanza),timeout)
def SendAndCallForResponse(self, stanza, func, args={}):
""" Put stanza on the wire and call back when recipient replies.
Additional callback arguments can be specified in args. """
self._expected[self.send(stanza)]=(func,args)
def send(self,stanza):
""" Serialise stanza and put it on the wire. Assign an unique ID to it before send.
Returns assigned ID."""
if type(stanza) in [type(''), type(u'')]: return self._owner_send(stanza)
if not isinstance(stanza,Protocol): _ID=None
elif not stanza.getID():
global ID
ID+=1
_ID=`ID`
stanza.setID(_ID)
else: _ID=stanza.getID()
if self._owner._registered_name and not stanza.getAttr('from'): stanza.setAttr('from',self._owner._registered_name)
if self._owner._route and stanza.getName()!='bind':
to=self._owner.Server
if stanza.getTo() and stanza.getTo().getDomain():
to=stanza.getTo().getDomain()
frm=stanza.getFrom()
if frm.getDomain():
frm=frm.getDomain()
route=Protocol('route',to=to,frm=frm,payload=[stanza])
stanza=route
stanza.setNamespace(self._owner.Namespace)
stanza.setParent(self._metastream)
self._owner_send(stanza)
return _ID
def disconnect(self):
""" Send a stream terminator and and handle all incoming stanzas before stream closure. """
self._owner_send('</stream:stream>')
while self.Process(1): pass
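# Usage sketch (added; not part of the original module): how a handler registered
# via RegisterHandler() above is typically wired into an xmpppy client. The server
# and account names are placeholders; raising NodeProcessed from the handler stops
# the remaining non-system handlers for that stanza, as documented above.
#
#     import xmpp
#
#     def on_message(conn, stanza):
#         conn.send(xmpp.Message(stanza.getFrom(), 'got: %s' % stanza.getBody()))
#         raise xmpp.NodeProcessed
#
#     client = xmpp.Client('example.com', debug=[])
#     client.connect()
#     client.auth('user', 'secret', 'bot')
#     client.RegisterHandler('message', on_message, typ='chat')
#     while client.Process(1):
#         pass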
|
|
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from oslo_log import log as logging
from ceilometer.compute.virt import inspector
from ceilometer.i18n import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class HyperVException(inspector.InspectorException):
pass
class UtilsV2(object):
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
_PROC_SETTING = 'Msvm_ProcessorSettingData'
_MEMORY_SETTING = "Msvm_MemorySettingData"
_SYNTH_ETH_PORT = 'Msvm_SyntheticEthernetPortSettingData'
_ETH_PORT_ALLOC = 'Msvm_EthernetPortAllocationSettingData'
_PORT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
_STORAGE_ALLOC = 'Msvm_StorageAllocationSettingData'
_VS_SETTING_DATA = 'Msvm_VirtualSystemSettingData'
_METRICS_ME = 'Msvm_MetricForME'
_BASE_METRICS_VALUE = 'Msvm_BaseMetricValue'
_CPU_METRIC_NAME = 'Aggregated Average CPU Utilization'
_MEMORY_METRIC_NAME = 'Aggregated Average Memory Utilization'
_NET_IN_METRIC_NAME = 'Filtered Incoming Network Traffic'
_NET_OUT_METRIC_NAME = 'Filtered Outgoing Network Traffic'
# Disk metrics are supported from Hyper-V 2012 R2
_DISK_RD_METRIC_NAME = 'Disk Data Read'
_DISK_WR_METRIC_NAME = 'Disk Data Written'
_DISK_LATENCY_METRIC_NAME = 'Average Disk Latency'
_DISK_IOPS_METRIC_NAME = 'Average Normalized Disk Throughput'
def __init__(self, host='.'):
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._init_cimv2_wmi_conn(host)
self._host_cpu_info = None
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
def _init_cimv2_wmi_conn(self, host):
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
def get_host_cpu_info(self):
if not self._host_cpu_info:
host_cpus = self._conn_cimv2.Win32_Processor()
self._host_cpu_info = (host_cpus[0].MaxClockSpeed, len(host_cpus))
return self._host_cpu_info
def get_all_vms(self):
vms = [(v.ElementName, v.Name) for v in
self._conn.Msvm_ComputerSystem(['ElementName', 'Name'],
Caption="Virtual Machine")]
return vms
def get_cpu_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
cpu_sd = self._get_vm_resources(vm, self._PROC_SETTING)[0]
cpu_metrics_def = self._get_metric_def(self._CPU_METRIC_NAME)
cpu_metric_aggr = self._get_metrics(vm, cpu_metrics_def)
cpu_used = 0
if cpu_metric_aggr:
cpu_used = int(cpu_metric_aggr[0].MetricValue)
return (cpu_used,
int(cpu_sd.VirtualQuantity),
int(vm.OnTimeInMilliseconds))
def get_memory_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
memory_def = self._get_metric_def(self._MEMORY_METRIC_NAME)
metric_memory = self._get_metrics(vm, memory_def)
memory_usage = 0
if metric_memory:
memory_usage = int(metric_memory[0].MetricValue)
return memory_usage
def get_vnic_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
ports = self._get_vm_resources(vm, self._ETH_PORT_ALLOC)
vnics = self._get_vm_resources(vm, self._SYNTH_ETH_PORT)
metric_def_in = self._get_metric_def(self._NET_IN_METRIC_NAME)
metric_def_out = self._get_metric_def(self._NET_OUT_METRIC_NAME)
for port in ports:
vnic = [v for v in vnics if port.Parent == v.path_()][0]
metric_value_instances = self._get_metric_value_instances(
port.associators(wmi_result_class=self._PORT_ACL_SET_DATA),
self._BASE_METRICS_VALUE)
metric_values = self._sum_metric_values_by_defs(
metric_value_instances, [metric_def_in, metric_def_out])
yield {
'rx_mb': metric_values[0],
'tx_mb': metric_values[1],
'element_name': vnic.ElementName,
'address': vnic.Address
}
def get_disk_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
metric_def_r = self._get_metric_def(self._DISK_RD_METRIC_NAME)
metric_def_w = self._get_metric_def(self._DISK_WR_METRIC_NAME)
disks = self._get_vm_resources(vm, self._STORAGE_ALLOC)
for disk in disks:
metric_values = self._get_metric_values(
disk, [metric_def_r, metric_def_w])
            # This is e.g. the VHD file location
if disk.HostResource:
host_resource = disk.HostResource[0]
yield {
# Values are in megabytes
'read_mb': metric_values[0],
'write_mb': metric_values[1],
'instance_id': disk.InstanceID,
'host_resource': host_resource
}
def get_disk_latency_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
metric_latency_def = self._get_metric_def(
self._DISK_LATENCY_METRIC_NAME)
disks = self._get_vm_resources(vm, self._STORAGE_ALLOC)
for disk in disks:
metric_values = self._get_metric_values(
disk, [metric_latency_def])
yield {
'disk_latency': metric_values[0],
'instance_id': disk.InstanceID,
}
def get_disk_iops_count(self, vm_name):
vm = self._lookup_vm(vm_name)
metric_def_iops = self._get_metric_def(self._DISK_IOPS_METRIC_NAME)
disks = self._get_vm_resources(vm, self._STORAGE_ALLOC)
for disk in disks:
metric_values = self._get_metric_values(
disk, [metric_def_iops])
yield {
'iops_count': metric_values[0],
'instance_id': disk.InstanceID,
}
@staticmethod
def _sum_metric_values(metrics):
tot_metric_val = 0
for metric in metrics:
tot_metric_val += int(metric.MetricValue)
return tot_metric_val
def _sum_metric_values_by_defs(self, element_metrics, metric_defs):
metric_values = []
for metric_def in metric_defs:
if metric_def:
metrics = self._filter_metrics(element_metrics, metric_def)
metric_values.append(self._sum_metric_values(metrics))
else:
# In case the metric is not defined on this host
metric_values.append(0)
return metric_values
@staticmethod
def _get_metric_value_instances(elements, result_class):
instances = []
for el in elements:
associators = el.associators(wmi_result_class=result_class)
if associators:
instances.append(associators[0])
return instances
def _get_metric_values(self, element, metric_defs):
element_metrics = element.associators(
wmi_association_class=self._METRICS_ME)
return self._sum_metric_values_by_defs(element_metrics, metric_defs)
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
raise inspector.InstanceNotFoundException(
_('VM %s not found on Hyper-V') % vm_name)
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def _get_metrics(self, element, metric_def):
return self._filter_metrics(
element.associators(
wmi_association_class=self._METRICS_ME), metric_def)
@staticmethod
def _filter_metrics(all_metrics, metric_def):
return [v for v in all_metrics if
v.MetricDefinitionId == metric_def.Id]
def _get_metric_def(self, metric_def):
metric = self._conn.CIM_BaseMetricDefinition(ElementName=metric_def)
if metric:
return metric[0]
def _get_vm_setting_data(self, vm):
vm_settings = vm.associators(
wmi_result_class=self._VS_SETTING_DATA)
# Avoid snapshots
return [s for s in vm_settings if
s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0]
def _get_vm_resources(self, vm, resource_class):
setting_data = self._get_vm_setting_data(vm)
return setting_data.associators(wmi_result_class=resource_class)
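# Usage sketch (added; not part of the original module): a minimal ad-hoc check
# that enumerates the local Hyper-V VMs and prints their aggregated CPU metrics.
# It assumes a Windows host with the Hyper-V role and the `wmi` package available;
# the printed field names are illustrative.
if __name__ == "__main__":
    utils = UtilsV2()
    for element_name, vm_id in utils.get_all_vms():
        cpu_used, vcpu_count, uptime_ms = utils.get_cpu_metrics(element_name)
        print("%s (%s): cpu_used=%s vcpus=%s uptime_ms=%s"
              % (element_name, vm_id, cpu_used, vcpu_count, uptime_ms))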
|
|
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from oslo_db import exception as db_exc
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import joinedload
from neutron._i18n import _LI, _LW
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as n_const
from neutron.common import utils as n_utils
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db as l3agent_sch_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import l3agentscheduler
from neutron import manager
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2 import db as ml2_db
LOG = logging.getLogger(__name__)
class CentralizedSnatL3AgentBinding(model_base.BASEV2):
"""Represents binding between Neutron Centralized SNAT and L3 agents."""
__tablename__ = "csnat_l3_agent_bindings"
router_id = sa.Column(sa.String(36),
sa.ForeignKey("routers.id", ondelete='CASCADE'),
primary_key=True)
l3_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id", ondelete='CASCADE'),
primary_key=True)
host_id = sa.Column(sa.String(255))
csnat_gw_port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete='CASCADE'))
l3_agent = orm.relationship(agents_db.Agent)
csnat_gw_port = orm.relationship(models_v2.Port)
class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
"""Mixin class for L3 DVR scheduler.
DVR currently supports the following use cases:
- East/West (E/W) traffic between VMs: this is handled in a
distributed manner across Compute Nodes without a centralized element.
This includes E/W traffic between VMs on the same Compute Node.
- North/South traffic for Floating IPs (FIP N/S): this is supported on the
distributed routers on Compute Nodes without any centralized element.
- North/South traffic for SNAT (SNAT N/S): this is supported via a
centralized element that handles the SNAT traffic.
    To support these use cases, DVR routers rely on an L3 agent that runs on a
    central node (also known as a Network Node or Service Node), as well as L3
    agents that run individually on each Compute Node of an OpenStack cloud.
Each L3 agent creates namespaces to route traffic according to the use
cases outlined above. The mechanism adopted for creating and managing
these namespaces is via (Router, Agent) binding and Scheduling in general.
The main difference between distributed routers and centralized ones is
that in the distributed case, multiple bindings will exist, one for each
of the agents participating in the routed topology for the specific router.
These bindings are created in the following circumstances:
- A subnet is added to a router via router-interface-add, and that subnet
      has running VMs deployed in it. A binding will be created between the
router and any L3 agent whose Compute Node is hosting the VM(s).
- An external gateway is set to a router via router-gateway-set. A binding
will be created between the router and the L3 agent running centrally
on the Network Node.
Therefore, any time a router operation occurs (create, update or delete),
scheduling will determine whether the router needs to be associated to an
L3 agent, just like a regular centralized router, with the difference that,
in the distributed case, the bindings required are established based on
the state of the router and the Compute Nodes.
"""
def dvr_update_router_addvm(self, context, port):
port_dict = self._core_plugin.get_port(context, port['id'])
port_host = port_dict['binding:host_id']
l3_agent_on_host = (self.get_l3_agents(
context, filters={'host': [port_host]}) or [None])[0]
if not l3_agent_on_host:
return
ips = port['fixed_ips']
router_ids = self.get_dvr_routers_by_portid(context, port['id'], ips)
for router_id in router_ids:
if not self.check_l3_agent_router_binding(
context, router_id, l3_agent_on_host['id']):
self.schedule_router(
context, router_id, candidates=[l3_agent_on_host])
LOG.debug('DVR: dvr_update_router_addvm %s ', router_id)
self.l3_rpc_notifier.routers_updated_on_host(
context, router_ids, port_host)
def get_dvr_routers_by_portid(self, context, port_id, fixed_ips=None):
"""Gets the dvr routers on vmport subnets."""
router_ids = set()
if fixed_ips is None:
port_dict = self._core_plugin.get_port(context, port_id)
fixed_ips = port_dict['fixed_ips']
for fixedip in fixed_ips:
vm_subnet = fixedip['subnet_id']
filter_sub = {'fixed_ips': {'subnet_id': [vm_subnet]},
'device_owner':
[n_const.DEVICE_OWNER_DVR_INTERFACE]}
subnet_ports = self._core_plugin.get_ports(
context, filters=filter_sub)
for subnet_port in subnet_ports:
router_ids.add(subnet_port['device_id'])
return router_ids
def get_subnet_ids_on_router(self, context, router_id):
"""Return subnet IDs for interfaces attached to the given router."""
subnet_ids = set()
filter_rtr = {'device_id': [router_id]}
int_ports = self._core_plugin.get_ports(context, filters=filter_rtr)
for int_port in int_ports:
int_ips = int_port['fixed_ips']
if int_ips:
int_subnet = int_ips[0]['subnet_id']
subnet_ids.add(int_subnet)
else:
LOG.debug('DVR: Could not find a subnet id '
'for router %s', router_id)
return subnet_ids
def check_ports_on_host_and_subnet(self, context, host,
port_id, subnet_id):
"""Check if there is any dvr serviceable port on the subnet_id."""
filter_sub = {'fixed_ips': {'subnet_id': [subnet_id]}}
ports = self._core_plugin.get_ports(context, filters=filter_sub)
for port in ports:
if (n_utils.is_dvr_serviced(port['device_owner'])
and port['binding:host_id'] == host
and port['id'] != port_id):
LOG.debug('DVR: %(port_status)s port exists for subnet '
'%(subnet_id)s on host %(host)s',
{'port_status': port['status'],
'subnet_id': subnet_id, 'host': host})
return True
return False
def dvr_deletens_if_no_port(self, context, port_id, port_host=None):
"""Delete the DVR namespace if no dvr serviced port exists."""
admin_context = context.elevated()
router_ids = self.get_dvr_routers_by_portid(admin_context, port_id)
if not port_host:
port_host = ml2_db.get_port_binding_host(admin_context.session,
port_id)
if not port_host:
LOG.debug('Host name not found for port %s', port_id)
return []
if not router_ids:
LOG.debug('No namespaces available for this DVR port %(port)s '
'on host %(host)s', {'port': port_id,
'host': port_host})
return []
removed_router_info = []
for router_id in router_ids:
subnet_ids = self.get_subnet_ids_on_router(admin_context,
router_id)
port_exists_on_subnet = False
for subnet in subnet_ids:
if self.check_ports_on_host_and_subnet(admin_context,
port_host,
port_id,
subnet):
port_exists_on_subnet = True
break
if port_exists_on_subnet:
continue
filter_rtr = {'device_id': [router_id],
'device_owner':
[n_const.DEVICE_OWNER_DVR_INTERFACE]}
int_ports = self._core_plugin.get_ports(
admin_context, filters=filter_rtr)
for port in int_ports:
dvr_binding = (ml2_db.
get_dvr_port_binding_by_host(context.session,
port['id'],
port_host))
if dvr_binding:
# unbind this port from router
dvr_binding['router_id'] = None
dvr_binding.update(dvr_binding)
agent = self._get_agent_by_type_and_host(context,
n_const.AGENT_TYPE_L3,
port_host)
info = {'router_id': router_id, 'host': port_host,
'agent_id': str(agent.id)}
removed_router_info.append(info)
LOG.debug('Router namespace %(router_id)s on host %(host)s '
'to be deleted', info)
return removed_router_info
def bind_snat_router(self, context, router_id, chosen_agent):
"""Bind the router to the chosen l3 agent."""
with context.session.begin(subtransactions=True):
binding = CentralizedSnatL3AgentBinding()
binding.l3_agent = chosen_agent
binding.router_id = router_id
context.session.add(binding)
LOG.debug('SNAT Router %(router_id)s is scheduled to L3 agent '
'%(agent_id)s', {'router_id': router_id,
'agent_id': chosen_agent.id})
def bind_dvr_router_servicenode(self, context, router_id,
chosen_snat_agent):
"""Bind the IR router to service node if not already hosted."""
query = (context.session.query(l3agent_sch_db.RouterL3AgentBinding).
filter_by(router_id=router_id))
for bind in query:
if bind.l3_agent_id == chosen_snat_agent.id:
LOG.debug('Distributed Router %(router_id)s already hosted '
'on snat l3_agent %(snat_id)s',
{'router_id': router_id,
'snat_id': chosen_snat_agent.id})
return
with context.session.begin(subtransactions=True):
binding = l3agent_sch_db.RouterL3AgentBinding()
binding.l3_agent = chosen_snat_agent
binding.router_id = router_id
context.session.add(binding)
LOG.debug('Binding the distributed router %(router_id)s to '
'the snat agent %(snat_id)s',
{'router_id': router_id,
'snat_id': chosen_snat_agent.id})
def bind_snat_servicenode(self, context, router_id, snat_candidates):
"""Bind the snat router to the chosen l3 service agent."""
chosen_snat_agent = random.choice(snat_candidates)
self.bind_snat_router(context, router_id, chosen_snat_agent)
return chosen_snat_agent
def unbind_snat(self, context, router_id, agent_id=None):
"""Unbind snat from the chosen l3 service agent.
Unbinds from all L3 agents hosting SNAT if passed agent_id is None
"""
with context.session.begin(subtransactions=True):
query = (context.session.
query(CentralizedSnatL3AgentBinding).
filter_by(router_id=router_id))
if agent_id:
query = query.filter_by(l3_agent_id=agent_id)
binding = query.first()
if not binding:
LOG.debug('no SNAT router binding found for router: '
'%(router)s, agent: %(agent)s',
{'router': router_id, 'agent': agent_id or 'any'})
return
query.delete()
LOG.debug('Deleted binding of the SNAT router %s', router_id)
return binding
def unbind_router_servicenode(self, context, router_id, binding):
"""Unbind the router from the chosen l3 service agent."""
port_found = False
with context.session.begin(subtransactions=True):
host = binding.l3_agent.host
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
for subnet in subnet_ids:
ports = (
self._core_plugin.get_ports_on_host_by_subnet(
context, host, subnet))
for port in ports:
if (n_utils.is_dvr_serviced(port['device_owner'])):
port_found = True
LOG.debug('One or more ports exist on the snat '
'enabled l3_agent host %(host)s and '
'router_id %(id)s',
{'host': host, 'id': router_id})
break
agent_id = binding.l3_agent_id
if not port_found:
context.session.query(
l3agent_sch_db.RouterL3AgentBinding).filter_by(
router_id=router_id, l3_agent_id=agent_id).delete(
synchronize_session=False)
if not port_found:
self.l3_rpc_notifier.router_removed_from_agent(
context, router_id, host)
LOG.debug('Removed binding for router %(router_id)s and '
'agent %(agent_id)s',
{'router_id': router_id, 'agent_id': agent_id})
return port_found
def unbind_snat_servicenode(self, context, router_id):
"""Unbind snat AND the router from the current agent."""
with context.session.begin(subtransactions=True):
binding = self.unbind_snat(context, router_id)
if binding:
self.unbind_router_servicenode(context, router_id, binding)
def get_snat_bindings(self, context, router_ids):
"""Retrieves the dvr snat bindings for a router."""
if not router_ids:
return []
query = context.session.query(CentralizedSnatL3AgentBinding)
query = query.options(joinedload('l3_agent')).filter(
CentralizedSnatL3AgentBinding.router_id.in_(router_ids))
return query.all()
def get_snat_candidates(self, sync_router, l3_agents):
"""Get the valid snat enabled l3 agents for the distributed router."""
candidates = []
is_router_distributed = sync_router.get('distributed', False)
if not is_router_distributed:
return candidates
for l3_agent in l3_agents:
if not l3_agent.admin_state_up:
continue
agent_conf = self.get_configuration_dict(l3_agent)
agent_mode = agent_conf.get(n_const.L3_AGENT_MODE,
n_const.L3_AGENT_MODE_LEGACY)
if agent_mode != n_const.L3_AGENT_MODE_DVR_SNAT:
continue
router_id = agent_conf.get('router_id', None)
if router_id and router_id != sync_router['id']:
continue
handle_internal_only_routers = agent_conf.get(
'handle_internal_only_routers', True)
gateway_external_network_id = agent_conf.get(
'gateway_external_network_id', None)
ex_net_id = (sync_router['external_gateway_info'] or {}).get(
'network_id')
if ((not ex_net_id and not handle_internal_only_routers) or
(ex_net_id and gateway_external_network_id and
ex_net_id != gateway_external_network_id)):
continue
candidates.append(l3_agent)
return candidates
def schedule_snat_router(self, context, router_id, sync_router):
"""Schedule the snat router on l3 service agent."""
active_l3_agents = self.get_l3_agents(context, active=True)
if not active_l3_agents:
LOG.warn(_LW('No active L3 agents found for SNAT'))
return
snat_candidates = self.get_snat_candidates(sync_router,
active_l3_agents)
if not snat_candidates:
LOG.warn(_LW('No candidates found for SNAT'))
return
else:
try:
chosen_agent = self.bind_snat_servicenode(
context, router_id, snat_candidates)
except db_exc.DBDuplicateEntry:
LOG.info(_LI("SNAT already bound to a service node."))
return
self.bind_dvr_router_servicenode(
context, router_id, chosen_agent)
return chosen_agent
def _unschedule_router(self, context, router_id, agents_ids):
router = self.get_router(context, router_id)
if router.get('distributed', False):
# for DVR router unscheduling means just unscheduling SNAT portion
self.unbind_snat_servicenode(context, router_id)
else:
super(L3_DVRsch_db_mixin, self)._unschedule_router(
context, router_id, agents_ids)
def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
router_ids):
if n_utils.is_extension_supported(self, n_const.L3_HA_MODE_EXT_ALIAS):
return self.get_ha_sync_data_for_host(context, host,
router_ids=router_ids,
active=True)
return self._get_dvr_sync_data(context, host, agent,
router_ids=router_ids, active=True)
def check_agent_router_scheduling_needed(self, context, agent, router):
if router.get('distributed'):
if router['external_gateway_info']:
return not self.get_snat_bindings(context, [router['id']])
return False
return super(L3_DVRsch_db_mixin,
self).check_agent_router_scheduling_needed(
context, agent, router)
def create_router_to_agent_binding(self, context, agent, router):
"""Create router to agent binding."""
router_id = router['id']
agent_id = agent['id']
if router['external_gateway_info'] and self.router_scheduler and (
router.get('distributed')):
try:
self.bind_snat_router(context, router_id, agent)
self.bind_dvr_router_servicenode(context,
router_id, agent)
except db_exc.DBError:
raise l3agentscheduler.RouterSchedulingFailed(
router_id=router_id,
agent_id=agent_id)
else:
super(L3_DVRsch_db_mixin, self).create_router_to_agent_binding(
context, agent, router)
def remove_router_from_l3_agent(self, context, agent_id, router_id):
binding = None
router = self.get_router(context, router_id)
if router['external_gateway_info'] and router.get('distributed'):
binding = self.unbind_snat(context, router_id, agent_id=agent_id)
# binding only exists when agent mode is dvr_snat
if binding:
notification_not_sent = self.unbind_router_servicenode(context,
router_id, binding)
if notification_not_sent:
self.l3_rpc_notifier.routers_updated(
context, [router_id], schedule_routers=False)
        # The steps below need to be done when the agent mode is legacy or dvr.
if not binding:
super(L3_DVRsch_db_mixin,
self).remove_router_from_l3_agent(
context, agent_id, router_id)
def _notify_l3_agent_new_port(resource, event, trigger, **kwargs):
LOG.debug('Received %(resource)s %(event)s', {
'resource': resource,
'event': event})
port = kwargs.get('port')
if not port:
return
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
mac_address_updated = kwargs.get('mac_address_updated')
update_device_up = kwargs.get('update_device_up')
context = kwargs['context']
if mac_address_updated or update_device_up:
l3plugin.dvr_vmarp_table_update(context, port, "add")
if n_utils.is_dvr_serviced(port['device_owner']):
l3plugin.dvr_update_router_addvm(context, port)
def _notify_port_delete(event, resource, trigger, **kwargs):
context = kwargs['context']
port = kwargs['port']
removed_routers = kwargs['removed_routers']
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
l3plugin.dvr_vmarp_table_update(context, port, "del")
for router in removed_routers:
l3plugin.remove_router_from_l3_agent(
context, router['agent_id'], router['router_id'])
def _notify_l3_agent_port_update(resource, event, trigger, **kwargs):
new_port = kwargs.get('port')
original_port = kwargs.get('original_port')
if new_port and original_port:
original_device_owner = original_port.get('device_owner', '')
if (original_device_owner.startswith('compute') and
not new_port.get('device_owner')):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
context = kwargs['context']
removed_routers = l3plugin.dvr_deletens_if_no_port(
context,
original_port['id'],
port_host=original_port['binding:host_id'])
if removed_routers:
removed_router_args = {
'context': context,
'port': original_port,
'removed_routers': removed_routers,
}
_notify_port_delete(
event, resource, trigger, **removed_router_args)
return
_notify_l3_agent_new_port(resource, event, trigger, **kwargs)
def subscribe():
registry.subscribe(
_notify_l3_agent_port_update, resources.PORT, events.AFTER_UPDATE)
registry.subscribe(
_notify_l3_agent_new_port, resources.PORT, events.AFTER_CREATE)
registry.subscribe(
_notify_port_delete, resources.PORT, events.AFTER_DELETE)
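# Call-flow note (added for clarity; not from the original module): the handlers
# subscribed above are driven by the core/ML2 plugin emitting port events through
# neutron.callbacks.registry, roughly (names illustrative):
#
#     registry.notify(resources.PORT, events.AFTER_CREATE, self,
#                     context=context, port=port)
#
# which invokes _notify_l3_agent_new_port(resources.PORT, events.AFTER_CREATE,
# self, context=context, port=port) and, from there, dvr_update_router_addvm()
# on the L3 service plugin when the port is DVR-serviceable.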
|
|
import os.path
import re
import shlex
import json
from xml.etree import cElementTree as etree
from vial.compat import PY2, bstr, filter, sstr, ustr
if PY2:
import urllib
import urlparse
import Cookie
from cStringIO import StringIO
else:
from urllib import parse as urllib
from urllib import parse as urlparse
from http import cookies as Cookie
from io import BytesIO as StringIO
from .multipart import encode_multipart
header_regex = re.compile(r'^\+?[-\w\d]+$')
value_regex = re.compile(r'^([-_\w\d]+)(:=|@=|=|:)(.+)$')
class PrepareException(Exception): pass
def get_line(content, pos):
return content.count('\n', 0, pos)
def get_heredocs(lines):
ball = '\n'.join(lines)
result = []
hds = re.finditer(r'(?sm)(\s+<<\s+(\w+))$\n(.+?)(\2)', ball)
for m in hds:
ss = m.start(1)
start = get_line(ball, m.start(2))
end = get_line(ball, m.start(4))
body = m.group(3)
if body and body[-1] == '\n':
body = body[:-1]
result.append((start, end, ball[:ss].splitlines()[start], body))
return result
def find_request(lines, line):
for s, e, l, body in get_heredocs(lines):
if s <= line <= e:
return l, body, e
l = line
while l > 0:
lcontent = lines[l-1]
if not lcontent.strip() or lcontent[0] == '#':
break
l -= 1
if l <= 0:
break
line = l
bodylines = []
for l in lines[line+1:]:
if not l.strip():
break
bodylines.append(l)
return lines[line], '\n'.join(bodylines) or None, line + len(bodylines)
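# Example (illustrative; not from the original module) of the heredoc form that
# get_heredocs()/find_request() recognize. Given buffer lines such as:
#
#     POST http://example.com/upload Content-Type:text/plain << EOF
#     line one
#     line two
#     EOF
#
# find_request() returns the request line with the trailing "<< EOF" stripped,
# "line one\nline two" as the body, and the line number of the closing marker.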
def parse_request_line(line, input_func=None, pwd_func=None):
line = line.rstrip()
query = []
form = []
files = []
headers = {}
result = {'query': query, 'form': form, 'files': files, 'headers': headers}
rparts = line.split(None, 2)
if len(rparts) < 2:
return None
result['method'], result['url'] = rparts[0], rparts[1]
if len(rparts) > 2:
tail = rparts[2]
else:
tail = ''
if tail:
parts = shlex.split(tail, True)
try:
pos = parts.index('|')
except ValueError:
pass
else:
result['templates'] = filter(None, (r.strip() for r in ''.join(parts[pos+1:]).split(',')))
parts = parts[:pos]
if len(parts) >= 2:
if parts[-2] == '<':
result['body_from_file'] = parts[-1]
parts = parts[:-2]
for p in parts:
m = value_regex.match(p)
if m:
param, op, value = m.group(1, 2, 3)
if value == '__pwd__' and pwd_func:
value = pwd_func(param)
if value == '__input__' and input_func:
value = input_func(param)
if op == '=':
query.append((param, value))
elif op == ':':
headers[param] = value
elif op == ':=':
form.append((param, value))
elif op == '@=':
files.append((param, value))
return result
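# Worked example (illustrative; not from the original module): the request line
#
#     POST http://example.com/api token=abc X-Auth:secret name:=Bob | tpl1
#
# is parsed by parse_request_line() into roughly
#
#     {'method': 'POST', 'url': 'http://example.com/api',
#      'query': [('token', 'abc')], 'form': [('name', 'Bob')], 'files': [],
#      'headers': {'X-Auth': 'secret'}, 'templates': <iterable yielding 'tpl1'>}
#
# i.e. "=" adds a query-string parameter, ":" a header, ":=" a form field,
# "@=" a file upload, and everything after "|" names response templates.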
def prepare_request(lines, line, headers, input_func=None, pwd_func=None):
rline, body, rend = find_request(lines, line)
raw = parse_request_line(rline, input_func, pwd_func)
if not raw:
raise PrepareException('Invalid format: METHOD uri [qs_param=value] [form_param:=value] [file_param@=value] '
'[Header:value] [< /filename-with-body] [| tpl1,tpl2] [<< HEREDOC]')
headers.update(raw['headers'])
if body is None and 'body_from_file' in raw:
try:
with open(raw['body_from_file']) as f:
body = f.read()
except Exception as e:
raise PrepareException('Can\'t open body file {}: {}'.format(raw['body_from_file'], e))
if (body and 'content-type' not in headers
and (body[:1000].lstrip() or ' ')[0] in '{['):
try:
json.loads(body)
except ValueError:
pass
else:
headers.set('Content-Type', 'application/json')
if body is None and (raw['files'] or headers.get('Content-Type') == 'multipart/form-data'):
files = []
for k, v in raw['files']:
fname = os.path.basename(v)
try:
with open(v, 'rb') as f:
content = f.read()
except Exception as e:
raise PrepareException('Error opening file param {}: {}'.format(v, e))
files.append((k, {'filename': fname, 'content': content}))
body, h = encode_multipart(raw['form'], files)
headers.update(h)
if body is None and raw['form']:
body = urllib.urlencode(raw['form'])
headers.set('Content-Type', 'application/x-www-form-urlencoded')
return raw['method'], raw['url'], raw['query'], body, raw.get('templates', []), rend
def send_collector(connection):
connection._sdata = b''
oldsend = connection.send
def send(data):
if len(connection._sdata) <= 65536:
connection._sdata += data
if len(connection._sdata) > 65536:
connection._sdata += b'\n...TRUNCATED...'
return oldsend(data)
connection.send = send
return connection
class Headers(object):
def __init__(self, headers=None):
self.headers = headers or [('User-Agent', 'vial-http')]
def set(self, header, value):
self.headers = [r for r in self.headers if r[0].lower() != header.lower()]
self.headers.append((header, value))
def get(self, name, default=None):
try:
return self[name]
except KeyError:
return default
def __getitem__(self, name):
result = [r[1] for r in self.headers if r[0].lower() == name.lower()]
if not result:
raise KeyError(name)
return result[0]
def update(self, headers):
for k, v in headers.items():
self.set(k, v)
def add(self, header, value):
self.headers.append((header, value))
def __contains__(self, header):
return any(h.lower() == header.lower() for h, _ in self.headers)
def pop(self, header, default=None):
result = default
headers = []
for h, v in self.headers:
if h.lower() == header.lower():
result = v
else:
headers.append((h, v))
self.headers = headers
return result
def iteritems(self):
return self.headers
def items(self):
return self.headers
def __iter__(self):
return (h for h, _ in self.headers)
def copy(self, *names):
result = Headers()
for name in names:
v = self.get(name)
if v is not None:
result.set(name, v)
return result
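# Behaviour sketch (added for clarity): Headers.set() replaces any existing header
# case-insensitively, while add() appends another value, which is what the
# "+Header: value" prefix handled below relies on. For example:
#
#     h = Headers()
#     h.set('Accept', 'text/plain')
#     h.set('accept', 'application/json')   # replaces the previous Accept
#     h.add('Cookie', 'a=1')
#     h.add('Cookie', 'b=2')                # keeps both Cookie entries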
def get_headers_and_templates(lines, line):
headers = Headers()
templates = {}
it = iter(lines[:line])
while True:
l = next(it, None)
if l is None:
break
if l.startswith('TEMPLATE '):
name, sep, here = l[len('TEMPLATE '):].strip().partition('<<')
name = name.strip()
here = here.strip()
tlines = []
while True:
l = next(it, None)
if l is None:
break
if sep:
pos = l.find(here)
if pos == 0:
break
elif pos > 0:
tlines.append(l[:pos])
break
elif not l.strip():
break
tlines.append(l)
templates[name] ='\n'.join(tlines)
else:
try:
header, value = l.split(':', 1)
except ValueError:
continue
if header_regex.match(header):
if header[0] == '+':
headers.add(header[1:], value.strip())
else:
headers.set(header, value.strip())
return headers, templates
def render_template(template, **ctx):
def sub(match):
try:
return eval(match.group(1), ctx, ctx)
except KeyError:
return "None"
try:
return re.sub(r'\$\{(.+?)\}', sub, template)
except Exception as e:
return 'ERROR: {}'.format(e)
def pretty_xml(text, out, ident=' '):
from xml.sax.saxutils import quoteattr, escape
from collections import Counter
ns_aliases = {}
ns_cache = {}
ns_cnt = Counter()
def get_alias(tag):
try:
return ns_cache[tag]
except KeyError:
pass
pos = tag.find('}')
if pos < 0:
result = tag
else:
rtag = tag[pos+1:]
prefix = ns_aliases[tag[1:pos]]
result = '{}:{}'.format(prefix, rtag) if prefix else rtag
ns_cache[tag] = result
return result
buf = StringIO(text)
for (event, elem) in etree.iterparse(buf, ('start-ns',)):
alias = elem[0]
if alias in ns_cnt:
if not alias:
alias = 'ns'
falias = alias
while falias in ns_cnt:
ns_cnt[alias] += 1
falias = '{}{}'.format(alias, ns_cnt[alias])
alias = falias
ns_aliases[elem[1]] = alias
def _render(elem, level, first, use_level):
tag = get_alias(elem.tag)
attrib = ['{}={}'.format(get_alias(k), sstr(quoteattr(v), 'utf-8'))
for k, v in sorted(elem.attrib.items())]
attrib = ustr((' ' + ' '.join(attrib)) if attrib else '', 'utf-8')
if first:
ns = ' ' + ' '.join('xmlns{}={}'.format((':' + v) if v else v, quoteattr(k))
for k, v in ns_aliases.items())
else:
ns = ''
if use_level:
nl = '\n' + ident * level
else:
nl = ''
txt = elem.text
txt = escape(txt) if txt and txt.strip() else ''
tail = txt
has_children = False
for child in elem:
if not has_children:
out.write(u'{}<{}{}{}>{}'.format(nl, tag, ns, attrib, txt).encode('utf-8'))
has_children = True
_render(child, level+1, False, not tail)
tail = child.tail
            tail = escape(tail) if tail and tail.strip() else ''
if tail:
out.write(bstr(tail, 'utf-8'))
if has_children:
if not tail:
nl = '\n' + ident * level
else:
nl = ''
            out.write(u'{}</{}>'.format(nl, tag).encode('utf-8'))
else:
if txt:
out.write(u'{}<{}{}{}>{}</{}>'.format(nl, tag, ns, attrib, txt, tag).encode('utf-8'))
else:
out.write(u'{}<{}{}{}/>'.format(nl, tag, ns, attrib, txt).encode('utf-8'))
return txt
buf.seek(0)
_render(etree.parse(buf).getroot(), 0, True, False)
def get_connection_settings(url, headers):
u = urlparse.urlsplit(url)
if not u.hostname:
host = headers.pop('host', '')
if not host.startswith('http://') and not host.startswith('https://'):
host = 'http://' + host
u = urlparse.urlsplit(host + url)
vconnect = headers.pop('vial-connect', None)
if vconnect:
if not vconnect.startswith('http://') and not vconnect.startswith('https://'):
vconnect = 'http://' + vconnect
vu = urlparse.urlsplit(vconnect)
return (vu.hostname, vu.port), u._replace(scheme=vu.scheme)
return (u.hostname, u.port), u
class CookieJar(object):
def __init__(self):
self.cookies = Cookie.SimpleCookie()
def load(self, response):
if PY2:
cheaders = response.msg.getheaders('set-cookie')
else:
cheaders = response.msg.get_all('set-cookie')
for h in cheaders or []:
self.cookies.load(h)
|
|
import binascii
import errno
import functools
import hashlib
import importlib
import logging
import multiprocessing
import os
import signal
import subprocess
import sys
import tempfile
import threading
import time
from typing import Optional, Sequence, Tuple, Any, Union
import uuid
import grpc
import warnings
try:
from grpc import aio as aiogrpc
except ImportError:
from grpc.experimental import aio as aiogrpc
import inspect
from inspect import signature
from pathlib import Path
import numpy as np
import ray
import ray._private.gcs_utils as gcs_utils
import ray.ray_constants as ray_constants
from ray._private.gcs_pubsub import construct_error_message
from ray._private.tls_utils import load_certs_from_env
# Import psutil after ray so the packaged version is used.
import psutil
pwd = None
if sys.platform != "win32":
import pwd
logger = logging.getLogger(__name__)
# Linux can bind child processes' lifetimes to that of their parents via prctl.
# prctl support is detected dynamically once, and assumed thereafter.
linux_prctl = None
# Windows can bind processes' lifetimes to that of kernel-level "job objects".
# We keep a global job object to tie its lifetime to that of our own process.
win32_job = None
win32_AssignProcessToJobObject = None
def get_user_temp_dir():
if "RAY_TMPDIR" in os.environ:
return os.environ["RAY_TMPDIR"]
elif sys.platform.startswith("linux") and "TMPDIR" in os.environ:
return os.environ["TMPDIR"]
elif sys.platform.startswith("darwin") or sys.platform.startswith("linux"):
        # Ideally we wouldn't need this fallback, but keep it for now
        # for compatibility.
tempdir = os.path.join(os.sep, "tmp")
else:
tempdir = tempfile.gettempdir()
return tempdir
def get_ray_temp_dir():
return os.path.join(get_user_temp_dir(), "ray")
def _random_string():
id_hash = hashlib.shake_128()
id_hash.update(uuid.uuid4().bytes)
id_bytes = id_hash.digest(ray_constants.ID_SIZE)
assert len(id_bytes) == ray_constants.ID_SIZE
return id_bytes
def format_error_message(exception_message, task_exception=False):
"""Improve the formatting of an exception thrown by a remote function.
This method takes a traceback from an exception and makes it nicer by
removing a few uninformative lines and adding some space to indent the
remaining lines nicely.
    Args:
        exception_message (str): A message generated by traceback.format_exc().
        task_exception (bool): If True, strip the leading traceback lines that
            only describe the worker machinery.
    Returns:
        A string of the formatted exception message.
    """
lines = exception_message.split("\n")
if task_exception:
# For errors that occur inside of tasks, remove lines 1 and 2 which are
# always the same, they just contain information about the worker code.
lines = lines[0:1] + lines[3:]
return "\n".join(lines)
def push_error_to_driver(worker, error_type, message, job_id=None):
"""Push an error message to the driver to be printed in the background.
Args:
worker: The worker to use.
error_type (str): The type of the error.
message (str): The message that will be printed in the background
on the driver.
job_id: The ID of the driver to push the error message to. If this
is None, then the message will be pushed to all drivers.
"""
if job_id is None:
job_id = ray.JobID.nil()
assert isinstance(job_id, ray.JobID)
worker.core_worker.push_error(job_id, error_type, message, time.time())
def publish_error_to_driver(
error_type, message, job_id=None, redis_client=None, gcs_publisher=None
):
"""Push an error message to the driver to be printed in the background.
Normally the push_error_to_driver function should be used. However, in some
instances, the raylet client is not available, e.g., because the
error happens in Python before the driver or worker has connected to the
backend processes.
Args:
error_type (str): The type of the error.
message (str): The message that will be printed in the background
on the driver.
job_id: The ID of the driver to push the error message to. If this
is None, then the message will be pushed to all drivers.
redis_client: The redis client to use.
gcs_publisher: The GCS publisher to use. If specified, ignores
redis_client.
"""
if job_id is None:
job_id = ray.JobID.nil()
assert isinstance(job_id, ray.JobID)
error_data = construct_error_message(job_id, error_type, message, time.time())
if gcs_publisher:
gcs_publisher.publish_error(job_id.hex().encode(), error_data)
elif redis_client:
pubsub_msg = gcs_utils.PubSubMessage()
pubsub_msg.id = job_id.binary()
pubsub_msg.data = error_data.SerializeToString()
redis_client.publish(
"ERROR_INFO:" + job_id.hex(), pubsub_msg.SerializeToString()
)
else:
raise ValueError("One of redis_client and gcs_publisher needs to be specified!")
def random_string():
"""Generate a random string to use as an ID.
Note that users may seed numpy, which could cause this function to generate
duplicate IDs. Therefore, we need to seed numpy ourselves, but we can't
interfere with the state of the user's random number generator, so we
extract the state of the random number generator and reset it after we are
done.
TODO(rkn): If we want to later guarantee that these are generated in a
deterministic manner, then we will need to make some changes here.
Returns:
A random byte string of length ray_constants.ID_SIZE.
"""
# Get the state of the numpy random number generator.
numpy_state = np.random.get_state()
# Try to use true randomness.
np.random.seed(None)
# Generate the random ID.
random_id = np.random.bytes(ray_constants.ID_SIZE)
# Reset the state of the numpy random number generator.
np.random.set_state(numpy_state)
return random_id
def decode(byte_str: str, allow_none: bool = False, encode_type: str = "utf-8"):
"""Make this unicode in Python 3, otherwise leave it as bytes.
Args:
byte_str: The byte string to decode.
allow_none: If true, then we will allow byte_str to be None in which
case we will return an empty string. TODO(rkn): Remove this flag.
This is only here to simplify upgrading to flatbuffers 1.10.0.
Returns:
A byte string in Python 2 and a unicode string in Python 3.
"""
if byte_str is None and allow_none:
return ""
if not isinstance(byte_str, bytes):
raise ValueError(f"The argument {byte_str} must be a bytes object.")
if sys.version_info >= (3, 0):
return byte_str.decode(encode_type)
else:
return byte_str
def ensure_str(s, encoding="utf-8", errors="strict"):
"""Coerce *s* to `str`.
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(s, str):
return s
else:
assert isinstance(s, bytes)
return s.decode(encoding, errors)
def binary_to_object_ref(binary_object_ref):
return ray.ObjectRef(binary_object_ref)
def binary_to_task_id(binary_task_id):
return ray.TaskID(binary_task_id)
def binary_to_hex(identifier):
hex_identifier = binascii.hexlify(identifier)
if sys.version_info >= (3, 0):
hex_identifier = hex_identifier.decode()
return hex_identifier
def hex_to_binary(hex_identifier):
return binascii.unhexlify(hex_identifier)
# TODO(qwang): Remove these helper functions
# once we separate `WorkerID` from `UniqueID`.
def compute_job_id_from_driver(driver_id):
assert isinstance(driver_id, ray.WorkerID)
return ray.JobID(driver_id.binary()[0 : ray.JobID.size()])
def compute_driver_id_from_job(job_id):
assert isinstance(job_id, ray.JobID)
rest_length = ray_constants.ID_SIZE - job_id.size()
driver_id_str = job_id.binary() + (rest_length * b"\xff")
return ray.WorkerID(driver_id_str)
def get_cuda_visible_devices():
"""Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable.
Returns:
        devices (List[str]): If CUDA_VISIBLE_DEVICES is set, returns a
            list of strings representing the IDs of the visible GPUs.
            If it is not set, returns None; if it is set to the empty
            string or to NoDevFiles, returns an empty list.
"""
gpu_ids_str = os.environ.get("CUDA_VISIBLE_DEVICES", None)
if gpu_ids_str is None:
return None
if gpu_ids_str == "":
return []
if gpu_ids_str == "NoDevFiles":
return []
# GPU identifiers are given as strings representing integers or UUIDs.
return list(gpu_ids_str.split(","))
last_set_gpu_ids = None
def set_cuda_visible_devices(gpu_ids):
"""Set the CUDA_VISIBLE_DEVICES environment variable.
Args:
gpu_ids (List[str]): List of strings representing GPU IDs.
"""
if os.environ.get("RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES"):
return
global last_set_gpu_ids
if last_set_gpu_ids == gpu_ids:
return # optimization: already set
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(i) for i in gpu_ids])
last_set_gpu_ids = gpu_ids
def resources_from_resource_arguments(
default_num_cpus,
default_num_gpus,
default_memory,
default_object_store_memory,
default_resources,
default_accelerator_type,
runtime_num_cpus,
runtime_num_gpus,
runtime_memory,
runtime_object_store_memory,
runtime_resources,
runtime_accelerator_type,
):
"""Determine a task's resource requirements.
Args:
default_num_cpus: The default number of CPUs required by this function
or actor method.
default_num_gpus: The default number of GPUs required by this function
or actor method.
default_memory: The default heap memory required by this function
or actor method.
default_object_store_memory: The default object store memory required
by this function or actor method.
default_resources: The default custom resources required by this
function or actor method.
runtime_num_cpus: The number of CPUs requested when the task was
invoked.
runtime_num_gpus: The number of GPUs requested when the task was
invoked.
runtime_memory: The heap memory requested when the task was invoked.
runtime_object_store_memory: The object store memory requested when
the task was invoked.
runtime_resources: The custom resources requested when the task was
invoked.
Returns:
A dictionary of the resource requirements for the task.
"""
if runtime_resources is not None:
resources = runtime_resources.copy()
elif default_resources is not None:
resources = default_resources.copy()
else:
resources = {}
if "CPU" in resources or "GPU" in resources:
raise ValueError(
"The resources dictionary must not " "contain the key 'CPU' or 'GPU'"
)
elif "memory" in resources or "object_store_memory" in resources:
raise ValueError(
"The resources dictionary must not "
"contain the key 'memory' or 'object_store_memory'"
)
assert default_num_cpus is not None
resources["CPU"] = (
default_num_cpus if runtime_num_cpus is None else runtime_num_cpus
)
if runtime_num_gpus is not None:
resources["GPU"] = runtime_num_gpus
elif default_num_gpus is not None:
resources["GPU"] = default_num_gpus
# Order of arguments matter for short circuiting.
memory = runtime_memory or default_memory
object_store_memory = runtime_object_store_memory or default_object_store_memory
if memory is not None:
resources["memory"] = ray_constants.to_memory_units(memory, round_up=True)
if object_store_memory is not None:
resources["object_store_memory"] = ray_constants.to_memory_units(
object_store_memory, round_up=True
)
if runtime_accelerator_type is not None:
resources[
f"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}" f"{runtime_accelerator_type}"
] = 0.001
elif default_accelerator_type is not None:
resources[
f"{ray_constants.RESOURCE_CONSTRAINT_PREFIX}" f"{default_accelerator_type}"
] = 0.001
return resources
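# Worked example (illustrative): runtime arguments take precedence over the
# defaults, and custom resources are copied before the CPU/GPU entries are added:
#
#     resources_from_resource_arguments(
#         default_num_cpus=1, default_num_gpus=None, default_memory=None,
#         default_object_store_memory=None, default_resources=None,
#         default_accelerator_type=None,
#         runtime_num_cpus=2, runtime_num_gpus=1, runtime_memory=None,
#         runtime_object_store_memory=None, runtime_resources={"custom": 1},
#         runtime_accelerator_type=None)
#     # -> {"custom": 1, "CPU": 2, "GPU": 1}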
class Unbuffered(object):
"""There's no "built-in" solution to programatically disabling buffering of
text files. Ray expects stdout/err to be text files, so creating an
unbuffered binary file is unacceptable.
See
https://mail.python.org/pipermail/tutor/2003-November/026645.html.
https://docs.python.org/3/library/functions.html#open
"""
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
def open_log(path, unbuffered=False, **kwargs):
"""
Opens the log file at `path`, with the provided kwargs being given to
`open`.
"""
# Disable buffering, see test_advanced_3.py::test_logging_to_driver
kwargs.setdefault("buffering", 1)
kwargs.setdefault("mode", "a")
kwargs.setdefault("encoding", "utf-8")
stream = open(path, **kwargs)
if unbuffered:
return Unbuffered(stream)
else:
return stream
def get_system_memory():
"""Return the total amount of system memory in bytes.
Returns:
The total amount of system memory in bytes.
"""
# Try to accurately figure out the memory limit if we are in a docker
# container. Note that this file is not specific to Docker and its value is
# often much larger than the actual amount of memory.
docker_limit = None
# For cgroups v1:
memory_limit_filename = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
# For cgroups v2:
memory_limit_filename_v2 = "/sys/fs/cgroup/memory.max"
if os.path.exists(memory_limit_filename):
with open(memory_limit_filename, "r") as f:
docker_limit = int(f.read())
elif os.path.exists(memory_limit_filename_v2):
with open(memory_limit_filename_v2, "r") as f:
docker_limit = int(f.read())
# Use psutil if it is available.
psutil_memory_in_bytes = psutil.virtual_memory().total
if docker_limit is not None:
# We take the min because the cgroup limit is very large if we aren't
# in Docker.
return min(docker_limit, psutil_memory_in_bytes)
return psutil_memory_in_bytes
def _get_docker_cpus(
cpu_quota_file_name="/sys/fs/cgroup/cpu/cpu.cfs_quota_us",
cpu_period_file_name="/sys/fs/cgroup/cpu/cpu.cfs_period_us",
cpuset_file_name="/sys/fs/cgroup/cpuset/cpuset.cpus",
cpu_max_file_name="/sys/fs/cgroup/cpu.max",
) -> Optional[float]:
    # TODO (Alex): Don't implement this logic ourselves.
    # Docker has 2 underlying ways of implementing CPU limits:
# https://docs.docker.com/config/containers/resource_constraints/#configure-the-default-cfs-scheduler
# 1. --cpuset-cpus 2. --cpus or --cpu-quota/--cpu-period (--cpu-shares is a
# soft limit so we don't worry about it). For Ray's purposes, if we use
# docker, the number of vCPUs on a machine is whichever is set (ties broken
# by smaller value).
cpu_quota = None
# See: https://bugs.openjdk.java.net/browse/JDK-8146115
if os.path.exists(cpu_quota_file_name) and os.path.exists(cpu_period_file_name):
try:
with open(cpu_quota_file_name, "r") as quota_file, open(
cpu_period_file_name, "r"
) as period_file:
cpu_quota = float(quota_file.read()) / float(period_file.read())
except Exception:
logger.exception("Unexpected error calculating docker cpu quota.")
# Look at cpu.max for cgroups v2
elif os.path.exists(cpu_max_file_name):
try:
max_file = open(cpu_max_file_name).read()
quota_str, period_str = max_file.split()
if quota_str.isnumeric() and period_str.isnumeric():
cpu_quota = float(quota_str) / float(period_str)
else:
# quota_str is "max" meaning the cpu quota is unset
cpu_quota = None
except Exception:
logger.exception("Unexpected error calculating docker cpu quota.")
if (cpu_quota is not None) and (cpu_quota < 0):
cpu_quota = None
elif cpu_quota == 0:
# Round up in case the cpu limit is less than 1.
cpu_quota = 1
cpuset_num = None
if os.path.exists(cpuset_file_name):
try:
with open(cpuset_file_name) as cpuset_file:
ranges_as_string = cpuset_file.read()
ranges = ranges_as_string.split(",")
cpu_ids = []
for num_or_range in ranges:
if "-" in num_or_range:
start, end = num_or_range.split("-")
cpu_ids.extend(list(range(int(start), int(end) + 1)))
else:
cpu_ids.append(int(num_or_range))
cpuset_num = len(cpu_ids)
except Exception:
logger.exception("Unexpected error calculating docker cpuset ids.")
# Possible to-do: Parse cgroups v2's cpuset.cpus.effective for the number
# of accessible CPUs.
if cpu_quota and cpuset_num:
return min(cpu_quota, cpuset_num)
return cpu_quota or cpuset_num
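# Worked example (illustrative): with cpu.cfs_quota_us=150000 and
# cpu.cfs_period_us=100000 the quota resolves to 1.5 CPUs; a cpuset of
# "0-2,7" resolves to 4 CPUs; when both are present the smaller value,
# 1.5, is returned.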
def get_num_cpus() -> int:
cpu_count = multiprocessing.cpu_count()
if os.environ.get("RAY_USE_MULTIPROCESSING_CPU_COUNT"):
logger.info(
"Detected RAY_USE_MULTIPROCESSING_CPU_COUNT=1: Using "
"multiprocessing.cpu_count() to detect the number of CPUs. "
"This may be inconsistent when used inside docker. "
"To correctly detect CPUs, unset the env var: "
"`RAY_USE_MULTIPROCESSING_CPU_COUNT`."
)
return cpu_count
try:
# Not easy to get cpu count in docker, see:
# https://bugs.python.org/issue36054
docker_count = _get_docker_cpus()
if docker_count is not None and docker_count != cpu_count:
# Don't log this warning if we're on K8s or if the warning is
# explicitly disabled.
if (
"RAY_DISABLE_DOCKER_CPU_WARNING" not in os.environ
and "KUBERNETES_SERVICE_HOST" not in os.environ
):
logger.warning(
"Detecting docker specified CPUs. In "
"previous versions of Ray, CPU detection in containers "
"was incorrect. Please ensure that Ray has enough CPUs "
"allocated. As a temporary workaround to revert to the "
"prior behavior, set "
"`RAY_USE_MULTIPROCESSING_CPU_COUNT=1` as an env var "
"before starting Ray. Set the env var: "
"`RAY_DISABLE_DOCKER_CPU_WARNING=1` to mute this warning."
)
# TODO (Alex): We should probably add support for fractional cpus.
if int(docker_count) != float(docker_count):
logger.warning(
f"Ray currently does not support initializing Ray"
f"with fractional cpus. Your num_cpus will be "
f"truncated from {docker_count} to "
f"{int(docker_count)}."
)
docker_count = int(docker_count)
cpu_count = docker_count
except Exception:
        # `nproc` and cgroup are linux-only. Docker only works natively on
        # Linux (it runs in a Linux VM on other platforms), so this is fine.
pass
return cpu_count
def get_used_memory():
"""Return the currently used system memory in bytes
Returns:
The total amount of used memory
"""
# Try to accurately figure out the memory usage if we are in a docker
# container.
docker_usage = None
# For cgroups v1:
memory_usage_filename = "/sys/fs/cgroup/memory/memory.usage_in_bytes"
# For cgroups v2:
memory_usage_filename_v2 = "/sys/fs/cgroup/memory.current"
if os.path.exists(memory_usage_filename):
with open(memory_usage_filename, "r") as f:
docker_usage = int(f.read())
elif os.path.exists(memory_usage_filename_v2):
with open(memory_usage_filename_v2, "r") as f:
docker_usage = int(f.read())
# Use psutil if it is available.
psutil_memory_in_bytes = psutil.virtual_memory().used
if docker_usage is not None:
# We take the min because the cgroup limit is very large if we aren't
# in Docker.
return min(docker_usage, psutil_memory_in_bytes)
return psutil_memory_in_bytes
def estimate_available_memory():
"""Return the currently available amount of system memory in bytes.
Returns:
The total amount of available memory in bytes. Based on the used
and total memory.
"""
return get_system_memory() - get_used_memory()
def get_shared_memory_bytes():
"""Get the size of the shared memory file system.
Returns:
The size of the shared memory file system in bytes.
"""
# Make sure this is only called on Linux.
assert sys.platform == "linux" or sys.platform == "linux2"
shm_fd = os.open("/dev/shm", os.O_RDONLY)
try:
shm_fs_stats = os.fstatvfs(shm_fd)
# The value shm_fs_stats.f_bsize is the block size and the
# value shm_fs_stats.f_bavail is the number of available
# blocks.
shm_avail = shm_fs_stats.f_bsize * shm_fs_stats.f_bavail
finally:
os.close(shm_fd)
return shm_avail
def check_oversized_function(
pickled: bytes, name: str, obj_type: str, worker: "ray.Worker"
) -> None:
"""Send a warning message if the pickled function is too large.
Args:
pickled: the pickled function.
name: name of the pickled object.
obj_type: type of the pickled object, can be 'function',
'remote function', or 'actor'.
worker: the worker used to send warning message. message will be logged
locally if None.
"""
length = len(pickled)
if length <= ray_constants.FUNCTION_SIZE_WARN_THRESHOLD:
return
elif length < ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD:
warning_message = (
"The {} {} is very large ({} MiB). "
"Check that its definition is not implicitly capturing a large "
"array or other object in scope. Tip: use ray.put() to put large "
"objects in the Ray object store."
).format(obj_type, name, length // (1024 * 1024))
if worker:
push_error_to_driver(
worker,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,
"Warning: " + warning_message,
job_id=worker.current_job_id,
)
else:
error = (
"The {} {} is too large ({} MiB > FUNCTION_SIZE_ERROR_THRESHOLD={}"
" MiB). Check that its definition is not implicitly capturing a "
"large array or other object in scope. Tip: use ray.put() to "
"put large objects in the Ray object store."
).format(
obj_type,
name,
length // (1024 * 1024),
ray_constants.FUNCTION_SIZE_ERROR_THRESHOLD // (1024 * 1024),
)
raise ValueError(error)
def is_main_thread():
return threading.current_thread().getName() == "MainThread"
def detect_fate_sharing_support_win32():
global win32_job, win32_AssignProcessToJobObject
if win32_job is None and sys.platform == "win32":
import ctypes
try:
from ctypes.wintypes import BOOL, DWORD, HANDLE, LPVOID, LPCWSTR
kernel32 = ctypes.WinDLL("kernel32")
kernel32.CreateJobObjectW.argtypes = (LPVOID, LPCWSTR)
kernel32.CreateJobObjectW.restype = HANDLE
sijo_argtypes = (HANDLE, ctypes.c_int, LPVOID, DWORD)
kernel32.SetInformationJobObject.argtypes = sijo_argtypes
kernel32.SetInformationJobObject.restype = BOOL
kernel32.AssignProcessToJobObject.argtypes = (HANDLE, HANDLE)
kernel32.AssignProcessToJobObject.restype = BOOL
kernel32.IsDebuggerPresent.argtypes = ()
kernel32.IsDebuggerPresent.restype = BOOL
except (AttributeError, TypeError, ImportError):
kernel32 = None
job = kernel32.CreateJobObjectW(None, None) if kernel32 else None
job = subprocess.Handle(job) if job else job
if job:
from ctypes.wintypes import DWORD, LARGE_INTEGER, ULARGE_INTEGER
class JOBOBJECT_BASIC_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
("PerProcessUserTimeLimit", LARGE_INTEGER),
("PerJobUserTimeLimit", LARGE_INTEGER),
("LimitFlags", DWORD),
("MinimumWorkingSetSize", ctypes.c_size_t),
("MaximumWorkingSetSize", ctypes.c_size_t),
("ActiveProcessLimit", DWORD),
("Affinity", ctypes.c_size_t),
("PriorityClass", DWORD),
("SchedulingClass", DWORD),
]
class IO_COUNTERS(ctypes.Structure):
_fields_ = [
("ReadOperationCount", ULARGE_INTEGER),
("WriteOperationCount", ULARGE_INTEGER),
("OtherOperationCount", ULARGE_INTEGER),
("ReadTransferCount", ULARGE_INTEGER),
("WriteTransferCount", ULARGE_INTEGER),
("OtherTransferCount", ULARGE_INTEGER),
]
class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(ctypes.Structure):
_fields_ = [
("BasicLimitInformation", JOBOBJECT_BASIC_LIMIT_INFORMATION),
("IoInfo", IO_COUNTERS),
("ProcessMemoryLimit", ctypes.c_size_t),
("JobMemoryLimit", ctypes.c_size_t),
("PeakProcessMemoryUsed", ctypes.c_size_t),
("PeakJobMemoryUsed", ctypes.c_size_t),
]
debug = kernel32.IsDebuggerPresent()
# Defined in <WinNT.h>; also available here:
# https://docs.microsoft.com/en-us/windows/win32/api/jobapi2/nf-jobapi2-setinformationjobobject
JobObjectExtendedLimitInformation = 9
JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400
JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000
buf = JOBOBJECT_EXTENDED_LIMIT_INFORMATION()
buf.BasicLimitInformation.LimitFlags = (
(0 if debug else JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE)
| JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION
| JOB_OBJECT_LIMIT_BREAKAWAY_OK
)
infoclass = JobObjectExtendedLimitInformation
if not kernel32.SetInformationJobObject(
job, infoclass, ctypes.byref(buf), ctypes.sizeof(buf)
):
job = None
win32_AssignProcessToJobObject = (
kernel32.AssignProcessToJobObject if kernel32 is not None else False
)
win32_job = job if job else False
return bool(win32_job)
def detect_fate_sharing_support_linux():
global linux_prctl
if linux_prctl is None and sys.platform.startswith("linux"):
try:
from ctypes import c_int, c_ulong, CDLL
prctl = CDLL(None).prctl
prctl.restype = c_int
prctl.argtypes = [c_int, c_ulong, c_ulong, c_ulong, c_ulong]
except (AttributeError, TypeError):
prctl = None
linux_prctl = prctl if prctl else False
return bool(linux_prctl)
def detect_fate_sharing_support():
result = None
if sys.platform == "win32":
result = detect_fate_sharing_support_win32()
elif sys.platform.startswith("linux"):
result = detect_fate_sharing_support_linux()
return result
def set_kill_on_parent_death_linux():
"""Ensures this process dies if its parent dies (fate-sharing).
Linux-only. Must be called in preexec_fn (i.e. by the child).
"""
if detect_fate_sharing_support_linux():
import signal
PR_SET_PDEATHSIG = 1
if linux_prctl(PR_SET_PDEATHSIG, signal.SIGKILL, 0, 0, 0) != 0:
import ctypes
raise OSError(ctypes.get_errno(), "prctl(PR_SET_PDEATHSIG) failed")
else:
assert False, "PR_SET_PDEATHSIG used despite being unavailable"
def set_kill_child_on_death_win32(child_proc):
"""Ensures the child process dies if this process dies (fate-sharing).
Windows-only. Must be called by the parent, after spawning the child.
Args:
child_proc: The subprocess.Popen or subprocess.Handle object.
"""
if isinstance(child_proc, subprocess.Popen):
child_proc = child_proc._handle
assert isinstance(child_proc, subprocess.Handle)
if detect_fate_sharing_support_win32():
if not win32_AssignProcessToJobObject(win32_job, int(child_proc)):
import ctypes
raise OSError(ctypes.get_last_error(), "AssignProcessToJobObject() failed")
else:
assert False, "AssignProcessToJobObject used despite being unavailable"
def set_sigterm_handler(sigterm_handler):
"""Registers a handler for SIGTERM in a platform-compatible manner."""
if sys.platform == "win32":
# Note that these signal handlers only work for console applications.
# TODO(mehrdadn): implement graceful process termination mechanism
# SIGINT is Ctrl+C, SIGBREAK is Ctrl+Break.
signal.signal(signal.SIGBREAK, sigterm_handler)
else:
signal.signal(signal.SIGTERM, sigterm_handler)
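# Minimal usage sketch (hypothetical handler, not part of this module): install
# a callback that logs and exits cleanly when the process is asked to
# terminate (SIGTERM, or SIGBREAK on Windows as noted above).
def _example_register_sigterm_handler():
    def _handler(signum, frame):
        logger.info("Received termination signal %s, shutting down.", signum)
        sys.exit(0)
    set_sigterm_handler(_handler)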
def try_make_directory_shared(directory_path):
try:
os.chmod(directory_path, 0o0777)
except OSError as e:
# Silently suppress the PermissionError that is thrown by the chmod.
# This is done because the user attempting to change the permissions
# on a directory may not own it. The chmod is attempted whether the
# directory is new or not to avoid race conditions.
# ray-project/ray/#3591
if e.errno in [errno.EACCES, errno.EPERM]:
pass
else:
raise
def try_to_create_directory(directory_path):
"""Attempt to create a directory that is globally readable/writable.
Args:
directory_path: The path of the directory to create.
"""
directory_path = os.path.expanduser(directory_path)
os.makedirs(directory_path, exist_ok=True)
# Change the log directory permissions so others can use it. This is
# important when multiple people are using the same machine.
try_make_directory_shared(directory_path)
def try_to_symlink(symlink_path, target_path):
"""Attempt to create a symlink.
If the symlink path exists and isn't a symlink, the symlink will not be
created. If a symlink exists in the path, it will be attempted to be
removed and replaced.
Args:
symlink_path: The path at which to create the symlink.
target_path: The path the symlink should point to.
"""
symlink_path = os.path.expanduser(symlink_path)
target_path = os.path.expanduser(target_path)
if os.path.exists(symlink_path):
if os.path.islink(symlink_path):
# Try to remove existing symlink.
try:
os.remove(symlink_path)
except OSError:
return
else:
# There's an existing non-symlink file, don't overwrite it.
return
try:
os.symlink(target_path, symlink_path)
except OSError:
return
def get_user():
if pwd is None:
return ""
try:
return pwd.getpwuid(os.getuid()).pw_name
except Exception:
return ""
def get_function_args(callable):
all_parameters = frozenset(signature(callable).parameters)
return list(all_parameters)
def get_conda_bin_executable(executable_name):
"""
Return path to the specified executable, assumed to be discoverable within
the 'bin' subdirectory of a conda installation. Adapted from
https://github.com/mlflow/mlflow.
"""
# Use CONDA_EXE as per https://github.com/conda/conda/issues/7126
if "CONDA_EXE" in os.environ:
conda_bin_dir = os.path.dirname(os.environ["CONDA_EXE"])
return os.path.join(conda_bin_dir, executable_name)
return executable_name
def get_conda_env_dir(env_name):
"""Find and validate the conda directory for a given conda environment.
For example, given the environment name `tf1`, this function checks
the existence of the corresponding conda directory, e.g.
`/Users/scaly/anaconda3/envs/tf1`, and returns it.
"""
conda_prefix = os.environ.get("CONDA_PREFIX")
if conda_prefix is None:
        # The caller is neither in a conda env nor in the (base) env. This is rare
# because by default, new terminals start in (base), but we can still
# support this case.
conda_exe = os.environ.get("CONDA_EXE")
if conda_exe is None:
raise ValueError(
"Cannot find environment variables set by conda. "
"Please verify conda is installed."
)
# Example: CONDA_EXE=$HOME/anaconda3/bin/python
# Strip out /bin/python by going up two parent directories.
conda_prefix = str(Path(conda_exe).parent.parent)
# There are two cases:
# 1. We are in a conda (base) env: CONDA_DEFAULT_ENV=base and
# CONDA_PREFIX=$HOME/anaconda3
# 2. We are in a user-created conda env: CONDA_DEFAULT_ENV=$env_name and
# CONDA_PREFIX=$HOME/anaconda3/envs/$current_env_name
if os.environ.get("CONDA_DEFAULT_ENV") == "base":
        # Caller's current environment is (base).
# Not recommended by conda, but we can still support it.
if env_name == "base":
# Desired environment is (base), located at e.g. $HOME/anaconda3
env_dir = conda_prefix
else:
# Desired environment is user-created, e.g.
# $HOME/anaconda3/envs/$env_name
env_dir = os.path.join(conda_prefix, "envs", env_name)
else:
# Now `conda_prefix` should be something like
# $HOME/anaconda3/envs/$current_env_name
# We want to replace the last component with the desired env name.
conda_envs_dir = os.path.split(conda_prefix)[0]
env_dir = os.path.join(conda_envs_dir, env_name)
if not os.path.isdir(env_dir):
raise ValueError(
"conda env "
+ env_name
+ " not found in conda envs directory. Run `conda env list` to "
+ "verify the name is correct."
)
return env_dir
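# Usage sketch (hypothetical environment name, mirroring the docstring example
# above): resolve the on-disk prefix of a named conda env. A ValueError is
# raised when the env does not exist under the active conda installation.
#
#     env_dir = get_conda_env_dir("tf1")
#     # e.g. "/Users/scaly/anaconda3/envs/tf1"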
def get_call_location(back=1):
"""
Get the location (filename and line number) of a function caller, `back`
frames up the stack.
Args:
back (int): The number of frames to go up the stack, not including this
function.
"""
stack = inspect.stack()
try:
frame = stack[back + 1]
return f"{frame.filename}:{frame.lineno}"
except IndexError:
return "UNKNOWN"
# Used to only print a deprecation warning once for a given function if we
# don't wish to spam the caller.
_PRINTED_WARNING = set()
# The following is inspired by
# https://github.com/tensorflow/tensorflow/blob/dec8e0b11f4f87693b67e125e67dfbc68d26c205/tensorflow/python/util/deprecation.py#L274-L329
def deprecated(
instructions=None, removal_release=None, removal_date=None, warn_once=True
):
"""
Creates a decorator for marking functions as deprecated. The decorator
will log a deprecation warning on the first (or all, see `warn_once` arg)
invocations, and will otherwise leave the wrapped function unchanged.
Args:
instructions (str): Instructions for the caller to update their code.
removal_release (str): The release in which this deprecated function
will be removed. Only one of removal_release and removal_date
            should be specified. If neither is specified, we warn that
            the function will be removed "in a future release".
removal_date (str): The date on which this deprecated function will be
removed. Only one of removal_release and removal_date should be
            specified. If neither is specified, we warn that
            the function will be removed "in a future release".
warn_once (bool): If true, the deprecation warning will only be logged
on the first invocation. Otherwise, the deprecation warning will
be logged on every invocation. Defaults to True.
Returns:
A decorator to be used for wrapping deprecated functions.
"""
if removal_release is not None and removal_date is not None:
raise ValueError(
"Only one of removal_release and removal_date should be specified."
)
def deprecated_wrapper(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
global _PRINTED_WARNING
if func not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING.add(func)
msg = (
"From {}: {} (from {}) is deprecated and will ".format(
get_call_location(), func.__name__, func.__module__
)
+ "be removed "
+ (
f"in version {removal_release}."
if removal_release is not None
else f"after {removal_date}"
if removal_date is not None
else "in a future version"
)
+ (f" {instructions}" if instructions is not None else "")
)
warnings.warn(msg)
return func(*args, **kwargs)
return new_func
return deprecated_wrapper
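# Usage sketch (hypothetical helper and version string, shown for illustration
# only): decorating a function as below leaves its behavior unchanged but logs
# a single deprecation warning, including the call site, on first invocation.
@deprecated(instructions="Use a replacement helper instead.", removal_release="2.0.0")
def _example_old_helper():
    return 42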
def import_attr(full_path: str):
"""Given a full import path to a module attr, return the imported attr.
For example, the following are equivalent:
MyClass = import_attr("module.submodule.MyClass")
from module.submodule import MyClass
Returns:
Imported attr
"""
if full_path is None:
raise TypeError("import path cannot be None")
last_period_idx = full_path.rfind(".")
attr_name = full_path[last_period_idx + 1 :]
module_name = full_path[:last_period_idx]
module = importlib.import_module(module_name)
return getattr(module, attr_name)
def get_wheel_filename(
sys_platform: str = sys.platform,
ray_version: str = ray.__version__,
py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
"""Returns the filename used for the nightly Ray wheel.
Args:
sys_platform (str): The platform as returned by sys.platform. Examples:
"darwin", "linux", "win32"
ray_version (str): The Ray version as returned by ray.__version__ or
`ray --version`. Examples: "2.0.0.dev0"
py_version (str):
The major and minor Python versions concatenated. Examples: "36",
"37", "38", "39"
Returns:
The wheel file name. Examples:
ray-2.0.0.dev0-cp38-cp38-manylinux2014_x86_64.whl
"""
assert py_version in ["36", "37", "38", "39"], py_version
os_strings = {
"darwin": "macosx_10_15_x86_64"
if py_version in ["38", "39"]
else "macosx_10_15_intel",
"linux": "manylinux2014_x86_64",
"win32": "win_amd64",
}
assert sys_platform in os_strings, sys_platform
wheel_filename = (
f"ray-{ray_version}-cp{py_version}-"
f"cp{py_version}{'m' if py_version in ['36', '37'] else ''}"
f"-{os_strings[sys_platform]}.whl"
)
return wheel_filename
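# Doctest-style illustration using the documented example values from the
# docstring above (no new values introduced here):
#
#     >>> get_wheel_filename("linux", "2.0.0.dev0", "38")
#     'ray-2.0.0.dev0-cp38-cp38-manylinux2014_x86_64.whl'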
def get_master_wheel_url(
ray_commit: str = ray.__commit__,
sys_platform: str = sys.platform,
ray_version: str = ray.__version__,
py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
"""Return the URL for the wheel from a specific commit."""
filename = get_wheel_filename(
sys_platform=sys_platform, ray_version=ray_version, py_version=py_version
)
return (
f"https://s3-us-west-2.amazonaws.com/ray-wheels/master/"
f"{ray_commit}/{filename}"
)
def get_release_wheel_url(
ray_commit: str = ray.__commit__,
sys_platform: str = sys.platform,
ray_version: str = ray.__version__,
py_version: str = f"{sys.version_info.major}{sys.version_info.minor}",
) -> str:
"""Return the URL for the wheel for a specific release."""
filename = get_wheel_filename(
sys_platform=sys_platform, ray_version=ray_version, py_version=py_version
)
return (
f"https://ray-wheels.s3-us-west-2.amazonaws.com/releases/"
f"{ray_version}/{ray_commit}/{filename}"
)
# e.g. https://ray-wheels.s3-us-west-2.amazonaws.com/releases/1.4.0rc1/e7c7
# f6371a69eb727fa469e4cd6f4fbefd143b4c/ray-1.4.0rc1-cp36-cp36m-manylinux201
# 4_x86_64.whl
def validate_namespace(namespace: str):
if not isinstance(namespace, str):
raise TypeError("namespace must be None or a string.")
elif namespace == "":
raise ValueError(
'"" is not a valid namespace. ' "Pass None to not specify a namespace."
)
def init_grpc_channel(
address: str,
options: Optional[Sequence[Tuple[str, Any]]] = None,
asynchronous: bool = False,
):
grpc_module = aiogrpc if asynchronous else grpc
if os.environ.get("RAY_USE_TLS", "0").lower() in ("1", "true"):
server_cert_chain, private_key, ca_cert = load_certs_from_env()
credentials = grpc.ssl_channel_credentials(
certificate_chain=server_cert_chain,
private_key=private_key,
root_certificates=ca_cert,
)
channel = grpc_module.secure_channel(address, credentials, options=options)
else:
channel = grpc_module.insecure_channel(address, options=options)
return channel
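# Usage sketch (hypothetical address, shown for illustration): open a plain
# channel, or an asyncio-capable one with asynchronous=True. When RAY_USE_TLS
# is set, the branch above builds a secure channel from the certs in the
# environment instead.
#
#     channel = init_grpc_channel("127.0.0.1:50051")
#     aio_channel = init_grpc_channel("127.0.0.1:50051", asynchronous=True)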
def check_dashboard_dependencies_installed() -> bool:
"""Returns True if Ray Dashboard dependencies are installed.
Checks to see if we should start the dashboard agent or not based on the
Ray installation version the user has installed (ray vs. ray[default]).
Unfortunately there doesn't seem to be a cleaner way to detect this other
than just blindly importing the relevant packages.
"""
try:
import ray.dashboard.optional_deps # noqa: F401
return True
except ImportError:
return False
def internal_kv_get_with_retry(gcs_client, key, namespace, num_retries=20):
result = None
if isinstance(key, str):
key = key.encode()
for _ in range(num_retries):
try:
result = gcs_client.internal_kv_get(key, namespace)
except Exception as e:
if isinstance(e, grpc.RpcError) and e.code() in (
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.UNKNOWN,
):
logger.warning(
f"Unable to connect to GCS at {gcs_client.address}. "
"Check that (1) Ray GCS with matching version started "
"successfully at the specified address, and (2) there is "
"no firewall setting preventing access."
)
else:
logger.exception("Internal KV Get failed")
result = None
if result is not None:
break
else:
logger.debug(f"Fetched {key}=None from redis. Retrying.")
time.sleep(2)
if not result:
raise RuntimeError(
f"Could not read '{key.decode()}' from GCS. Did GCS start successfully?"
)
return result
def internal_kv_put_with_retry(gcs_client, key, value, namespace, num_retries=20):
if isinstance(key, str):
key = key.encode()
error = None
for _ in range(num_retries):
try:
return gcs_client.internal_kv_put(
key, value, overwrite=True, namespace=namespace
)
except grpc.RpcError as e:
if e.code() in (
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.UNKNOWN,
):
logger.warning(
f"Unable to connect to GCS at {gcs_client.address}. "
"Check that (1) Ray GCS with matching version started "
"successfully at the specified address, and (2) there is "
"no firewall setting preventing access."
)
else:
logger.exception("Internal KV Put failed")
time.sleep(2)
error = e
# Reraise the last grpc.RpcError.
raise error
def compute_version_info():
"""Compute the versions of Python, and Ray.
Returns:
A tuple containing the version information.
"""
ray_version = ray.__version__
python_version = ".".join(map(str, sys.version_info[:3]))
return ray_version, python_version
def get_directory_size_bytes(path: Union[str, Path] = ".") -> int:
"""Get the total size of a directory in bytes, including subdirectories."""
total_size_bytes = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
# skip if it is a symbolic link
if not os.path.islink(fp):
total_size_bytes += os.path.getsize(fp)
return total_size_bytes
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class TransportProtocol(Enum):
udp = "Udp"
tcp = "Tcp"
class IPAllocationMethod(Enum):
static = "Static"
dynamic = "Dynamic"
class IPVersion(Enum):
ipv4 = "IPv4"
ipv6 = "IPv6"
class SecurityRuleProtocol(Enum):
tcp = "Tcp"
udp = "Udp"
asterisk = "*"
class SecurityRuleAccess(Enum):
allow = "Allow"
deny = "Deny"
class SecurityRuleDirection(Enum):
inbound = "Inbound"
outbound = "Outbound"
class RouteNextHopType(Enum):
virtual_network_gateway = "VirtualNetworkGateway"
vnet_local = "VnetLocal"
internet = "Internet"
virtual_appliance = "VirtualAppliance"
none = "None"
class ApplicationGatewayProtocol(Enum):
http = "Http"
https = "Https"
class ApplicationGatewayCookieBasedAffinity(Enum):
enabled = "Enabled"
disabled = "Disabled"
class ApplicationGatewayBackendHealthServerHealth(Enum):
unknown = "Unknown"
up = "Up"
down = "Down"
partial = "Partial"
draining = "Draining"
class ApplicationGatewaySkuName(Enum):
standard_small = "Standard_Small"
standard_medium = "Standard_Medium"
standard_large = "Standard_Large"
waf_medium = "WAF_Medium"
waf_large = "WAF_Large"
class ApplicationGatewayTier(Enum):
standard = "Standard"
waf = "WAF"
class ApplicationGatewaySslProtocol(Enum):
tl_sv1_0 = "TLSv1_0"
tl_sv1_1 = "TLSv1_1"
tl_sv1_2 = "TLSv1_2"
class ApplicationGatewayRequestRoutingRuleType(Enum):
basic = "Basic"
path_based_routing = "PathBasedRouting"
class ApplicationGatewayOperationalState(Enum):
stopped = "Stopped"
starting = "Starting"
running = "Running"
stopping = "Stopping"
class ApplicationGatewayFirewallMode(Enum):
detection = "Detection"
prevention = "Prevention"
class AuthorizationUseStatus(Enum):
available = "Available"
in_use = "InUse"
class ExpressRouteCircuitPeeringAdvertisedPublicPrefixState(Enum):
not_configured = "NotConfigured"
configuring = "Configuring"
configured = "Configured"
validation_needed = "ValidationNeeded"
class ExpressRouteCircuitPeeringType(Enum):
azure_public_peering = "AzurePublicPeering"
azure_private_peering = "AzurePrivatePeering"
microsoft_peering = "MicrosoftPeering"
class ExpressRouteCircuitPeeringState(Enum):
disabled = "Disabled"
enabled = "Enabled"
class Access(Enum):
allow = "Allow"
deny = "Deny"
class ExpressRouteCircuitSkuTier(Enum):
standard = "Standard"
premium = "Premium"
class ExpressRouteCircuitSkuFamily(Enum):
unlimited_data = "UnlimitedData"
metered_data = "MeteredData"
class ServiceProviderProvisioningState(Enum):
not_provisioned = "NotProvisioned"
provisioning = "Provisioning"
provisioned = "Provisioned"
deprovisioning = "Deprovisioning"
class LoadDistribution(Enum):
default = "Default"
source_ip = "SourceIP"
source_ip_protocol = "SourceIPProtocol"
class ProbeProtocol(Enum):
http = "Http"
tcp = "Tcp"
class NetworkOperationStatus(Enum):
in_progress = "InProgress"
succeeded = "Succeeded"
failed = "Failed"
class EffectiveRouteSource(Enum):
unknown = "Unknown"
user = "User"
virtual_network_gateway = "VirtualNetworkGateway"
default = "Default"
class EffectiveRouteState(Enum):
active = "Active"
invalid = "Invalid"
class ProvisioningState(Enum):
succeeded = "Succeeded"
updating = "Updating"
deleting = "Deleting"
failed = "Failed"
class AssociationType(Enum):
associated = "Associated"
contains = "Contains"
class Direction(Enum):
inbound = "Inbound"
outbound = "Outbound"
class Protocol(Enum):
tcp = "TCP"
udp = "UDP"
class NextHopType(Enum):
internet = "Internet"
virtual_appliance = "VirtualAppliance"
virtual_network_gateway = "VirtualNetworkGateway"
vnet_local = "VnetLocal"
hyper_net_gateway = "HyperNetGateway"
none = "None"
class PcProtocol(Enum):
tcp = "TCP"
udp = "UDP"
any = "Any"
class PcStatus(Enum):
not_started = "NotStarted"
running = "Running"
stopped = "Stopped"
error = "Error"
unknown = "Unknown"
class PcError(Enum):
internal_error = "InternalError"
agent_stopped = "AgentStopped"
capture_failed = "CaptureFailed"
local_file_failed = "LocalFileFailed"
storage_failed = "StorageFailed"
class Origin(Enum):
local = "Local"
inbound = "Inbound"
outbound = "Outbound"
class Severity(Enum):
error = "Error"
warning = "Warning"
class IssueType(Enum):
unknown = "Unknown"
agent_stopped = "AgentStopped"
guest_firewall = "GuestFirewall"
dns_resolution = "DnsResolution"
socket_bind = "SocketBind"
network_security_rule = "NetworkSecurityRule"
user_defined_route = "UserDefinedRoute"
port_throttled = "PortThrottled"
platform = "Platform"
class ConnectionStatus(Enum):
unknown = "Unknown"
connected = "Connected"
disconnected = "Disconnected"
degraded = "Degraded"
class VirtualNetworkPeeringState(Enum):
initiated = "Initiated"
connected = "Connected"
disconnected = "Disconnected"
class VirtualNetworkGatewayType(Enum):
vpn = "Vpn"
express_route = "ExpressRoute"
class VpnType(Enum):
policy_based = "PolicyBased"
route_based = "RouteBased"
class VirtualNetworkGatewaySkuName(Enum):
basic = "Basic"
high_performance = "HighPerformance"
standard = "Standard"
ultra_performance = "UltraPerformance"
vpn_gw1 = "VpnGw1"
vpn_gw2 = "VpnGw2"
vpn_gw3 = "VpnGw3"
class VirtualNetworkGatewaySkuTier(Enum):
basic = "Basic"
high_performance = "HighPerformance"
standard = "Standard"
ultra_performance = "UltraPerformance"
vpn_gw1 = "VpnGw1"
vpn_gw2 = "VpnGw2"
vpn_gw3 = "VpnGw3"
class BgpPeerState(Enum):
unknown = "Unknown"
stopped = "Stopped"
idle = "Idle"
connecting = "Connecting"
connected = "Connected"
class ProcessorArchitecture(Enum):
amd64 = "Amd64"
x86 = "X86"
class VirtualNetworkGatewayConnectionStatus(Enum):
unknown = "Unknown"
connecting = "Connecting"
connected = "Connected"
not_connected = "NotConnected"
class VirtualNetworkGatewayConnectionType(Enum):
ipsec = "IPsec"
vnet2_vnet = "Vnet2Vnet"
express_route = "ExpressRoute"
vpn_client = "VPNClient"
class IpsecEncryption(Enum):
none = "None"
des = "DES"
des3 = "DES3"
aes128 = "AES128"
aes192 = "AES192"
aes256 = "AES256"
gcmaes128 = "GCMAES128"
gcmaes192 = "GCMAES192"
gcmaes256 = "GCMAES256"
class IpsecIntegrity(Enum):
md5 = "MD5"
sha1 = "SHA1"
sha256 = "SHA256"
gcmaes128 = "GCMAES128"
gcmaes192 = "GCMAES192"
gcmaes256 = "GCMAES256"
class IkeEncryption(Enum):
des = "DES"
des3 = "DES3"
aes128 = "AES128"
aes192 = "AES192"
aes256 = "AES256"
class IkeIntegrity(Enum):
md5 = "MD5"
sha1 = "SHA1"
sha256 = "SHA256"
sha384 = "SHA384"
class DhGroup(Enum):
none = "None"
dh_group1 = "DHGroup1"
dh_group2 = "DHGroup2"
dh_group14 = "DHGroup14"
dh_group2048 = "DHGroup2048"
ecp256 = "ECP256"
ecp384 = "ECP384"
dh_group24 = "DHGroup24"
class PfsGroup(Enum):
none = "None"
pfs1 = "PFS1"
pfs2 = "PFS2"
pfs2048 = "PFS2048"
ecp256 = "ECP256"
ecp384 = "ECP384"
pfs24 = "PFS24"
|
|
######################################################################
# Cloud Routes Web Application
# -------------------------------------------------------------------
# Users Class
######################################################################
from werkzeug.security import generate_password_hash, check_password_hash
import hashlib
import rethinkdb as r
import time
from monitors import Monitor
from reactions import Reaction
class User(object):
def __init__(self, uid=None):
''' Initialize the User Class '''
self.uid = uid
self.email = None
self.username = None
self.status = None
self.company = None
self.contact = None
self.domains = {}
self.reactions = {}
self.monitors = {}
self.acttype = None
self.stripe = None
self.stripeid = None
self.subplans = 2
self.subscription = None
self.payments = None
self.subscribed_to_newsletter = False
self.confirmed = False
self.confirmed_on = None
self.upgraded = False
self.monitorCount = None
self.reactionCount = None
self.config = None
def createUser(self, userdata, rdb):
'''
Given-
userdata = {
"username": "foo",
"password": "foo_password",
"email": "[email protected]",
"company": "foobar",
"contact": "[email protected]""
}
- create a new user in the RethinkDB database
'''
jsondata = {}
jsondata['username'] = userdata['username']
jsondata['password'] = self.createPass(userdata['password'])
jsondata['email'] = userdata['email']
jsondata['status'] = 'active'
jsondata['company'] = userdata['company']
jsondata['acttype'] = self.config['DEFAULT_PACKAGE']
jsondata['contact'] = userdata['contact']
jsondata['stripe'] = self.stripe
jsondata['stripeid'] = self.stripeid
jsondata['subplans'] = self.subplans
jsondata['payments'] = self.config['DEFAULT_PAYMENTS']
jsondata['subscription'] = self.config['PACKAGES'][self.config['DEFAULT_PACKAGE']]['subscription']
jsondata['subscribed_to_newsletter'] = self.subscribed_to_newsletter
jsondata['creation_time'] = time.time()
jsondata['confirmed'] = False
if self.is_active(userdata['username'], rdb):
return 'exists'
else:
results = r.table('users').insert(jsondata).run(rdb)
if results['inserted'] == 1:
return results['generated_keys'][0]
else:
return False
def saltPass(self, password):
        ''' Create an app-salt + user password hash (better than default) '''
salty_pass = self.config['PASSWORD_SALT'] + password
return hashlib.sha512(salty_pass).hexdigest()
def createPass(self, password):
''' Create a salted hashed password '''
password = self.saltPass(password)
return generate_password_hash(password)
def setPass(self, newpass, rdb):
''' Set a password in the database '''
password = self.createPass(newpass)
results = r.table('users').get(self.uid).update(
{'password': password}).run(rdb)
if results['replaced'] == 1:
return True
else:
return False
def getUID(self, username, rdb):
''' Lookup a users uid by username '''
results = r.table('users').filter(
r.row['username'] == username).run(rdb)
xdata = {}
for x in results:
key = x['username']
value = x['id']
xdata[key] = value
if username in xdata:
return xdata[username]
else:
return False
def get(self, method, lookup, rdb):
''' Lookup the user by the uid '''
if method == 'uid':
uid = lookup
elif method == 'username':
uid = self.getUID(lookup, rdb)
results = r.table('users').get(uid).run(rdb)
data = results
if data:
self.email = results['email']
self.uid = results['id']
self.username = results['username']
self.status = results['status']
self.company = results['company']
self.contact = results['contact']
self.acttype = results['acttype']
self.stripeid = results['stripeid']
self.stripe = results['stripe']
self.subplans = results['subplans']
self.payments = results['payments']
self.subscription = results['subscription']
self.creation_time = results['creation_time']
self.confirmed = results['confirmed']
## Identify number of monitors and reactions
monitor = Monitor()
reaction = Reaction()
self.monitorCount = monitor.count(self.uid, rdb)
self.reactionCount = reaction.count(self.uid, rdb)
return self
else:
return None
def checkPass(self, password, rdb):
''' Check if the password supplied is valid '''
results = r.table('users').get(self.uid).run(rdb)
data = results
if not data:
return "No data found"
else:
if check_password_hash(data['password'], password):
self.setPass(password, rdb)
return True
else:
password = self.saltPass(password)
return check_password_hash(data['password'], password)
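    # Reading of the flow above (a note on the existing logic, not new
    # behavior): the first check_password_hash() call compares the stored hash
    # against the *unsalted* password; a match means the account still carries
    # a legacy hash, so setPass() immediately rewrites it with saltPass()
    # applied. All other accounts fall through to the salted comparison.
    # Typical use:
    #
    #     user = User().get('username', 'foo', rdb)
    #     if user and user.checkPass('foo_password', rdb):
    #         pass  # authenticated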
def is_active(self, username, rdb):
''' Check if a user exists or not '''
count = r.table('users').filter(
{'username': username}).count().run(rdb)
if count >= 1:
self.active = True
else:
self.active = False
return self.active
def is_confirmed(self, username, rdb):
''' Check if a user is confirmed or not '''
results = r.table('users').filter({'username': username}).run(rdb)
if results:
return self.confirmed
else:
return "No data found"
def getDomains(self, rdb):
''' Returns a list of domain id's that this user owns '''
# Get Domains
results = r.table('domains').filter({'uid': self.uid}).run(rdb)
domains = {}
for x in results:
domains[x['id']] = x
self.domains = domains
return self.domains
def getReactions(self, rdb):
''' Returns a list of reaction id's that this user owns '''
# Get Reactions
results = r.table('reactions').filter(
{'uid': self.uid}).order_by('name').run(rdb)
reactions = {}
for x in results:
reactions[x['id']] = x
self.reactions = reactions
return self.reactions
def getMonitors(self, rdb):
''' Returns a list of monitor id's that this user owns '''
# Get Monitors
results = r.table('monitors').filter(
{'uid': self.uid}).order_by('name').run(rdb)
monitors = {}
for x in results:
monitors[x['id']] = x
self.monitors = monitors
return self.monitors
def getEvents(self, rdb):
''' Returns a list of events from the events table for this user '''
# Get Events
results = r.table('events').filter({'uid': self.uid}).order_by(
r.desc('time')).run(rdb)
eventsbycid = {}
for event in results:
if event['cid'] in eventsbycid:
eventsbycid[event['cid']].append(event)
else:
eventsbycid[event['cid']] = [ event ]
return eventsbycid
def setSubscription(self, rdb):
'''
        This will set a user's subscription
to the specified subscription plan
'''
# Get User id
results = r.table('users').get(self.uid).update(
{
'acttype': self.acttype,
'stripeid': self.stripeid,
'stripe': self.stripe,
'subscription': self.subscription,
'subplans': self.subplans
}
).run(rdb)
if results:
loginfo = {}
loginfo['type'] = "setSubscription"
loginfo['uid'] = self.uid
loginfo['acttype'] = self.acttype
loginfo['subplans'] = self.subplans
loginfo['subscription'] = self.subscription
loginfo['time'] = time.time()
logresult = r.table('subscription_history').insert(
loginfo).run(rdb)
return True
else:
return False
if __name__ == '__main__': # pragma: no cover
pass # pragma: no cover
|
|
import numpy as np
import ms
import shifter
import numpy as np
import scipy.optimize as op
from scipy import ndimage
import h5py
import time
from scipy.linalg import cho_factor, cho_solve
from interruptible_pool import InterruptiblePool
from nll_grad import nll_grad_lnX
from nll_grad_fb import v2_fit_single_patch , v3_fit_single_patch
from nll_ctr import fit
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import sampler
F = h5py.File('samplerx3.hdf5','r')#["sampler"]
K = F["samplerx3"]
Nthreads = 15 #number of threads
def fg(args):
return nll_grad_lnX(*args)
def fit_single_patch(data, mask, psf, old_flux, old_back, floor, gain):
C = floor + gain * np.abs(old_flux * psf + old_back)[mask]
A = np.ones((C.size, 2))
A[:, 1] = psf[mask]
AT = A.T
ATICA = np.dot(AT, A / C[:, None])
ATICY = np.dot(AT, data[mask] / C)
return np.dot(np.linalg.inv(ATICA), ATICY)
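# What fit_single_patch() solves, restated from the code above (symbols here
# are only for explanation): each patch is fit with the linear model
#     data ~= B * 1 + F * psf
# using per-pixel variances C = floor + gain * |F_old * psf + B_old| held fixed
# at the previous estimates. With A = [1, psf] over unmasked pixels, the return
# value is the weighted least-squares solution
#     [B, F] = (A^T C^-1 A)^-1 A^T C^-1 data,
# i.e. background first, flux second.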
class stuff(object):
def __init__(self, data, cx, cy, masks,
f = 5e-2, g = 1e-2, fl = 1e-5, H = 3, epsilon = 1e-2,
min_iter=5, max_iter=10, check_iter=5, tol=1.e-8):
""" inputs of the code: NxD data matrix and NxD mask matrix;
data contains images of stars, and mask contains questionable
pixels in each image.
N = the number of stars
D = the number of pixels in each patch
H = upsampling factor
cx, cy = centroiding offsets
            f = floor variance of the noise
fl = floor of the PSF model
"""
#self.tr = tr
#self.ts = ts
self.N = data.shape[0] #number of observations
self.D = data.shape[1] #input dimensionality
self.H = H #upsampling factor
self.epsilon = epsilon #smoothness parameter
self.data = np.atleast_2d(data) #making sure the data has the right dimension
self.masks = np.atleast_2d(masks) #making sure the mask has the right dimension
self.dx = cx #list of centroid offsets x
self.dy = cy #list of centroid offsets y
self.M = int(self.D**.5)
self.f = f #floor variance of noise model
self.fl = fl #floor of the PSF model
self.g = g
""" outputs of the code:
H*H*D-dimensional mean vector: X
N-dimensional flux vector: F
N-dimensional background: B
"""
self.F = np.zeros((self.N)) #Creating an N-dimensional Flux vector.
self.B = np.zeros((self.N)) #one flat-field per star
self.lnX = np.ones((self.D*self.H*self.H)) #log(X)
self.g = g #gain of the noise model
""" initialization of X, F, B by means of subtracting the median!(to initialize the background B),
            normalizing (to initialize the flux F),
shifting, and upsampling (to initialize the mean X)"""
self.initialize()
#""" recording parameters after each iteration """
self.write_pars_to_file(0)
""" updating F, B, centroids, X"""
self.update(max_iter, check_iter, min_iter, tol)
def initialize(self):
"""
initializing the parameters
"""
self.masks[self.masks == 0] = -1
self.masks[self.masks > 0] = False
self.masks[self.masks < 0] = True
self.masks = self.masks == True
#print self.masks[0]
m = int((self.D)**.5)
self.d2d = self.data.reshape(self.N , m , m)
self.dm = np.zeros((self.N, self.D))
X = np.zeros_like(self.lnX)
for i in range(self.N):
self.B[i] = np.array([self.d2d[i,m/2-4:m/2+5,-1:].mean(),
self.d2d[i,m/2-4:m/2+5,:1].mean(),
self.d2d[i,:1,m/2-4:m/2+5].mean(),
self.d2d[i,-1:,m/2-4:m/2+5].mean()]).mean()
self.dm[i] = self.data[i]-self.B[i]
self.dm -= self.dm.min()
self.F[i] = np.sum(self.dm[i])
self.dm[i] /= self.F[i]
shifted = shifter.shifter(self.dm[i], self.dx[i], self.dy[i])
obs = ndimage.interpolation.zoom(shifted.reshape(25,25), self.H,
output = None, order=3, mode='constant',
cval=0.0, prefilter=True).flatten()
X += obs.flatten()
X /= self.N
X[X<0] = self.fl
X[X==0] = self.fl
self.lnX = np.log(X)
"""
XX = np.exp(self.lnX).reshape(self.H*self.M ,self.H*self.M) + self.fl
plt.imshow(XX , interpolation = None , norm = LogNorm())
plt.title(r"$X_{\mathtt{initial}}$")
plt.colorbar()
plt.xticks(())
plt.yticks(())
plt.show()
gradx = self.func_grad_lnX_Nthreads(self.lnX)[1].reshape(75,75)
plt.imshow(np.abs(gradx) , interpolation = None , norm = LogNorm())
plt.title(r"$|d\mathtt{NLL}/d(\mathtt{lnX})|_{\mathtt{initial}}$")
plt.colorbar()
plt.xticks(())
plt.yticks(())
plt.show()
"""
def write_pars_to_file(self, step):
f = h5py.File("trial_iter_%d.h5"%(step), 'w')
grp = f.create_group('model_pars') # create group
columns = ['F', 'B', 'lnX']
n_cols = len(columns) # number of columns
col_fmt = [] # column format
for column in columns:
column_attr = getattr(self, column)
# write out file
grp.create_dataset(column, data=column_attr)
# save metadata
metadata = [ 'fl', 'f', 'g']
for metadatum in metadata:
grp.attrs[metadatum] = getattr(self, metadatum)
f.close()
def lsq_update_FB(self):
"""least square optimization of F&B"""
for p in range(self.N):
#fit_single_patch(data, mask, psf, old_flux, old_back, floor, gain)
Kp = np.array(K[str(p)])
mask = self.masks[p]
Y = self.data[p]
old_flux, old_back = self.F[p], self.B[p]
self.X = np.exp(self.lnX)
psf = np.dot(self.X + self.fl, Kp)
for i in range(10):
old_back, old_flux = fit_single_patch(Y, mask, psf, old_flux, old_back, self.f, self.g)
self.B[p], self.F[p] = old_back, old_flux
"""
modelp = np.dot(self.X + fl, K[p])
A = np.vstack([np.ones(self.D), modelp]).T
C = f + g*np.abs(modelp)
Y = self.data[p]
mask = self.masks[p]
A = A[mask]
C = C[mask]
Y = Y[mask]
AT = A.T
# this is the idea : C[self.masks[p]] = np.inf
ATICA = np.dot(AT, A/C[:,None])
factor = cho_factor(ATICA, overwrite_a = True)
x = cho_solve(factor, np.dot(AT, Y/C))
self.B[p], self.F[p] = x[0], x[1]
"""
def bfgs_update_FB(self):
MS = h5py.File('masked_samplerx3.hdf5','r')
MD = h5py.File('masked_data.hdf5','r')
masked_samplers = MS["masked_samplerx3"]#[tr]
masked_data = MD["masked_data"]#[tr]
for p in range(self.N):
#print masked_samplers
Kp = masked_samplers[str(p)]
Y = masked_data[str(p)]
theta = self.B[p], self.F[p]
psf = np.dot(np.exp(self.lnX) + self.fl, Kp)
grad_func = v3_fit_single_patch
#print grad_func
x = op.fmin_l_bfgs_b(grad_func, x0=theta, fprime = None, \
args=(Y, psf, self.f, self.g), approx_grad = False, \
bounds = [(0.,100.), (1.,10.**7.)], m=10, factr=1000., pgtol=1e-08, epsilon=1e-08, maxfun=60)
#print p, x
self.B[p], self.F[p] = x[0]
MS.close()
MD.close()
def update_centroids(self):
MD = h5py.File('masked_data.hdf5','r')
masked_data = MD["masked_data"]
        # updating the sampling matrices: we do not overwrite the original ones because
        # the new ones depend on the variance model, and the variance model is not
        # perfect at the moment!
GG = h5py.File('masked_samplerx3.hdf5','w')
Grp = GG.create_group("masked_samplerx3")
for p in range(self.N):
xp, yp = fit((self.dx[p], self.dy[p]), \
masked_data[str(p)], self.masks[p], \
self.X, self.F[p], self.B[p], \
self.f, self.g, self.fl)
masked_dset = sampler.imatrix_new(self.M, self.H, xp , yp)[: , self.masks[p]]
Grp.create_dataset(str(p), data = masked_dset)
GG.close()
MD.close()
def func_grad_lnX_Nthreads(self, params):
"""
        Use multiprocessing to calculate the negative log-likelihood and gradients
        w.r.t. lnX, plus the terms coming from the regularization.
"""
n_samples = self.N
self.lnX = params
#self.fl, self.f, self.g, self.H, Nthreads = args
Pool = InterruptiblePool(Nthreads)
mapfn = Pool.map
Nchunk = np.ceil(1. / Nthreads * n_samples).astype(np.int)
arglist = [None] * Nthreads
for i in range(Nthreads):
s = int(i * Nchunk)
e = int(s + Nchunk)
arglist[i] = (self.lnX, self.F, self.B, self.fl, self.f, self.g, self.H, s, e)
result = list(mapfn(fg, [ars for ars in arglist]))
nll, grad = result[0]
a = time.time()
for i in range(1, Nthreads):
nll += result[i][0]
grad += result[i][1]
#print "adding up nll's from individual threads", time.time() - a
Pool.close()
Pool.terminate()
Pool.join()
#computing the regularization term and its derivative w.r.t lnX
reg_func, reg_grad = self.reg_func_grad_lnX()
return nll + reg_func, grad + reg_grad
def reg_func_grad_lnX(self):
""" returns regularization term in NLL
and its derivative w.r.t lnX"""
self.X = np.exp(self.lnX)
b = int((self.D)**.5)
Z = self.X.reshape((self.H*b, self.H*b))
c= np.zeros_like(Z)
c[:,:-1] += Z[:, 1:]
c[:, 1:] += Z[:,:-1]
c[1:, :] += Z[:-1,:]
c[:-1,:] += Z[1:, :]
grad = 2.*self.epsilon*(4.*Z - c).flatten()*self.X
#grad = grad*self.X
func = self.epsilon*np.sum((Z[:,1:]-Z[:,:-1])**2.)+ self.epsilon*np.sum((Z[1:,:]-Z[:-1,:])**2.)
return func , grad
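    # The penalty computed above, in formula form: with Z the (H*M) x (H*M)
    # model image,
    #     func = epsilon * sum[(Z[i, j+1] - Z[i, j])**2 + (Z[i+1, j] - Z[i, j])**2],
    # a quadratic smoothness prior on horizontally and vertically adjacent
    # pixels. For interior pixels d(func)/dZ = 2*epsilon*(4*Z - sum of the four
    # neighbors), and the trailing factor self.X in `grad` is the chain rule
    # dZ/d(lnX) = X for the log parameterization.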
"""
def func_lnX_grad_lnX(self, params , *args):
returns Gradient w.r.t Log(X) & NLL,
replaced by func_lnX_grad_lnX_Nthreads,
keeping this for sanity check for now
self.data, self.F, self.B, K = args
self.lnX = params
n_samples = self.data.shape[0]
self.X = np.exp(self.lnX)
b = int((self.D)**.5)
Z = self.X.reshape((self.H*b, self.H*b))
c=np.zeros_like(Z)
c[:,:-1] += Z[:, 1:]
c[:, 1:] += Z[:,:-1]
c[1:, :] += Z[:-1,:]
c[:-1,:] += Z[1:, :]
grad = 2.*self.epsilon*(4.*Z - c).flatten()*self.X
#grad = grad*self.X
func = self.epsilon*np.sum((Z[:,1:]-Z[:,:-1])**2.)+ self.epsilon*np.sum((Z[1:,:]-Z[:-1,:])**2.)
for p in range(self.N):
Kp = np.array(K[str(p)])
Y = self.data[p]
modelp = self.F[p]*np.dot(self.X+self.fl, Kp) + self.B[p]
mask = self.masks[p]
Y = Y[mask]
modelp = modelp[mask]
ep = Y - modelp
varp = self.f + self.g*np.abs(modelp)
gradp = -1.*self.F[p]*Kp
gradp = gradp[:,mask]
gainp = (self.g/2.)*(varp**-1. - ep**2./varp**2.)
gradp = self.X[:,None]*gradp*(ep/varp - gainp)[None,:]
Gradp = gradp.sum(axis = 1)
grad += Gradp
func += 0.5*np.sum(((ep)**2.)/varp) + .5*np.sum(np.log(varp))
return func, grad
"""
"""
def grad_lnX(self, params , *args):
self.F, self.B = args
self.lnX = params
return self.func_lnX_grad_lnx[1]
def func_lnX(self, params , *args):
self.F, self.B = args
self.lnX = params
return self.func_lnX_grad_lnx[0]
def grad_F(self, params, *args):
self.lnX, self.B = args
self.F = params
self.X = np.exp(self.lnX)
grad = np.zeros_like(self.F)
for p in range(self.N):
Kp = K[p]
y = self.data[p]
mask = self.masks[p]
nmodelp = np.dot(self.X+fl,Kp)
modelp = self.F[p]*nmodelp + self.B[p]
y = y[mask]
nmodelp = nmodelp[mask]
modelp = modelp[mask]
residualp = y - modelp
#residualp[self.mask[p]!=0] = 0 #excluding flagged pixels from contributing to gradient_X
varp = f + g*np.abs(modelp)
gradp = -1.*nmodelp
gainp = (g/2.)*nmodelp*(varp**-1. - residualp**2./varp**2.)
#gainp[modelp<0] *= -1. #var=f+g|model| to account for numerical artifacts when sr model is sampled at the data grid
grad[p] = np.sum(residualp*gradp/varp) + np.sum(gainp)
return grad
def grad_B(self, params, *args):
self.lnX, self.F = args
self.B = params
self.X = np.exp(self.lnX)
grad = np.zeros_like(self.B)
for p in range(self.N):
y = self.data[p]
Kp = K[p]
modelp = self.F[p]*np.dot(self.X+fl,Kp) + self.B[p]
mask = self.masks[p]
y = y[mask]
modelp = modelp[mask]
varp = f+g*np.abs(modelp)
residualp = y - modelp
#residualp[self.mask[p]!=0] = 0 #excluding flagged pixels from contributing to gradient_X
gainp = - (g/2.)*(residualp**2./varp**2.) + (g/2.)*(varp**-1.)
#gainp[modelp<0] *= -1. #var=f+g|model| to account for numerical artifacts when sr model is sampled at the data grid
grad[p] = -1.*np.sum(residualp/varp) + np.sum(gainp)
return grad
def func_F(self , params, *args):
self.lnX, self.B = args
self.F = params
return self.nll()
def func_B(self, params, *args):
self.lnX, self.F = args
self.B = params
return self.nll()
"""
def bfgs_lnX(self, num_funccalls):
x = op.fmin_l_bfgs_b(self.func_grad_lnX_Nthreads, x0=self.lnX, fprime = None, \
args=(), approx_grad = False, \
bounds = [(np.log(1e-5), 0.) for _ in self.lnX], m=10, factr=10.0, pgtol=1e-5, epsilon=1e-8, maxfun=num_funccalls)
gx = x[2]["grad"]
print x
print gx
#X = np.exp(self.lnX).reshape(self.H*self.M ,self.H*self.M) + self.fl
#plt.imshow(np.abs(gx).reshape(100,100), interpolation = None, norm = LogNorm())
#plt.colorbar()
#plt.show()
self.lnX = x[0]
def bfgs_F(self):
x = op.fmin_l_bfgs_b(self.func_F,x0=self.F, fprime = self.grad_F,args=(self.lnX, self.B), approx_grad = False, \
bounds = None, m=10, factr=1000., pgtol=1e-02, epsilon=1e-02, maxfun=20)
#print x
self.F = x[0]
def bfgs_B(self):
x = op.fmin_l_bfgs_b(self.func_B,x0=self.B, fprime = self.grad_B,args=(self.lnX, self.F), approx_grad = False, \
bounds = None, m=10, factr=1000., pgtol=1e-02, epsilon=1e-02, maxfun=20)
#print x
self.B = x[0]
def nll(self):
self.X = np.exp(self.lnX)
b = int((self.D)**.5)
Z = self.X.reshape((self.H*b, self.H*b))
nll = self.epsilon*np.sum((Z[:,1:]-Z[:,:-1])**2.) + self.epsilon*np.sum((Z[1:,:]-Z[:-1,:])**2.)
for i in range(self.N):
Ki = np.array(K[str(i)])
Y = self.data[i]
model_i = self.F[i]*np.dot(self.X+self.fl, Ki) + self.B[i]
mask = self.masks[i]
Y = Y[mask]
model_i = model_i[mask]
var_i = self.f + self.g*np.abs(model_i)
residual_i = Y - model_i
nll += 0.5*np.sum(((residual_i)**2.)/var_i) + 0.5*np.sum(np.log(var_i))
return nll
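    # The objective above, written out: for star i with model
    #     m_i = F[i] * dot(X + fl, K_i) + B[i]
    # and per-pixel variance v_i = f + g*|m_i| over unmasked pixels,
    #     NLL = sum_i [0.5 * sum((Y_i - m_i)**2 / v_i) + 0.5 * sum(log v_i)]
    # plus the epsilon-weighted smoothness penalty on X (see reg_func_grad_lnX).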
def update(self, max_iter, check_iter, min_iter, tol):
nll = self.nll()
print "starting NLL is:", nll
np.savetxt("superb_wfc_mean_iter_%d.txt"%(0) , self.lnX ,fmt='%.64f')
for i in range(1, max_iter+1):
a = time.time()
self.bfgs_update_FB()
print time.time() - a
a = time.time()
#self.update_centroids()
print time.time() - a
a = time.time()
self.bfgs_lnX(200)
print time.time() - a
np.savetxt("superb_wfc_mean_iter_%d_nfljadid.txt"%(i) , self.lnX ,fmt='%.64f')
np.savetxt("superb_wfc_flux_iter_%d_nfjadid.txt"%(i) , self.F ,fmt='%.64f')
np.savetxt("superb_wfc_bkg_iter_%d_nfljadid.txt"%(i) , self.B ,fmt='%.64f')
"""
if (i==max_iter):
X = (np.exp(self.lnX)+self.fl).reshape(self.H*self.M ,self.H*self.M)
plt.imshow(X , interpolation = None , norm = LogNorm())
plt.title(r"$X_{\mathtt{final}}$")
plt.colorbar()
plt.xticks(())
plt.yticks(())
plt.show()"""
#np.savetxt("superb_wfc_mean_iter_%d.txt"%(i+1) , self.lnX ,fmt='%.64f')
"""
gradx = self.func_grad_lnX_Nthreads(self.lnX)[1].reshape(75,75)
plt.imshow(np.abs(gradx) , interpolation = None , norm = LogNorm())
plt.title(r"$|d\mathtt{NLL}/d(\mathtt{lnX})|_{\mathtt{final}}$")
plt.colorbar()
plt.xticks(())
plt.yticks(())
plt.show()"""
"""
a = time.time()
W = self.func_lnX_grad_lnX(self.lnX, self.data, self.F, self.B, K )
print time.time() - a
print W
a = time.time()
Q = self.func_lnX_grad_lnX_Nthreads(self.lnX)
#self.fl, self.f, self.g, self.H, Nthreads = args
print time.time() - a
print Q
print Q[0] - W[0], np.sum((Q[1] - W[1])**2.)s
"""
if np.mod(i, check_iter) == 0:
                new_nll = self.nll()
print 'NLL at step %d is:' % (i+1), new_nll
if (((nll - new_nll) / nll) < tol) & (min_iter < i):
print 'Stopping at step %d with NLL:' % i, new_nll
self.nll = new_nll
break
else:
nll = new_nll
self.nll = new_nll
F.close()
|
|
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Handles sequence of messages that are used to send OMCI to the ONU
"""
import structlog
from scapy.automaton import ATMT
from voltha.adapters.microsemi_olt.BaseOltAutomaton import BaseOltAutomaton
from voltha.adapters.microsemi_olt.PAS5211 import PAS5211MsgSendFrame, PAS5211MsgGetOltVersionResponse, PAS5211MsgSendFrameResponse, \
PAS5211EventFrameReceived, PAS5211MsgHeader, PAS5211SetVlanGenConfigResponse
from voltha.extensions.omci.omci_frame import OmciFrame
from voltha.adapters.microsemi_olt.PAS5211 import PAS5211GetOnuAllocs, PAS5211GetOnuAllocsResponse, PAS5211GetSnInfo, \
PAS5211GetSnInfoResponse, PAS5211GetOnusRange, PAS5211GetOnusRangeResponse, PAS5211MsgSetOnuOmciPortId, \
PAS5211MsgSetOnuOmciPortIdResponse, PAS5211MsgSetOnuAllocId, PAS5211MsgSetOnuAllocIdResponse, \
PAS5211SetSVlanAtConfig, PAS5211SetSVlanAtConfigResponse, PAS5211SetVlanDownConfig, \
PAS5211SetVlanDownConfigResponse, PAS5211SetDownVlanHandl, PAS5211SetDownVlanHandlResponse, \
PAS5211SetUplinkVlanHandl, PAS5211SetDownstreamPolicingConfigResponse, PAS5211SetDownstreamPolicingConfig, \
PAS5211SetPortIdPolicingConfig, PAS5211UnsetPortIdPolicingConfig, \
PAS5211MsgSendDbaAlgorithmMsg, PAS5211MsgSendDbaAlgorithmMsgResponse, \
PAS5211SetUpstreamPolicingConfigResponse, PAS5211SetUpstreamPolicingConfig, \
PAS5211MsgSetPortIdConfig, PAS5211MsgSetPortIdConfigResponse, \
PAS5211MsgGetOnuIdByPortId, PAS5211MsgGetOnuIdByPortIdResponse, \
PAS5211SetVlanUplinkConfiguration, PAS5211SetVlanUplinkConfigurationResponse, PAS5211SetUplinkVlanHandlResponse, PAS5211SetVlanGenConfig, PAS5211SetVlanGenConfigResponse, \
PAS5211GetPortIdDownstreamPolicingConfig, PAS5211GetPortIdDownstreamPolicingConfigResponse, PAS5211RemoveDownstreamPolicingConfig, \
PAS5211MsgHeader, PAS5211UnsetPortIdPolicingConfigResponse, PAS5211RemoveDownstreamPolicingConfigResponse, \
PAS5211SetPortIdPolicingConfigResponse, PAS5211EventAlarmNotification
from voltha.adapters.microsemi_olt.PAS5211_constants import OMCI_GEM_IWTP_IW_OPT_8021P_MAPPER, PON_FALSE, \
PON_1_TO_1_VLAN_MODE, PON_TRUE, PON_VLAN_UNUSED_TAG, PON_VLAN_UNUSED_PRIORITY, PON_VLAN_REPLACE_PRIORITY, \
PON_OUTPUT_VLAN_PRIO_HANDLE_INCOMING_VLAN, PON_VLAN_UNCHANGED_PRIORITY, PON_OUTPUT_VLAN_PRIO_HANDLE_DONT_CHANGE, \
PON_OUTPUT_VLAN_PRIO_HANDLE_DL_VLAN_TABLE, PON_DL_VLAN_SVLAN_REMOVE, PON_DL_VLAN_CVLAN_NO_CHANGE, \
PON_VLAN_DEST_DATAPATH, GEM_DIR_BIDIRECT, OMCI_MAC_BRIDGE_PCD_LANFCS_FORWARDED, \
OMCI_MAC_BRIDGE_PCD_ENCAP_METHOD_LLC, OMCI_8021P_MSP_UNMARKED_FRAME_TAG_FRAME, OMCI_8021P_MSP_TP_TYPE_NULL, \
OMCI_EX_VLAN_TAG_OCD_ASSOCIATION_TYPE_PPTP_ETH_UNI, OMCI_EX_VLAN_TAG_OCD_DS_MODE_US_INVERSE, PMC_UPSTREAM_PORT, \
    PON_DISABLE, PON_VLAN_CHANGE_TAG, PON_VLAN_DONT_CHANGE_TAG, PON_PORT_TYPE_GEM, PON_PORT_DESTINATION_CNI0, PON_ENABLE, PYTHAGORAS_UPDATE_AID_SLA, \
    SLA_gr_bw_gros, SLA_be_bw_gros, SLA_gr_bw_fine, SLA_be_bw_fine, PYTHAGORAS_DBA_DATA_COS, PYTHAGORAS_DBA_STATUS_REPORT_NSR, \
PMC_OFAL_NO_POLICY, UPSTREAM, DOWNSTREAM
log = structlog.get_logger()
MAX_RETRIES = 10
TIMEOUT = 5
class OltRemoveFlowStateMachine(BaseOltAutomaton):
onu_id = None
channel_id = None
port_id = None
onu_session_id = None
alloc_id = None
policy_id = None
retries = 0
def parse_args(self, debug=0, store=0, **kwargs):
self.onu_id = kwargs.pop('onu_id')
self.channel_id = kwargs.pop('channel_id')
self.port_id = kwargs.pop('port_id')
self.onu_session_id = kwargs.pop('onu_session_id')
self.alloc_id = kwargs.pop('alloc_id')
BaseOltAutomaton.parse_args(self, debug=debug, store=store, **kwargs)
def master_filter(self, pkt):
if not super(OltRemoveFlowStateMachine, self).master_filter(pkt):
return False
if PAS5211MsgHeader in pkt:
if PAS5211EventAlarmNotification not in pkt:
if PAS5211MsgGetOltVersionResponse not in pkt:
if pkt[PAS5211MsgHeader].channel_id == self.channel_id:
if pkt[PAS5211MsgHeader].onu_id == self.onu_id:
if OmciFrame not in pkt:
if PAS5211MsgSendFrameResponse not in pkt:
return True
return False
"""
States
"""
# Uplink states...
@ATMT.state(initial=1)
def send_msg(self):
log.debug('olt-flow-state-machine-start')
@ATMT.state()
def wait_set_port_id_configuration_response(self):
pass
@ATMT.state()
def wait_get_onu_id_by_port_id_response(self):
pass
@ATMT.state()
def wait_unset_port_id_downlink_policing_response(self):
pass
@ATMT.state(error=1)
def error(self, msg):
log.error(msg)
raise self.end()
@ATMT.state(final=1)
def end(self):
log.debug('olt-flow-state-machine-end')
# pass
"""
Utils
"""
def px(self, pkt):
return self.p(pkt, channel_id=self.channel_id,
onu_id=self.onu_id,
onu_session_id=self.onu_session_id)
"""
Transitions
"""
@ATMT.condition(send_msg)
def remove_flow(self):
self.send_get_onu_id_by_port_id(self.device.device, self.port_id)
raise self.wait_get_onu_id_by_port_id_response()
    @ATMT.timeout(wait_get_onu_id_by_port_id_response, TIMEOUT)
    def timeout_wait_get_onu_id_by_port_id_response(self):
#log.debug('api-proxy-timeout')
if self.retries < MAX_RETRIES:
self.retries += 1
self.send_get_onu_id_by_port_id(self.device.device, self.port_id)
else:
raise self.error("Timeout for message PAS5211MsgGetOnuIdByPortIdResponse")
@ATMT.receive_condition(wait_get_onu_id_by_port_id_response)
def wait_for_get_onu_id_by_port_id_response(self, pkt):
#log.debug('api-proxy-response')
if PAS5211MsgGetOnuIdByPortIdResponse in pkt:
log.debug('[RESPONSE] PAS5211MsgGetOnuIdByPortIdResponse')
self.send_unset_port_id_downlink_policing(self.device.device, 1, self.port_id)
raise self.wait_unset_port_id_downlink_policing_response()
else:
log.debug('Unexpected pkt {}'.format(pkt.summary()))
@ATMT.timeout(wait_unset_port_id_downlink_policing_response, TIMEOUT)
def timeout_wait_unset_port_id_downlink_policing_response(self):
#log.debug('api-proxy-timeout')
if self.retries < MAX_RETRIES:
self.retries += 1
self.send_unset_port_id_downlink_policing(self.device.device, 1, self.port_id)
else:
raise self.error("Timeout for message PAS5211UnsetPortIdPolicingConfigResponse")
@ATMT.receive_condition(wait_unset_port_id_downlink_policing_response)
def wait_for_unset_port_id_downlink_policing_response(self, pkt):
#log.debug('api-proxy-response')
if PAS5211UnsetPortIdPolicingConfigResponse in pkt:
log.debug('[RESPONSE] PAS5211UnsetPortIdPolicingConfigResponse')
self.send_set_port_id_configuration(self.device.device, PON_DISABLE, self.port_id, self.alloc_id)
raise self.wait_set_port_id_configuration_response()
else:
log.debug('Unexpected pkt {}'.format(pkt.summary()))
@ATMT.timeout(wait_set_port_id_configuration_response, TIMEOUT)
def timeout_wait_set_port_id_configuration_response(self):
#log.debug('api-proxy-timeout')
if self.retries < MAX_RETRIES:
self.retries += 1
self.send_set_port_id_configuration(self.device.device, PON_DISABLE, self.port_id, self.alloc_id)
else:
raise self.error("Timeout for message PAS5211MsgSetPortIdConfigResponse")
@ATMT.receive_condition(wait_set_port_id_configuration_response)
def wait_for_set_port_id_configuration_response(self, pkt):
#log.debug('api-proxy-response')
if PAS5211MsgSetPortIdConfigResponse in pkt:
log.debug('[RESPONSE] PAS5211MsgSetPortIdConfigResponse')
self.end()
else:
log.debug('Unexpected pkt {}'.format(pkt.summary()))
""" - - - - - - - create_double_vlan_flow_olt_config - - - - - - - """
def send_set_port_id_configuration(self, device, activate, port_id, alloc_id):
msg = PAS5211MsgSetPortIdConfig(
# port_id=1000 + device.proxy_address.onu_id,
port_id=port_id,
activate=activate,
alloc_id=alloc_id,
type=PON_PORT_TYPE_GEM,
destination=PON_PORT_DESTINATION_CNI0
)
self.send(self.px(msg))
log.debug("[SENT] PAS5211MsgSetPortIdConfig")
def send_get_onu_id_by_port_id(self, device, port_id):
msg = PAS5211MsgGetOnuIdByPortId(
# port_id=1000 + device.proxy_address.onu_id
port_id=port_id
)
self.send(self.px(msg))
log.debug("[SENT] PAS5211MsgGetOnuIdByPortId")
def send_unset_port_id_downlink_policing(self, device, dir, port_id):
msg = PAS5211UnsetPortIdPolicingConfig(direction=dir, port_id=port_id)
self.send(self.px(msg))
log.debug("[SENT] PAS5211UnsetPortIdPolicingConfig")
""" - - - - - - - END create_double_vlan_flow_olt_config - - - - - - - """
|
|
# -*- coding: UTF-8 -*-
# Copyright 2008-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""
Defines extended database field classes and utility functions
related to fields.
"""
import logging ; logger = logging.getLogger(__name__)
import datetime
from decimal import Decimal
from django import http
from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ValidationError
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import NOT_PROVIDED
from django.utils.functional import cached_property
from lino.core.utils import resolve_field, full_model_name, resolve_model
from lino.core.exceptions import ChangedAPI
from lino.core.diff import ChangeWatcher
from lino.core import constants
from lino.utils import isiterable
from lino.utils import get_class_attr
from lino.utils import IncompleteDate
from lino.utils import quantities
from lino.utils import choosers
from lino.utils.quantities import Duration
def validate_incomplete_date(value):
"""Raise ValidationError if user enters e.g. a date 30.02.2009.
"""
try:
value.as_date()
except ValueError:
raise ValidationError(_("Invalid date"))
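# A minimal usage sketch (not part of the original module; names assumed): the
# validator expects a value with an ``as_date()`` method, such as
# :class:`lino.utils.IncompleteDate`.
#
#   validate_incomplete_date(IncompleteDate(2009, 2, 28))  # passes silently
#   validate_incomplete_date(IncompleteDate(2009, 2, 30))  # raises ValidationError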
def set_default_verbose_name(f):
"""
If the verbose_name of a ForeignKey was not set by user code, Lino sets it
    to the verbose_name of the model it points to. This rule also holds for
    virtual FK fields.
For every FK field defined on a model (including virtual FK fields) this is
called during kernel startup. Django sets the `verbose_name` of every
field to ``field.name.replace('_', ' ')``.
For virtual FK fields defined on an actor or an action it is called a bit
later. These fields don't have a name.
"""
if f.name is None:
if f.verbose_name is None:
f.verbose_name = f.remote_field.model._meta.verbose_name
elif f.verbose_name == f.name.replace('_', ' '):
f.verbose_name = f.remote_field.model._meta.verbose_name
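# Illustration (an assumed example, not from the original module): for a plain
# FK like ``company = ForeignKey('contacts.Company')`` Django sets the
# verbose_name to "company"; because that equals ``name.replace('_', ' ')``,
# Lino replaces it with ``Company._meta.verbose_name`` at startup.  An explicit
# ``verbose_name=_("Employer")`` is left untouched.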
class PasswordField(models.CharField):
"""Stored as plain text in database, but not displayed in user
interface.
"""
pass
class RichTextField(models.TextField):
# See :doc:`/dev/textfield`.
def __init__(self, *args, **kw):
self.textfield_format = kw.pop(
'format', kw.pop('textfield_format', None))
self.bleached = kw.pop('bleached', None)
super(RichTextField, self).__init__(*args, **kw)
def set_format(self, fmt):
self.textfield_format = fmt
class PercentageField(models.DecimalField):
"""
A field to express a percentage.
The database stores this like a DecimalField.
Plain HTML adds a "%".
"""
def __init__(self, *args, **kwargs):
defaults = dict(
max_length=5,
max_digits=5,
decimal_places=2,
)
defaults.update(kwargs)
super(PercentageField, self).__init__(*args, **defaults)
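# Usage sketch (assumed model and field names):
#
#   class Invoice(models.Model):
#       # stored like DecimalField(max_digits=5, decimal_places=2)
#       discount = PercentageField(_("Discount"), blank=True, null=True)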
class TimeField(models.TimeField):
"""
Like a TimeField, but allowed values are between
:attr:`calendar_start_hour
<lino.core.site.Site.calendar_start_hour>` and
:attr:`calendar_end_hour <lino.core.site.Site.calendar_end_hour>`.
"""
pass
class DatePickerField(models.DateField):
"""
A DateField that uses a DatePicker instead of a normal DateWidget.
Doesn't yet work.
"""
pass
class MonthField(models.DateField):
"""
A DateField that uses a MonthPicker instead of a normal DateWidget
"""
def __init__(self, *args, **kw):
models.DateField.__init__(self, *args, **kw)
# def PriceField(*args, **kwargs):
# defaults = dict(
# max_length=10,
# max_digits=10,
# decimal_places=2,
# )
# defaults.update(kwargs)
# return models.DecimalField(*args, **defaults)
class PriceField(models.DecimalField):
"""
A thin wrapper around Django's `DecimalField
<https://docs.djangoproject.com/en/3.1/ref/models/fields/#decimalfield>`_
which adds default values for `decimal_places`, `max_length` and
`max_digits`.
"""
def __init__(self, verbose_name=None, max_digits=10, **kwargs):
defaults = dict(
max_length=max_digits,
max_digits=max_digits,
decimal_places=2,
)
defaults.update(kwargs)
super(PriceField, self).__init__(verbose_name, **defaults)
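# Usage sketch (assumed example): with the defaults this is equivalent to
# DecimalField(decimal_places=2, max_digits=10, max_length=10).
#
#   class OrderItem(models.Model):
#       unit_price = PriceField(_("Unit price"), default=Decimal("0.00"))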
#~ class MyDateField(models.DateField):
#~ def formfield(self, **kwargs):
#~ fld = super(MyDateField, self).formfield(**kwargs)
# ~ # display size is smaller than full size:
#~ fld.widget.attrs['size'] = "8"
#~ return fld
"""
http://stackoverflow.com/questions/454436/unique-fields-that-allow-nulls-in-django
answer Dec 20 '09 at 3:40 by mightyhal
http://stackoverflow.com/a/1934764
"""
# class NullCharField(models.CharField): # subclass the CharField
# description = "CharField that stores empty strings as NULL instead of ''."
# def __init__(self, *args, **kwargs):
# defaults = dict(blank=True, null=True)
# defaults.update(kwargs)
# super(NullCharField, self).__init__(*args, **defaults)
# # this is the value right out of the db, or an instance
# def to_python(self, value):
# # ~ if isinstance(value, models.CharField): #if an instance, just return the instance
# if isinstance(value, six.string_types): # if a string, just return the value
# return value
# if value is None: # if the db has a NULL (==None in Python)
# return '' # convert it into the Django-friendly '' string
# else:
# return value # otherwise, return just the value
# def get_db_prep_value(self, value, connection, prepared=False):
# # catches value right before sending to db
# # if Django tries to save '' string, send the db None (NULL)
# if value == '':
# return None
# else:
# return value # otherwise, just pass the value
class FakeField(object):
"""
Base class for :class:`RemoteField` and :class:`DisplayField`.
"""
model = None
db_column = None
choices = []
primary_key = False
editable = False
name = None
null = True
serialize = False
verbose_name = None
help_text = None
preferred_width = 30
preferred_height = 3
max_digits = None
decimal_places = None
default = NOT_PROVIDED
generate_reverse_relation = False # needed when AFTER17
remote_field = None
blank = True # 20200425
wildcard_data_elem = False
"""Whether to consider this field as wildcard data element.
"""
sortable_by = None
"""
A list of names of real fields to be used for sorting when this
fake field is selected. For remote fields this is set
automatically, on virtual fields you can set it yourself.
"""
# required by Django 1.8+:
is_relation = False
concrete = False
auto_created = False
column = None
empty_values = set([None, ''])
# required by Django 1.10+:
one_to_many = False
one_to_one = False
# required since 20171003
rel = None
def __init__(self, **kwargs):
for k, v in kwargs.items():
if not hasattr(self, k):
raise Exception("{} has no attribute {}".format(self, k))
setattr(self, k, v)
def is_enabled(self, lh):
"""
Overridden by mti.EnableChild
"""
return self.editable
def clean(self, raw_value, obj):
# needed for Django 1.8
return raw_value
def has_default(self):
return self.default is not NOT_PROVIDED
def get_default(self):
return self.default
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname = name
self.column = None
self.concrete = False
# if self.verbose_name is None and self.name:
# self.verbose_name = self.name.replace('_', ' ')
class RemoteField(FakeField):
"""
A field on a related object.
Remote fields are created by
:meth:`lino.core.model.Model.get_data_elem` when needed.
.. attribute:: field
The bottom-level (leaf) field object.
"""
#~ primary_key = False
#~ editable = False
def __init__(self, getter, name, fld, setter=None, **kwargs):
self.func = getter
self.name = name
self.attname = name
# self.db_column = name # 20200423
self.field = fld
# for k in ('verbose_name', 'help_text', 'blank', 'default', 'null'):
# kwargs.setdefault(k, getattr(fld, k))
self.verbose_name = fld.verbose_name
self.help_text = fld.help_text
# self.blank = fld.blank
self.blank = True
self.default = None
# self.null = fld.null
# self.null = getattr(fld, 'null', None)
self.max_length = getattr(fld, 'max_length', None)
self.max_digits = getattr(fld, 'max_digits', None)
self.decimal_places = getattr(fld, 'decimal_places', None)
self.sortable_by = [ name ]
self.setter = setter
if setter is not None:
self.editable = True
self.choices = getattr(fld, 'choices', None)
super(RemoteField, self).__init__(**kwargs)
#~ print 20120424, self.name
#~ settings.SITE.register_virtual_field(self)
# The remote_field of a FK field has nothing to do with our RemoteField,
# it is set by Django on each FK field and points to
if isinstance(fld, VirtualField) and isinstance(fld.return_type, models.ForeignKey):
fld.lino_resolve_type() # 20200425
fk = fld.return_type
elif isinstance(fld, models.ForeignKey):
fk = fld
else:
fk = None
if fk is not None:
# if not fk.remote_field:
# raise Exception("20200425 {} has no remote_field".format(fk))
self.remote_field = fk.remote_field
from lino.core import store
store.get_atomizer(self.remote_field, self, name)
def value_from_object(self, obj, ar=None):
"""
Return the value of this field in the specified model instance
        `obj`. `ar` may be `None`; it is forwarded to the getter
        method, which may decide to return different values depending on it.
"""
m = self.func
return m(obj, ar)
def __get__(self, instance, owner):
if instance is None:
return self
return self.value_from_object(instance)
class DisplayField(FakeField):
"""
A field to be rendered like a normal read-only form field, but with
plain HTML instead of an ``<input>`` tag.
This is to be used as
the `return_type` of a :class:`VirtualField`.
The value to be represented is either some unicode text, a
translatable text or a :mod:`HTML element <etgen.html>`.
"""
choices = None
blank = True # 20200425
drop_zone = None
max_length = None
def __init__(self, verbose_name=None, **kwargs):
self.verbose_name = verbose_name
super(DisplayField, self).__init__(**kwargs)
# the following dummy methods are never called but needed when
# using a DisplayField as return_type of a VirtualField
def to_python(self, *args, **kw):
return None
# raise NotImplementedError(
# "{}.to_python({},{})".format(self.name, args, kw))
def save_form_data(self, *args, **kw):
raise NotImplementedError
def value_to_string(self, *args, **kw):
raise NotImplementedError
def value_from_object(self, obj, ar=None):
return self.default
class HtmlBox(DisplayField):
"""
Like :class:`DisplayField`, but to be rendered as a panel rather
than as a form field.
"""
pass
# class VirtualGetter(object):
# """A wrapper object for getting the content of a virtual field
# programmatically.
# """
# def __init__(self, vf, instance):
# self.vf = vf
# self.instance = instance
# def __call__(self, ar=None):
# return self.vf.value_from_object(self.instance, ar)
# # def __get__(self, instance, owner):
# # return self.vf.value_from_object(instance, None)
# def __getattr__(self, name):
# obj = self.vf.value_from_object(self.instance, None)
# return getattr(obj, name)
# def __repr__(self):
# return "<{0}>.{1}".format(repr(self.instance), self.vf.name)
class VirtualModel:
def __init__(self, model):
self.wrapped_model = model
self._meta = model._meta
VFIELD_ATTRIBS = frozenset('''to_python choices save_form_data
value_to_string max_length remote_field
max_digits verbose_name decimal_places wildcard_data_elem
blank'''.split())
class VirtualField(FakeField):
"""
Represents a virtual field. Values of virtual fields are not stored
in the database, but computed on the fly each time they get
read. Django doesn't see them.
A virtual field must have a `return_type`, which can be either a
Django field type (CharField, TextField, IntegerField,
BooleanField, ...) or one of Lino's custom fields
:class:`DisplayField`, :class:`HtmlBox` or :class:`RequestField`.
The `get` must be a callable which takes two arguments: `obj` the
database object and `ar` an action request.
The :attr:`model` of a VirtualField is the class where the field
was *defined*. This can be an abstract model. The VirtualField
instance does not have a list of the concrete models which use it
(because they inherit from that class).
"""
def __init__(self, return_type, get, **kwargs):
"""
Normal VirtualFields are read-only and not editable.
We don't want to require application developers to explicitly
specify `editable=False` in their return_type::
@dd.virtualfield(dd.PriceField(_("Total")))
def total(self, ar=None):
return self.total_excl + self.total_vat
"""
self.return_type = return_type # a Django Field instance
self.get = get
# if isinstance(return_type, FakeField):
# sortable_by = return_type.sortable_by
# self.sortable_by = sortable_by
# if sortable_by and isinstance(sortable_by, list):
# sortable_by = sortable_by[0]
# self.column = sortable_by
# for k in VFIELD_ATTRIBS:
# setattr(self, k, getattr(return_type, k, None))
settings.SITE.register_virtual_field(self)
super(VirtualField, self).__init__(**kwargs)
def lino_resolve_type(self):
"""
Called on every virtual field when all models are loaded.
"""
f = self.return_type
if isinstance(f, str):
try:
f = self.return_type = resolve_field(f)
except Exception as e:
raise Exception(
"Invalid return type spec {} for {} : {}".format(f, self, e))
if isinstance(f, FakeField):
sortable_by = f.sortable_by
self.sortable_by = sortable_by
if sortable_by and isinstance(sortable_by, list):
sortable_by = sortable_by[0]
self.column = sortable_by
if isinstance(f, models.ForeignKey):
f.remote_field.model = resolve_model(f.remote_field.model)
set_default_verbose_name(f)
self.get_lookup = f.remote_field.get_lookup # 20200425
self.get_path_info = f.remote_field.get_path_info # 20200425
self.remote_field = f.remote_field
for k in VFIELD_ATTRIBS:
setattr(self, k, getattr(f, k, None))
# copy help_text if it hasn't been set by help_texts_extractor
if f.help_text and not self.help_text:
self.help_text = f.help_text
# if self.name == 'detail_pointer':
# logger.info('20170905 resolve_type 1 %s on %s',
# self.name, self.verbose_name)
#~ removed 20120919 self.return_type.editable = self.editable
# if self.name == 'detail_pointer':
# logger.info('20170905 resolve_type done %s %s',
# self.name, self.verbose_name)
from lino.core import store
store.get_atomizer(self.model, self, self.name)
# print("20181023 Done: lino_resolve_type() for {}".format(self))
def override_getter(self, get):
self.get = get
def attach_to_model(self, model, name):
self.model = model
self.name = name
self.attname = name
if hasattr(self.return_type, 'model'):
# logger.info("20200425 return_type for virtual field %s has a model", self)
return
self.return_type.model = VirtualModel(model)
self.return_type.column = None
# if name == "overview":
# print("20181022", self, self.verbose_name)
#~ self.return_type.name = name
#~ self.return_type.attname = name
#~ if issubclass(model,models.Model):
#~ self.lino_resolve_type(model,name)
# must now be done by caller code:
# if AFTER17:
# model._meta.add_field(self, virtual=True)
# else:
# model._meta.add_virtual_field(self)
# if self.get is None:
# return
# if self.get.func_code.co_argcount != 2:
# if self.get.func_code.co_argcount == 2:
# getter = self.get
# def w(fld, obj, ar=None):
# return getter(obj, ar)
# self.get = w
# logger.warning("DeprecationWarning")
# else:
# msg = "Invalid getter for VirtualField {}".format(self)
# raise ChangedAPI(msg)
#~ logger.info('20120831 VirtualField %s.%s',full_model_name(model),name)
def __repr__(self):
if self.model is None:
return "{} {} ({})".format(
self.__class__.__name__, self.name, self.verbose_name)
# return super(VirtualField, self).__repr__()
return "%s.%s.%s" % (self.model.__module__,
self.model.__name__, self.name)
def get_default(self):
return self.return_type.get_default()
#~
def has_default(self):
return self.return_type.has_default()
def unused_contribute_to_class(self, cls, name):
# if defined in abstract base class, called once on each submodel
if self.name:
if self.name != name:
raise Exception("Attempt to re-use %s as %s in %s" % (
self.__class__.__name__, name, cls))
else:
self.name = name
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
self.model = cls
cls._meta.add_virtual_field(self)
#~ cls._meta.add_field(self)
def to_python(self, *args, **kwargs):
return self.return_type.to_python(*args, **kwargs)
#~ def save_form_data(self,*args,**kw): return self.return_type.save_form_data(*args,**kw)
#~ def value_to_string(self,*args,**kw): return self.return_type.value_to_string(*args,**kw)
#~ def get_choices(self): return self.return_type.choices
#~ choices = property(get_choices)
def set_value_in_object(self, ar, obj, value):
"""
Stores the specified `value` in the specified model instance
`obj`. `request` may be `None`.
Note that any implementation must return `obj`, and
callers must be ready to get another instance. This special
behaviour is needed to implement
:class:`lino.utils.mti.EnableChild`.
"""
pass
# if value is not None:
# raise NotImplementedError("Cannot write %s to field %s" %
# (value, self))
def value_from_object(self, obj, ar=None):
"""
Return the value of this field in the specified model instance
        `obj`. `ar` may be `None`; it is forwarded to the getter
        method, which may decide to return different values depending on it.
"""
m = self.get
#~ print self.field.name
# return m(self, obj, ar)
return m(obj, ar)
# try:
# return m(obj, ar)
# except TypeError as e:
# return "{} : {}".format(self, e)
def __get__(self, instance, owner):
if instance is None:
return self
return self.value_from_object(instance, None)
# return VirtualGetter(self, instance)
def __set__(self, instance, value):
return self.set_value_in_object(None, instance, value)
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
from django.db.models.expressions import Col
return Col(alias, self, output_field)
else:
return self.cached_col
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
selected as AsText(table.col) on MySQL as the table.col data can't be
used by Django.
"""
return sql, params
def virtualfield(return_type, **kwargs):
"""
Decorator to turn a method into a :class:`VirtualField`.
"""
def decorator(fn):
if isinstance(return_type, DummyField):
return DummyField(fn)
return VirtualField(return_type, fn, **kwargs)
return decorator
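# Sketch of the decorator in action (assumed model and field names), mirroring
# the example in the VirtualField docstring above:
#
#   class Invoice(models.Model):
#       total_excl = PriceField(_("Total excl. VAT"))
#       total_vat = PriceField(_("VAT"))
#
#       @virtualfield(PriceField(_("Total incl. VAT")))
#       def total_incl(self, ar=None):
#           return self.total_excl + self.total_vat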
class Constant(object):
"""
Deserves more documentation.
"""
def __init__(self, text_fn):
self.text_fn = text_fn
def constant():
"""
Decorator to turn a function into a :class:`Constant`. The
function must accept one positional argument `datasource`.
"""
def decorator(fn):
return Constant(fn)
return decorator
class RequestField(VirtualField):
"""
A :class:`VirtualField` whose values are table action requests to
be rendered as a clickable integer containing the number of rows.
Clicking on it will open a window with the table.
"""
def __init__(self, get, *args, **kw):
kw.setdefault('max_length', 8)
VirtualField.__init__(self, DisplayField(*args, **kw), get)
def displayfield(*args, **kw):
"""
Decorator to turn a method into a :class:`VirtualField` of type
:class:`DisplayField`.
"""
return virtualfield(DisplayField(*args, **kw))
def htmlbox(*args, **kwargs):
"""
    Decorator shortcut to turn a method into a :class:`VirtualField`
of type :class:`HtmlBox`.
"""
return virtualfield(HtmlBox(*args, **kwargs))
def requestfield(*args, **kw):
"""
    Decorator shortcut to turn a method into a :class:`VirtualField`
of type :class:`RequestField`.
"""
def decorator(fn):
#~ def wrapped(*args):
#~ return fn(*args)
#~ return RequestField(wrapped,*args,**kw)
return RequestField(fn, *args, **kw)
return decorator
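# Sketch (assumed example): displayfield() wraps a method so that its return
# value (text or an etgen.html element) is rendered as a read-only form field.
#
#   class Partner(models.Model):
#       @displayfield(_("Overview"))
#       def overview(self, ar):
#           return ar.obj2html(self) if ar is not None else str(self)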
class CharField(models.CharField):
"""
An extension around Django's `models.CharField`.
Adds two keywords `mask_re` and `strip_chars_re` which, when using
the ExtJS front end, will be rendered as the `maskRe` and `stripCharsRe`
config options of `TextField` as described in the `ExtJS
documentation
<http://docs.sencha.com/extjs/3.4.0/#!/api/Ext.form.TextField>`__,
converting naming conventions as follows:
=============== ============ ==========================
regex regex A JavaScript RegExp object to be tested against the field value during validation (defaults to null). If the test fails, the field will be marked invalid using regexText.
mask_re maskRe An input mask regular expression that will be used to filter keystrokes that do not match (defaults to null). The maskRe will not operate on any paste events.
strip_chars_re stripCharsRe A JavaScript RegExp object used to strip unwanted content from the value before validation (defaults to null).
=============== ============ ==========================
Example usage:
belgian_phone_no = dd.CharField(max_length=15,strip_chars_re='')
"""
def __init__(self, *args, **kw):
self.strip_chars_re = kw.pop('strip_chars_re', None)
self.mask_re = kw.pop('mask_re', None)
self.regex = kw.pop('regex', None)
super(CharField, self).__init__(*args, **kw)
class QuantityField(models.CharField):
"""
A field that accepts :class:`Quantity
<lino.utils.quantities.Quantity>`, :class:`Percentage
<lino.utils.quantities.Percentage>` and :class:`Duration
<lino.utils.quantities.Duration>` values.
Implemented as a CharField (sorting or filter ranges may not work
as expected).
    If you set `blank=True`, you should also declare `null=True`.
"""
description = _("Quantity (Decimal or Duration)")
def __init__(self, *args, **kw):
kw.setdefault('max_length', 6)
super(QuantityField, self).__init__(*args, **kw)
if self.blank and not self.null:
raise ChangedAPI(
"When `blank` is True, `null` must be True as well.")
#~ def get_internal_type(self):
#~ return "CharField"
def to_python(self, value):
"""
Excerpt from `Django docs
<https://docs.djangoproject.com/en/3.1/howto/custom-model-fields/#converting-values-to-python-objects>`__:
As a general rule, :meth:`to_python` should deal gracefully with
any of the following arguments:
- An instance of the correct type (e.g., `Hand` in our ongoing example).
- A string (e.g., from a deserializer).
- `None` (if the field allows `null=True`)
I'd add "Any value allowed for this field when instantiating a model."
"""
if isinstance(value, Decimal):
return quantities.Quantity(value)
if value:
# try:
if isinstance(value, str):
return quantities.parse(value)
return quantities.Quantity(value)
# except Exception as e:
# raise ValidationError(
# "Invalid value {} for {} : {}".format(value, self, e))
return None
def from_db_value(self, value, expression, connection, context=None):
return quantities.parse(value) if value else self.get_default()
# def get_db_prep_value(self, value, connection, prepared=False):
# return str(value) if value else ''
def get_prep_value(self, value):
# if value is None:
# return ''
return str(value) if value else ''
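# Round-trip sketch (values assumed): to_python() accepts a Decimal or a
# string, and get_prep_value() stores the value as CHAR.
#
#   f = QuantityField(blank=True, null=True)
#   f.to_python("2.5")         # -> a Quantity
#   f.to_python(Decimal("3"))  # -> a Quantity
#   f.get_prep_value(None)     # -> ''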
class DurationField(QuantityField):
"""
A field that stores :class:`Duration
<lino.utils.quantities.Duration>` values as CHAR.
    Note that you cannot use SUM or AVG aggregators on these fields
since the database does not know how to calculate sums from them.
"""
def from_db_value(self, value, expression, connection, context=None):
return Duration(value) if value else self.get_default()
def to_python(self, value):
if isinstance(value, Duration):
return value
if value:
# if isinstance(value, six.string_types):
# return Duration(value)
return Duration(value)
return None
class IncompleteDateField(models.CharField):
"""
A field that behaves like a DateField, but accepts incomplete
dates represented using
:class:`lino.utils.format_date.IncompleteDate`.
"""
default_validators = [validate_incomplete_date]
def __init__(self, *args, **kw):
kw.update(max_length=11)
# msgkw = dict()
# msgkw.update(ex1=IncompleteDate(1980, 0, 0)
# .strftime(settings.SITE.date_format_strftime))
# msgkw.update(ex2=IncompleteDate(1980, 7, 0)
# .strftime(settings.SITE.date_format_strftime))
# msgkw.update(ex3=IncompleteDate(0, 7, 23)
# .strftime(settings.SITE.date_format_strftime))
        kw.setdefault('help_text', _("""\
Incomplete dates are allowed, e.g.
"00.00.1980" means "some day in 1980",
"00.07.1980" means "in July 1980"
or "23.07.0000" means "on a 23rd of July"."""))
models.CharField.__init__(self, *args, **kw)
def deconstruct(self):
name, path, args, kwargs = super(IncompleteDateField, self).deconstruct()
del kwargs["max_length"]
return name, path, args, kwargs
# def get_internal_type(self):
# return "CharField"
def from_db_value(self, value, expression, connection, context=None):
return IncompleteDate.parse(value) if value else self.get_default()
# if value:
# return IncompleteDate.parse(value)
# return ''
def to_python(self, value):
if isinstance(value, IncompleteDate):
return value
if isinstance(value, datetime.date):
#~ return IncompleteDate(value.strftime("%Y-%m-%d"))
#~ return IncompleteDate(d2iso(value))
return IncompleteDate.from_date(value)
# if value:
# return IncompleteDate.parse(value)
# return ''
return IncompleteDate.parse(value) if value else ''
# def get_prep_value(self, value):
# return str(value)
def get_prep_value(self, value):
return str(value) if value else ''
# if value:
# return str(value)
# # return '"' + str(value) + '"'
# #~ return value.format("%04d%02d%02d")
# return ''
#~ def value_to_string(self, obj):
#~ value = self._get_val_from_obj(obj)
#~ return self.get_prep_value(value)
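# Sketch (assumed model): values are stored as an 11-character string and come
# back as lino.utils.IncompleteDate instances.
#
#   class Person(models.Model):
#       birth_date = IncompleteDateField(_("Birth date"), blank=True)
#
#   # "00.07.1980" means "some day in July 1980":
#   p = Person(birth_date=IncompleteDate(1980, 7, 0))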
class Dummy(object):
pass
class DummyField(FakeField):
"""
Represents a field that doesn't exist in the current configuration
but might exist in other configurations. The "value" of a
DummyField is always `None`.
See e.g. :func:`ForeignKey` and :func:`fields_list`.
"""
# choices = []
# primary_key = False
def __init__(self, *args, **kw):
pass
# def __init__(self, name, *args, **kw):
# self.name = name
def __str__(self):
return self.name
def __get__(self, instance, owner):
if instance is None:
return self
return None
def get_default(self):
return None
def contribute_to_class(self, cls, name):
self.name = name
v = getattr(cls, name, NOT_PROVIDED)
if v is not NOT_PROVIDED:
msg = ("{0} cannot contribute to {1} because it has already "
"an attribute '{2}'.")
msg = msg.format(self, cls, name)
if settings.SITE.ignore_model_errors:
logger.warning(msg)
else:
raise Exception(msg)
setattr(cls, name, self)
def set_attributes_from_name(self, k):
pass
class RecurrenceField(models.CharField):
"""
Deserves more documentation.
"""
def __init__(self, *args, **kw):
kw.setdefault('max_length', 200)
models.CharField.__init__(self, *args, **kw)
def OneToOneField(*args, **kwargs):
"""
Instantiate a :class:`django.db.models.OneToOneField` using :func:`pointer_factory`.
"""
return pointer_factory(models.OneToOneField, *args, **kwargs)
def ForeignKey(*args, **kwargs):
"""
Instantiate a :class:`django.db.models.ForeignKey` using
:func:`pointer_factory`.
"""
return pointer_factory(models.ForeignKey, *args, **kwargs)
class CustomField(object):
"""
Mixin to create a custom field.
It defines a single method :meth:`create_layout_elem`.
"""
def create_layout_elem(self, base_class, layout_handle, field, **kw):
"""Return the widget to represent this field in the specified
`layout_handle`.
The widget must be an instance of the given `base_class`.
        `self` and `field` are identical unless `self` is a
        :class:`RemoteField` or a :class:`VirtualField`.
"""
return None
class ImportedFields(object):
"""
Mixin for models which have "imported fields".
"""
_imported_fields = set()
@classmethod
def declare_imported_fields(cls, names):
cls._imported_fields = cls._imported_fields | set(
fields_list(cls, names))
#~ logger.info('20120801 %s.declare_imported_fields() --> %s' % (
#~ cls,cls._imported_fields))
class TableRow(object):
"""Base class for everything that can be used as a table row. """
_lino_default_table = None
hidden_columns = frozenset()
"""If specified, this is the default value for
:attr:`hidden_columns<lino.core.tables.AbstractTable.hidden_columns>`
of every `Table` on this model.
"""
@classmethod
def setup_parameters(cls, params):
"""Inheritable hook for defining parameters for every actor on this model.
Called at site startup once for each actor using this model.
        Does not return anything. Receives a `dict` object `params` and is
expected to update that `dict`, which will be used to fill the actor's
:attr:`parameters`.
See also :meth:`get_simple_parameters`.
"""
pass
@classmethod
def get_simple_parameters(cls):
"""
Return or yield a list of names of simple parameter fields of every
actor that uses this model.
When the list contains names for which no parameter field is
defined, then Lino creates that parameter field as a copy of
the database field of the same name.
This is also called by :meth:`get_title_tags`, you don't need to
manually define title tags for simple parameters.
"""
return []
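    # Sketch of the hook (assumed model, using the documented dd shortcuts):
    # yielding a field name makes Lino create a same-named parameter field and
    # filter on it automatically.
    #
    #   class Ticket(dd.Model):
    #       user = dd.ForeignKey('users.User')
    #
    #       @classmethod
    #       def get_simple_parameters(cls):
    #           yield 'user'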
@classmethod
def get_default_table(self):
"""Used internally. Lino chooses during the kernel startup, for each
model, one of the discovered Table subclasses as the "default
table".
"""
return self._lino_default_table # set in dbtables.py
@classmethod
def get_data_elem(cls, name):
return None
# v = getattr(cls, name, None)
# if isinstance(v, VirtualField):
# return v
# return getattr(cls, name, None)
# return get_class_attr(cls, name)
# v = get_class_attr(cls, name)
# if v is not None:
# if isinstance(v, fields.DummyField):
# return v
# raise Exception("Oops, {} on {} is {}".format(name, cls, v))
def obj2href(self, ar, *args, **kwargs):
if ar is None:
if len(args):
return args[0]
return str(self)
return ar.obj2html(self, *args, **kwargs)
def get_detail_action(self, ar):
"""Return the (bound) detail action to use for showing this object in
a detail window. Return `None` when no detail form exists or
the requesting user has no permission to see it.
`ar` is the action request that asks to see the detail.
If the action request's actor can be used for this model,
then use its `detail_action`. Otherwise use the
`detail_action` of this model's default table.
When `ar` is `None`, the permission check is bypassed.
If `self` has a special attribute `_detail_action` defined,
return this. This magic is used by
:meth:`Menu.add_instance_action
<lino.core.menus.Menu.add_instance_action>`.
Usage example: :class:`courses.Course <lino_xl.lib.courses.Course>`
overrides this to return the detail action depending on the
:term:`activity layout`.
"""
a = getattr(self, '_detail_action', None)
# print("20201230 get_detail_action", ar.actor, ar.actor.model, self.__class__)
if a is None:
if ar and ar.actor and ar.actor.model \
and self.__class__ is ar.actor.model:
a = ar.actor.detail_action
else:
# if ar and ar.actor and ar.actor.model:
# print("20170902 {} : {} is not {}".format(
# ar.actor, self.__class__, ar.actor.model))
dt = self.__class__.get_default_table()
if dt is not None:
# a = dt.get_request_detail_action(ar)
a = dt.detail_action
if a is None or ar is None:
return a
if a.get_view_permission(ar.get_user().user_type):
return a
def get_choices_text(self, request, actor, field):
return str(self)
def get_overview_elems(self, ar):
# return [ar.obj2html(self)]
return [self.obj2href(ar)]
def save_existing_instance(self, ar):
watcher = ChangeWatcher(self)
# print("20210213 save_existing_instance", ar.ah, ar.rqdata, self.disabled_fields)
ar.ah.store.form2obj(ar, ar.rqdata, self, False)
self.full_clean()
self.save_watched_instance(ar, watcher)
def wildcard_data_elems(model):
"""
Yield names to be used as wildcard in the :attr:`column_names` of a
table or when :func:`fields_list` finds a ``*``.
"""
meta = model._meta
for f in meta.fields:
# if not isinstance(f, fields.RichTextField):
if isinstance(f, VirtualField):
if f.wildcard_data_elem:
yield f
else:
if not getattr(f, '_lino_babel_field', False):
yield f
for f in meta.many_to_many:
yield f
for f in meta.private_fields:
if not isinstance(f, VirtualField) or f.wildcard_data_elem:
yield f
# todo: for slave in self.report.slaves
def use_as_wildcard(de):
if de.name.endswith('_ptr'):
return False
return True
def fields_list(model, field_names):
"""
Return a set with the names of the specified fields, checking
whether each of them exists.
Arguments: `model` is any subclass of `django.db.models.Model`. It
may be a string with the full name of a model
(e.g. ``"myapp.MyModel"``). `field_names` is a single string with
a space-separated list of field names.
If one of the names refers to a dummy field, this name will be ignored
silently.
For example if you have a model `MyModel` with two fields `foo` and
`bar`, then ``dd.fields_list(MyModel,"foo bar")`` will return
``['foo','bar']`` and ``dd.fields_list(MyModel,"foo baz")`` will raise
an exception.
TODO: either rename this to `fields_set` or change it to return an
iterable on the fields.
"""
lst = set()
names_list = field_names.split()
for name in names_list:
if name == '*':
explicit_names = set()
for name in names_list:
if name != '*':
explicit_names.add(name)
for de in wildcard_data_elems(model):
if not isinstance(de, DummyField):
if de.name not in explicit_names:
if use_as_wildcard(de):
lst.add(de.name)
else:
e = model.get_data_elem(name)
if e is None:
raise FieldDoesNotExist(
"No data element %r in %s" % (name, model))
if not hasattr(e, 'name'):
raise FieldDoesNotExist(
"%s %r in %s has no name" % (e.__class__, name, model))
if isinstance(e, DummyField):
pass
else:
lst.add(e.name)
return lst
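# Sketch (model and field names assumed, mirroring the docstring above):
#
#   fields_list(MyModel, "foo bar")  # -> {'foo', 'bar'}
#   fields_list(MyModel, "foo baz")  # raises FieldDoesNotExist
#   fields_list(MyModel, "*")        # all wildcard data elements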
def pointer_factory(cls, othermodel, *args, **kw):
"""
Instantiate a `ForeignKey` or `OneToOneField` with some subtle
differences:
- It supports `othermodel` being `None` or the name of some
non-installed model and returns a :class:`DummyField` in that
case. This difference is useful when designing reusable models.
- Explicitly sets the default value for `on_delete
<https://docs.djangoproject.com/en/1.11/ref/models/fields/#django.db.models.ForeignKey.on_delete>`__
to ``CASCADE`` (as required by Django 2).
"""
if othermodel is None:
return DummyField(othermodel, *args, **kw)
if isinstance(othermodel, str):
if not settings.SITE.is_installed_model_spec(othermodel):
return DummyField(othermodel, *args, **kw)
kw.setdefault('on_delete', models.CASCADE)
return cls(othermodel, *args, **kw)
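# Sketch (assumed app labels): the factory degrades to a DummyField when the
# target model is not installed, which keeps reusable models importable.
#
#   # a normal FK, with on_delete defaulting to CASCADE:
#   company = ForeignKey('contacts.Company')
#   # becomes a DummyField if the 'coachings' app is not installed:
#   coach = ForeignKey('coachings.Coaching', blank=True, null=True)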
def make_remote_field(model, name):
parts = name.split('__')
if len(parts) == 1:
return
# It's going to be a RemoteField
# logger.warning("20151203 RemoteField %s in %s", name, cls)
from lino.core import store
cls = model
field_chain = []
editable = False
leaf_chooser = None
for n in parts:
if model is None:
return
# raise Exception(
# "Invalid remote field {0} for {1}".format(name, cls))
if isinstance(model, str):
# Django 1.9 no longer resolves the
# rel.model of ForeignKeys on abstract
# models, so we do it here.
model = resolve_model(model)
# logger.warning("20151203 %s", model)
fld = model.get_data_elem(n)
if fld is None:
return
# raise Exception(
# "Invalid RemoteField %s.%s (no field %s in %s)" %
# (full_model_name(model), name, n, full_model_name(model)))
# make sure that the atomizer gets created.
store.get_atomizer(model, fld, fld.name)
if isinstance(fld, VirtualField):
fld.lino_resolve_type()
leaf_chooser = choosers.check_for_chooser(model, fld)
field_chain.append(fld)
if isinstance(fld, models.OneToOneRel):
editable = True
if getattr(fld, 'remote_field', None):
model = fld.remote_field.model
else:
model = None
if leaf_chooser is not None:
d = choosers.get_choosers_dict(cls)
d[name] = leaf_chooser
def getter(obj, ar=None):
try:
for fld in field_chain:
if obj is None:
return None
obj = fld._lino_atomizer.full_value_from_object(
obj, ar)
return obj
except Exception as e:
# raise
msg = "Error while computing {}: {} ({} in {})"
raise Exception(msg.format(
name, e, fld, field_chain))
# ~ if False: # only for debugging
if True: # see 20130802
logger.exception(e)
return str(e)
return None
if not editable:
rf = RemoteField(getter, name, fld)
# choosers.check_for_chooser(model, rf)
return rf
def setter(obj, value):
# logger.info("20180712 %s setter() %s", name, value)
# all intermediate fields are OneToOneRel
target = obj
try:
for fld in field_chain:
# print("20180712a %s" % fld)
if isinstance(fld, models.OneToOneRel):
reltarget = getattr(target, fld.name, None)
if reltarget is None:
rkw = { fld.field.name: target}
# print(
# "20180712 create {}({})".format(
# fld.related_model, rkw))
reltarget = fld.related_model(**rkw)
reltarget.full_clean()
reltarget.save()
setattr(target, fld.name, reltarget)
target.full_clean()
target.save()
# print("20180712b {}.{} = {}".format(
# target, fld.name, reltarget))
target = reltarget
else:
setattr(target, fld.name, value)
target.full_clean()
target.save()
# print(
# "20180712c setattr({},{},{}".format(
# target, fld.name, value))
return True
except Exception as e:
raise e.__class__(
"Error while setting %s: %s" % (name, e))
# ~ if False: # only for debugging
if True: # see 20130802
logger.exception(e)
return str(e)
return False
rf = RemoteField(getter, name, fld, setter)
# choosers.check_for_chooser(model, rf)
return rf
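# Sketch (assumed field chain): for a double-underscore name the factory walks
# the chain and returns a RemoteField whose getter follows the related objects.
#
#   rf = make_remote_field(Invoice, 'partner__city__name')
#   rf.value_from_object(invoice)  # roughly invoice.partner.city.name, or None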
# # would be nice for lino_xl.lib.vat.VatItemBase.item_total
# class FieldAlias(VirtualField):
# def __init__(self, orig_name):
# ...
#
#
def choices_for_field(ar, holder, field):
"""
Return the choices for the given field and the given HTTP request
whose `holder` is either a Model, an Actor or an Action.
"""
if not holder.get_view_permission(ar.request.user.user_type):
raise Exception(
"{user} has no permission for {holder}".format(
user=ar.request.user, holder=holder))
# model = holder.get_chooser_model()
chooser = holder.get_chooser_for_field(field.name)
# logger.info('20140822 choices_for_field(%s.%s) --> %s',
# holder, field.name, chooser)
if chooser:
qs = chooser.get_request_choices(ar, holder)
if not isiterable(qs):
raise Exception("%s.%s_choices() returned non-iterable %r" % (
holder.model, field.name, qs))
if chooser.simple_values:
def row2dict(obj, d):
d[constants.CHOICES_TEXT_FIELD] = str(obj)
d[constants.CHOICES_VALUE_FIELD] = obj
return d
elif chooser.instance_values:
# same code as for ForeignKey
def row2dict(obj, d):
d[constants.CHOICES_TEXT_FIELD] = holder.get_choices_text(
obj, ar.request, field)
d[constants.CHOICES_VALUE_FIELD] = obj.pk
return d
else: # values are (value, text) tuples
def row2dict(obj, d):
d[constants.CHOICES_TEXT_FIELD] = str(obj[1])
d[constants.CHOICES_VALUE_FIELD] = obj[0]
return d
return (qs, row2dict)
if field.choices:
qs = field.choices
def row2dict(obj, d):
if type(obj) is list or type(obj) is tuple:
d[constants.CHOICES_TEXT_FIELD] = str(obj[1])
d[constants.CHOICES_VALUE_FIELD] = obj[0]
else:
d[constants.CHOICES_TEXT_FIELD] = holder.get_choices_text(
obj, ar.request, field)
d[constants.CHOICES_VALUE_FIELD] = str(obj)
return d
return (qs, row2dict)
if isinstance(field, VirtualField):
field = field.return_type
if isinstance(field, RemoteField):
field = field.field
if isinstance(field, VirtualField): # 20200425
field = field.return_type
if isinstance(field, models.ForeignKey):
m = field.remote_field.model
t = m.get_default_table()
qs = t.request(request=ar.request).data_iterator
# logger.info('20120710 choices_view(FK) %s --> %s', t, qs.query)
def row2dict(obj, d):
d[constants.CHOICES_TEXT_FIELD] = holder.get_choices_text(
obj, ar.request, field)
d[constants.CHOICES_VALUE_FIELD] = obj.pk
return d
else:
raise http.Http404("No choices for %s" % field)
return (qs, row2dict)
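# Sketch of the caller contract (request object `ar` assumed): the function
# returns an iterable of choices plus a row2dict callback that fills the two
# constants expected by the front end.
#
#   qs, row2dict = choices_for_field(ar, holder, field)
#   rows = [row2dict(obj, {}) for obj in qs]
#   # each row carries constants.CHOICES_VALUE_FIELD and constants.CHOICES_TEXT_FIELD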
|
|
"""
******** Models for test_data.py ***********
The following classes are for testing basic data marshalling, including
NULL values, where allowed.
The basic idea is to have a model for each Django data type.
"""
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from .base import BaseModel
class BinaryData(models.Model):
data = models.BinaryField(null=True)
class BooleanData(models.Model):
data = models.BooleanField(default=False)
class CharData(models.Model):
data = models.CharField(max_length=30, null=True)
class DateData(models.Model):
data = models.DateField(null=True)
class DateTimeData(models.Model):
data = models.DateTimeField(null=True)
class DecimalData(models.Model):
data = models.DecimalField(null=True, decimal_places=3, max_digits=5)
class EmailData(models.Model):
data = models.EmailField(null=True)
class FileData(models.Model):
data = models.FileField(null=True)
class FilePathData(models.Model):
data = models.FilePathField(null=True)
class FloatData(models.Model):
data = models.FloatField(null=True)
class IntegerData(models.Model):
data = models.IntegerField(null=True)
class BigIntegerData(models.Model):
data = models.BigIntegerField(null=True)
# class ImageData(models.Model):
# data = models.ImageField(null=True)
class GenericIPAddressData(models.Model):
data = models.GenericIPAddressField(null=True)
class NullBooleanData(models.Model):
data = models.NullBooleanField(null=True)
class PositiveIntegerData(models.Model):
data = models.PositiveIntegerField(null=True)
class PositiveSmallIntegerData(models.Model):
data = models.PositiveSmallIntegerField(null=True)
class SlugData(models.Model):
data = models.SlugField(null=True)
class SmallData(models.Model):
data = models.SmallIntegerField(null=True)
class TextData(models.Model):
data = models.TextField(null=True)
class TimeData(models.Model):
data = models.TimeField(null=True)
class Tag(models.Model):
"""A tag on an item."""
data = models.SlugField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
ordering = ["data"]
class GenericData(models.Model):
data = models.CharField(max_length=30)
tags = GenericRelation(Tag)
# The following test classes are all for validation
# of related objects; in particular, forward, backward,
# and self references.
class Anchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(max_length=30)
class Meta:
ordering = ('id',)
class UniqueAnchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(unique=True, max_length=30)
class FKData(models.Model):
data = models.ForeignKey(Anchor, models.SET_NULL, null=True)
class M2MData(models.Model):
data = models.ManyToManyField(Anchor)
class O2OData(models.Model):
# One to one field can't be null here, since it is a PK.
data = models.OneToOneField(Anchor, models.CASCADE, primary_key=True)
class FKSelfData(models.Model):
data = models.ForeignKey('self', models.CASCADE, null=True)
class M2MSelfData(models.Model):
data = models.ManyToManyField('self', symmetrical=False)
class FKDataToField(models.Model):
data = models.ForeignKey(UniqueAnchor, models.SET_NULL, null=True, to_field='data')
class FKDataToO2O(models.Model):
data = models.ForeignKey(O2OData, models.SET_NULL, null=True)
class M2MIntermediateData(models.Model):
data = models.ManyToManyField(Anchor, through='Intermediate')
class Intermediate(models.Model):
left = models.ForeignKey(M2MIntermediateData, models.CASCADE)
right = models.ForeignKey(Anchor, models.CASCADE)
extra = models.CharField(max_length=30, blank=True, default="doesn't matter")
# The following test classes are for validating the
# deserialization of objects that use a user-defined
# field as the primary key.
# Some of these data types have been commented out
# because they can't be used as a primary key on one
# or all database backends.
class BooleanPKData(models.Model):
data = models.BooleanField(primary_key=True, default=False)
class CharPKData(models.Model):
data = models.CharField(max_length=30, primary_key=True)
# class DatePKData(models.Model):
# data = models.DateField(primary_key=True)
# class DateTimePKData(models.Model):
# data = models.DateTimeField(primary_key=True)
class DecimalPKData(models.Model):
data = models.DecimalField(primary_key=True, decimal_places=3, max_digits=5)
class EmailPKData(models.Model):
data = models.EmailField(primary_key=True)
# class FilePKData(models.Model):
# data = models.FileField(primary_key=True)
class FilePathPKData(models.Model):
data = models.FilePathField(primary_key=True)
class FloatPKData(models.Model):
data = models.FloatField(primary_key=True)
class IntegerPKData(models.Model):
data = models.IntegerField(primary_key=True)
# class ImagePKData(models.Model):
# data = models.ImageField(primary_key=True)
class GenericIPAddressPKData(models.Model):
data = models.GenericIPAddressField(primary_key=True)
# This is just a Boolean field with null=True, and we can't test a PK value of NULL.
# class NullBooleanPKData(models.Model):
# data = models.NullBooleanField(primary_key=True)
class PositiveIntegerPKData(models.Model):
data = models.PositiveIntegerField(primary_key=True)
class PositiveSmallIntegerPKData(models.Model):
data = models.PositiveSmallIntegerField(primary_key=True)
class SlugPKData(models.Model):
data = models.SlugField(primary_key=True)
class SmallPKData(models.Model):
data = models.SmallIntegerField(primary_key=True)
# class TextPKData(models.Model):
# data = models.TextField(primary_key=True)
# class TimePKData(models.Model):
# data = models.TimeField(primary_key=True)
class UUIDData(models.Model):
data = models.UUIDField(primary_key=True)
class FKToUUID(models.Model):
data = models.ForeignKey(UUIDData, models.CASCADE)
# Tests for handling fields with pre_save functions, or
# models with save functions that modify data
class AutoNowDateTimeData(models.Model):
data = models.DateTimeField(null=True, auto_now=True)
class ModifyingSaveData(models.Model):
data = models.IntegerField(null=True)
def save(self, *args, **kwargs):
"""
A save method that modifies the data in the object.
A user-defined save() method isn't called when objects are deserialized
(#4459).
"""
self.data = 666
super().save(*args, **kwargs)
# Tests for serialization of models using inheritance.
# Regression for #7202, #7350
class AbstractBaseModel(models.Model):
parent_data = models.IntegerField()
class Meta:
abstract = True
class InheritAbstractModel(AbstractBaseModel):
child_data = models.IntegerField()
class InheritBaseModel(BaseModel):
child_data = models.IntegerField()
class ExplicitInheritBaseModel(BaseModel):
parent = models.OneToOneField(BaseModel, models.CASCADE, parent_link=True)
child_data = models.IntegerField()
class LengthModel(models.Model):
data = models.IntegerField()
def __len__(self):
return self.data
|
|
import DistributedNPCToonBaseAI
from toontown.toonbase import TTLocalizer, ToontownGlobals
from direct.fsm import ClassicFSM, State
from direct.task.Task import Task
class DistributedNPCScientistAI(DistributedNPCToonBaseAI.DistributedNPCToonBaseAI):
def __init__(self, air, npcId, questCallback = None, hq = 0):
DistributedNPCToonBaseAI.DistributedNPCToonBaseAI.__init__(self, air, npcId, questCallback)
self.scientistFSM = ClassicFSM.ClassicFSM('Scientist', [
State.State('Neutral',
self.enterNeutral,
self.exitNeutral, [
'Phase0',
'Phase1',
'Phase2',
'Phase2_5',
'Phase3',
'Phase4',
'Phase5',
'Off']),
State.State('Phase0',
self.enterPhase0,
self.exitPhase0, [
'Phase1',
'Neutral']),
State.State('Phase1',
self.enterPhase1,
self.exitPhase1, [
'Phase2',
'Neutral']),
State.State('Phase2',
self.enterPhase2,
self.exitPhase2, [
'Phase2_5',
'Neutral']),
State.State('Phase2_5',
self.enterPhase2_5,
self.exitPhase2_5, [
'Phase3',
'Neutral']),
State.State('Phase3',
self.enterPhase3,
self.exitPhase3, [
'Phase4',
'Neutral']),
State.State('Phase4',
self.enterPhase4,
self.exitPhase4, [
'Phase5',
'Neutral']),
State.State('Phase5',
self.enterPhase5,
self.exitPhase5, [
'Neutral']),
State.State('Off',
self.enterOff,
self.exitOff, [])],
'Neutral', 'Off')
if self.npcId == 2018 or self.npcId == 2019:
self.startAnimState = 'ScientistJealous'
elif self.npcId == 2020:
self.startAnimState = 'ScientistEmcee'
self.scientistFSM.enterInitialState()
def selectPhase(self, newPhase):
try:
if newPhase <= 4:
gotoPhase = '0'
elif newPhase <= 6:
gotoPhase = '1'
elif newPhase <= 11:
gotoPhase = '2'
elif newPhase <= 12:
gotoPhase = '2_5'
elif newPhase <= 13:
gotoPhase = '3'
elif newPhase <= 14:
gotoPhase = '4'
elif newPhase <= 15:
gotoPhase = '5'
else:
if not self.scientistFSM.getCurrentState() == self.scientistFSM.getStateNamed('Neutral'):
self.scientistFSM.request('Neutral')
return
gotoPhase = 'Phase' + gotoPhase
if not self.scientistFSM.getCurrentState() == self.scientistFSM.getStateNamed(gotoPhase):
self.scientistFSM.request(gotoPhase)
except:
self.notify.warning('Illegal phase transition requested')
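    # Phase mapping used by selectPhase (documentation only): silly-meter
    # phases 0-4 -> Phase0, 5-6 -> Phase1, 7-11 -> Phase2, 12 -> Phase2_5,
    # 13 -> Phase3, 14 -> Phase4, 15 -> Phase5; anything higher falls back
    # to Neutral.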
def startIfNeeded(self):
if hasattr(simbase.air, 'holidayManager') and simbase.air.holidayManager:
self.curPhase = self.getPhaseToRun()
if self.curPhase != -1:
self.selectPhase(self.curPhase)
def getPhaseToRun(self):
result = -1
enoughInfoToRun = False
if simbase.air.holidayManager.isHolidayRunning(ToontownGlobals.SILLYMETER_HOLIDAY) or simbase.air.holidayManager.isHolidayRunning(ToontownGlobals.SILLYMETER_EXT_HOLIDAY):
if hasattr(simbase.air, 'SillyMeterMgr'):
enoughInfoToRun = True
else:
self.notify.debug('simbase.air does not have SillyMeterMgr')
else:
self.notify.debug('holiday is not running')
self.notify.debug('enoughInfoToRun = %s' % enoughInfoToRun)
if enoughInfoToRun and simbase.air.SillyMeterMgr.getIsRunning():
result = simbase.air.SillyMeterMgr.getCurPhase()
return result
def enterNeutral(self):
self.accept('SillyMeterPhase', self.selectPhase)
self.startIfNeeded()
def exitNeutral(self):
self.ignore('SillyMeterPhase')
def enterPhase0(self):
if self.npcId == 2020:
self.air.dialogueManager.requestDialogue(self, TTLocalizer.AprilToonsPhasePreTopTopic, endPause=30)
elif self.npcId == 2018 or self.npcId == 2019:
self.d_setAnimState('ScientistJealous', 1.0)
self.accept('SillyMeterPhase', self.selectPhase)
def exitPhase0(self):
if self.npcId == 2020:
self.air.dialogueManager.leaveDialogue(self, TTLocalizer.AprilToonsPhasePreTopTopic)
self.ignore('SillyMeterPhase')
def enterPhase1(self):
if self.npcId == 2020:
self.air.dialogueManager.requestDialogue(self, TTLocalizer.AprilToonsPhasePreTopTopic, endPause=30)
elif self.npcId == 2018 or self.npcId == 2019:
self.d_setAnimState('ScientistJealous', 1.0)
self.accept('SillyMeterPhase', self.selectPhase)
def exitPhase1(self):
if self.npcId == 2020:
self.air.dialogueManager.leaveDialogue(self, TTLocalizer.AprilToonsPhasePreTopTopic)
self.ignore('SillyMeterPhase')
def enterPhase2(self):
if self.npcId == 2020:
self.air.dialogueManager.requestDialogue(self, TTLocalizer.AprilToonsPhasePreTopTopic, endPause=30)
elif self.npcId == 2018 or self.npcId == 2019:
self.d_setAnimState('ScientistWork', 1.0)
self.accept('SillyMeterPhase', self.selectPhase)
def exitPhase2(self):
if self.npcId == 2020:
self.air.dialogueManager.leaveDialogue(self, TTLocalizer.AprilToonsPhasePreTopTopic)
self.ignore('SillyMeterPhase')
def enterPhase2_5(self):
if self.npcId == 2020:
if simbase.air.holidayManager.isHolidayRunning(ToontownGlobals.SILLYMETER_EXT_HOLIDAY):
self.air.dialogueManager.requestDialogue(self, TTLocalizer.AprilToonsExtPhaseTopTopic, endPause=30)
else:
self.air.dialogueManager.requestDialogue(self, TTLocalizer.AprilToonsPhaseTopTopic, endPause=30)
elif self.npcId == 2018 or self.npcId == 2019:
self.d_setAnimState('ScientistLessWork', 1.0)
self.accept('SillyMeterPhase', self.selectPhase)
def exitPhase2_5(self):
if self.npcId == 2020:
if simbase.air.holidayManager.isHolidayRunning(ToontownGlobals.SILLYMETER_EXT_HOLIDAY):
self.air.dialogueManager.leaveDialogue(self, TTLocalizer.AprilToonsExtPhaseTopTopic)
else:
self.air.dialogueManager.leaveDialogue(self, TTLocalizer.AprilToonsPhaseTopTopic)
self.ignore('SillyMeterPhase')
def enterPhase3(self):
if self.npcId == 2020:
if simbase.air.holidayManager.isHolidayRunning(ToontownGlobals.SILLYMETER_EXT_HOLIDAY):
self.air.dialogueManager.requestDialogue(self, TTLocalizer.AprilToonsExtPhaseTopTopic, endPause=30)
else:
self.air.dialogueManager.requestDialogue(self, TTLocalizer.AprilToonsPhasePostTopTopic, endPause=30)
elif self.npcId == 2018 or self.npcId == 2019:
self.d_setAnimState('ScientistPlay', 1.0)
self.accept('SillyMeterPhase', self.selectPhase)
def exitPhase3(self):
if self.npcId == 2020:
if simbase.air.holidayManager.isHolidayRunning(ToontownGlobals.SILLYMETER_EXT_HOLIDAY):
self.air.dialogueManager.leaveDialogue(self, TTLocalizer.AprilToonsExtPhaseTopTopic)
else:
self.air.dialogueManager.leaveDialogue(self, TTLocalizer.AprilToonsPhasePostTopTopic)
self.ignore('SillyMeterPhase')
def enterPhase4(self):
if self.npcId == 2020:
if simbase.air.holidayManager.isHolidayRunning(ToontownGlobals.SILLYMETER_EXT_HOLIDAY):
self.air.dialogueManager.requestDialogue(self, TTLocalizer.AprilToonsExtPhaseTopTopic, endPause=30)
else:
self.air.dialogueManager.requestDialogue(self, TTLocalizer.AprilToonsPhasePostTopTopic, endPause=30)
elif self.npcId == 2018 or self.npcId == 2019:
self.d_setAnimState('ScientistPlay', 1.0)
self.accept('SillyMeterPhase', self.selectPhase)
def exitPhase4(self):
if self.npcId == 2020:
if simbase.air.holidayManager.isHolidayRunning(ToontownGlobals.SILLYMETER_EXT_HOLIDAY):
self.air.dialogueManager.leaveDialogue(self, TTLocalizer.AprilToonsExtPhaseTopTopic)
else:
self.air.dialogueManager.leaveDialogue(self, TTLocalizer.AprilToonsPhasePostTopTopic)
self.ignore('SillyMeterPhase')
def enterPhase5(self):
if self.npcId == 2020:
if simbase.air.holidayManager.isHolidayRunning(ToontownGlobals.SILLYMETER_EXT_HOLIDAY):
self.air.dialogueManager.requestDialogue(self, TTLocalizer.AprilToonsExtPhaseTopTopic, endPause=30)
else:
self.air.dialogueManager.requestDialogue(self, TTLocalizer.AprilToonsPhasePostTopTopic, endPause=30)
elif self.npcId == 2018 or self.npcId == 2019:
self.d_setAnimState('ScientistPlay', 1.0)
self.accept('SillyMeterPhase', self.selectPhase)
def exitPhase5(self):
if self.npcId == 2020:
if simbase.air.holidayManager.isHolidayRunning(ToontownGlobals.SILLYMETER_EXT_HOLIDAY):
self.air.dialogueManager.leaveDialogue(self, TTLocalizer.AprilToonsExtPhaseTopTopic)
else:
self.air.dialogueManager.leaveDialogue(self, TTLocalizer.AprilToonsPhasePostTopTopic)
self.ignore('SillyMeterPhase')
def enterOff(self):
pass
def exitOff(self):
pass
def delete(self):
        if hasattr(self, 'scientistFSM'):
            self.scientistFSM.requestFinalState()
            del self.scientistFSM
DistributedNPCToonBaseAI.DistributedNPCToonBaseAI.delete(self)
|
|
import sys
import os
from keras.models import load_model
from keras.utils import plot_model
from custom_objects import CustomObjects
from model_def import ModelDef
from run_config import RunConfig
import datetime
Train=1
Generate=2
class ModelUtils(object):
iteration = 0
config = None
mode = Train
model_filename = ""
model_tag = ""
testdata_filename = ""
output_dir = ""
h5_model_filename = ""
h5_weights_filename = ""
output_fn = ""
output_file = None
csv_logger_fn = ""
csv_logger = None
logfile_fn = ""
logfile = None
iteration_counter_fn = None
model_def = None
one_off_generate_len = None
load_weights = None
def __init__(self):
self.buffered_logs = []
self.log("====================================================")
self.log("Started New Run at:", datetime.datetime.now())
self.log("PID:", os.getpid())
self.log("====================================================")
if len(sys.argv) < 2:
print("training usage: lstm_c2_generation <tagname> [test data filename>] [load model filename]")
print("for example\n lstm_c2_generation test1 test/LDC97S44-8k.c2cb")
print("if test data filename or load model filename are excluded, the settings in config.json will be used if it exists")
print("if load model filename is set to 'none' then the coded model definition will be used, regardless of what is set in the config.json file.\n")
print("generator usage: lstm_c2_generation [tagname] --generate=<base filename> [--seed_index=<'random'|frame num|time in seconds>] [--generate-len=<frames>] <test data filename> <load model filename>")
PRINT("loading weights: lstm_c2_generation [tagname] --load-weights=<path to .h5 file> <test data filename> <load model filename>")
print("for example\n lstm_c2_generation --generate=audiofile --seed_index=60s --generate-len=500 test/LDC97S44-8k.c2cb out/realmodel/model-600.h5")
exit()
named_args = {}
basic_args = []
self.named_args = named_args
self.basic_args = basic_args
print('arguments:', sys.argv)
for i, arg in enumerate(sys.argv[1:]):
self.log('arg', i, arg)
            print('arg', i, arg)
if arg[0:2] == "--":
a = arg.split("=")
key = a[0][2:]
named_args[key] = a[1]
else:
basic_args.append(arg)
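        # Example (illustrative): an invocation such as
        #   lstm_c2_generation test1 --generate=audiofile --seed_index=60s data.c2cb model.h5
        # yields named_args == {'generate': 'audiofile', 'seed_index': '60s'}
        # and basic_args == ['test1', 'data.c2cb', 'model.h5'].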
if named_args.get('generate', None):
self.generate_name = named_args['generate']
self.mode = Generate
self.model_tag = basic_args[0]
self.log("mode: Generate")
else:
self.model_tag = basic_args[0]
self.mode = Train
self.log("mode: Train")
if self.training_mode():
self.output_dir="out/"+str(self.model_tag)+"/"
self.output_fn=self.output_dir+"out-c2cb-"
try:
os.makedirs(self.output_dir)
except OSError:
print("the tag ", self.model_tag, " has been used")
print("continuing where we left off")
else:
self.output_dir="out/"+str(self.model_tag)+"/"
self.output_fn="generated/"+str(self.generate_name)
self.config = RunConfig(self)
if len(basic_args) > 1:
self.testdata_filename = basic_args[1]
self.config.test_data_fn = self.testdata_filename
self.log("using command line test data filename:", self.config.test_data_fn)
if len(basic_args) > 2:
self.model_filename = basic_args[2]
#self.log("using command line model filename:",self.model_filename)
else:
self.model_filename = self.config.model_filename
#self.log("using configured model_filename:",self.config.model_filename)
if named_args.get('generate-len', None):
self.config.one_off_generate_len = int(named_args['generate-len'])
if named_args.get('load-weights', None):
self.load_weights = named_args['load-weights']
self.log("loading weights from a weights file:", self.load_weights)
self.model_filename = 'none'
else:
self.log("not loading weights from a weights file")
self.h5_model_filename=self.output_dir+"model-"
self.h5_weights_filename=self.output_dir+"weights-"
if self.training_mode():
from keras.callbacks import CSVLogger
self.csv_logger_fn = self.output_dir + 'training.log'
self.csv_logger = CSVLogger(self.csv_logger_fn, append=True)
self.iteration_counter_fn = self.output_dir + "iteration_counter"
self.gen_counter_fn = self.output_dir + "gen_counter"
self.logfile_fn = self.output_dir + "log"
self.logfile = open(self.logfile_fn, "a", 1)
def setup_seed_start(self, generator):
if self.named_args.get('seed_index', None):
seed_index = self.named_args['seed_index']
if seed_index == 'random':
self.log("Setting seed start index to 'random'")
generator.set_random_seed_start_index()
elif seed_index.find('s') > 0:
self.log("Setting seed start index to:", seed_index)
seed_index = seed_index[0:-1]
generator.set_time_seed_start_index(int(seed_index))
else:
self.log("Setting seed start index to:", seed_index)
generator.set_frame_seed_start_index(int(seed_index))
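        # Seed start forms handled above (values are examples):
        #   --seed_index=random  -> random seed start position
        #   --seed_index=60s     -> trailing 's' means seconds into the test data
        #   --seed_index=500     -> plain integer means a frame number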
def load_model(self):
self.log("loading model: " + self.model_filename)
self.model_def.model = load_model(self.model_filename, custom_objects=self.custom_objects())
if self.training_mode():
self.log("saving config after loading model")
self.config.model_filename = self.model_filename
self.config.save_config()
else:
self.log("not saving config after loading model")
self.log_model_summary()
return self.model_def.model
def save_json_model(self, update_num=0):
model = self.model_def.model
json_string = model.to_json()
print("saving json model")
n = "jmodel-"+str(update_num)+".json"
mfile= open(self.output_dir + n, "w")
mfile.write(json_string)
        mfile.close()
def save_h5_model(self, iteration):
model = self.model_def.model
fn = self.h5_model_filename+str(iteration)+".h5"
res = model.save(fn)
self.config.model_filename = fn
self.write_iteration_count(iteration)
self.config.save_config()
#plot_model(model, to_file=self.output_dir+'vis-model-'+str(iteration)+'.png')
return res
def save_weights(self, iteration):
model = self.model_def.model
return model.save_weights(self.h5_weights_filename+str(iteration)+".h5")
def open_output_file(self, iteration, output_index=None):
if self.training_mode():
output_fn = self.output_fn+str(iteration)
else:
output_fn = self.output_fn
if output_index is not None:
output_fn += '_' + str(output_index)
self.output_file = open(output_fn, "wb")
return output_fn
def after_iteration(self, iteration):
#self.write_iteration_count(self, iteration)
return
def write_iteration_count(self, iteration):
self.config.start_iteration = iteration
with open(self.iteration_counter_fn, "w") as f:
f.write(str(iteration))
def write_gen_count(self, iteration):
with open(self.gen_counter_fn, "w") as f:
f.write(str(iteration))
def read_iteration_count(self):
res = []
if self.iteration_counter_fn and os.path.isfile(self.iteration_counter_fn):
with open(self.iteration_counter_fn) as f:
res = f.readlines()
if len(res) == 1:
i = int(res[0])
self.iteration = i
self.log("Continuing from a previous run at iteration: ", i)
return i
else:
self.iteration = 0
self.log("No iteration file found. Setting to 0.")
return 0
    def log(self, *inargs):
        if self.logfile is None:
            # No log file is open yet: echo to stdout and buffer the line so it
            # can be written to the log file once one is opened.
            args = [str(a) for a in inargs]
            print(" ".join(args))
            self.buffered_logs.append(" ".join(args) + "\n")
            return
        elif len(self.buffered_logs) > 0:
            # Flush anything that was logged before the log file existed.
            for s in self.buffered_logs:
                print(s)
                self.logfile.write(s)
                self.logfile.flush()
            self.buffered_logs = []
        try:
            for arg in inargs:
                self.logfile.write(str(arg) + " ")
            print(" ".join(str(arg) for arg in inargs))
            self.logfile.write("\n")
            self.logfile.flush()
        except IOError:
            print("* Logging Failed *")
            print(" ".join(str(arg) for arg in inargs))
def signal_handler(self, signal, frame):
self.log('Interrupt signal caught. Closing gracefully. Iteration:', self.iteration)
self.write_iteration_count(self.iteration)
print("saving .h5 model file")
self.save_h5_model(self.iteration)
print("saving .h5 weights file")
self.save_weights(self.iteration)
print("exiting now")
self.logfile.close()
sys.exit(0)
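    # Illustrative wiring (an assumption about the calling script, not code in
    # this module): the trainer can install this handler for Ctrl-C, e.g.
    #   import signal
    #   signal.signal(signal.SIGINT, utils.signal_handler)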
def custom_objects(self):
return {
"CustomObjects": CustomObjects,
"codec2_param_error": CustomObjects.codec2_param_error,
"codec2_param_error_td": CustomObjects.codec2_param_error_td
}
    def test_seed_data(self, all_frames, start_index, frame_seq_len, sample):
        # frame_seq_len gives the number of seed frames to copy; sample()
        # converts a frame to the bytes written to the output file.
        self.open_output_file(0)
        seed_frame_seq = all_frames[start_index: start_index + frame_seq_len]
        for frame in seed_frame_seq:
            self.output_file.write(sample(frame))
        self.output_file.close()
def define_or_load_model(self, frame_seq_len, framelen, num_frame_seqs):
self.model_def = ModelDef(self, self.config)
if len(self.model_filename) > 0 and self.model_filename != 'none' and self.model_filename != 'None':
model = self.load_model()
self.save_json_model()
else:
self.log("creating new model")
model = self.model_def.define_model(frame_seq_len, framelen, num_frame_seqs)
self.save_json_model()
        if self.load_weights is not None:
self.model_def.load_weights(self.load_weights, by_name=True)
return self.model_def
def training_mode(self):
return self.mode == Train
def generate_mode(self):
return self.mode == Generate
def setup_config(self):
return self.config
def log_model_summary(self):
self.model_def.model.summary()
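# --- Illustrative usage sketch (assumption, not part of the original script) ---
# A training driver built on ModelUtils might look roughly like this; the
# dimensions (200 frames per sequence, 16 parameters per frame, 32 sequences
# per batch) are placeholder values, not values taken from a real model.
#
#   utils = ModelUtils()
#   start_iteration = utils.read_iteration_count()
#   model_def = utils.define_or_load_model(frame_seq_len=200, framelen=16,
#                                          num_frame_seqs=32)
#   # ... train, then periodically checkpoint:
#   utils.save_h5_model(start_iteration + 1)
#   utils.save_weights(start_iteration + 1)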
|
|
import os
current_path = os.path.dirname(os.path.abspath(__file__))
header = False
sub_header = False
footer = False
buttons = False
lists = False
cards = False
forms = False
toggle = True
checkbox = False
radio_button = False
range_control = False
select_control = False
tabs = True
actionSheet = False
backdrop = False
scrollDelegate = False
loading = False
modal = False
navigation = False
platform = False
popover = False
popup = False
side_menu = False
utility = False
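# Each flag above selects one snippet category to (re)generate when this script
# runs; with the current settings only the Toggle and Tabs snippets are written.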
def create_snippet_string(tabTrigger, content, scope, description):
tabTrigger_tag = "<tabTrigger>" + tabTrigger + "</tabTrigger>"
content_tag = "<content><![CDATA[" + content + "]]></content>"
scope_tag = "<scope>" + scope + "</scope>"
if len(description) == 0:
description_tag = "<description>...</description>"
else:
description_tag = "<description>" + description + "</description>"
snippet_string = "<snippet>\n\t" + content_tag + "\n\t" + tabTrigger_tag +\
"\n\t" + scope_tag + "\n\t" + description_tag + "\n" + "</snippet>"
return snippet_string
def create_snippet_file(path_name_file, tabTrigger, content, scope, description):
snippet_str = create_snippet_string(
tabTrigger, content, scope, description)
output_file = open(path_name_file, "w+")
output_file.write(snippet_str)
output_file.close()
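# Illustrative example (not executed by this script): calling
#   create_snippet_file('ionic-card.sublime-snippet', 'ionic-card',
#                       '<div class="card">$0</div>', 'text.html', 'Ionic Card')
# writes a file containing:
#   <snippet>
#       <content><![CDATA[<div class="card">$0</div>]]></content>
#       <tabTrigger>ionic-card</tabTrigger>
#       <scope>text.html</scope>
#       <description>Ionic Card</description>
#   </snippet>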
if header:
output_folder = current_path + '/Header/'
css_bars = ['bar-light', 'bar-stable', 'bar-positive',
'bar-calm', 'bar-balanced', 'bar-energized',
'bar-assertive', 'bar-royal', 'bar-dark']
header_desc = ['Ionic Light Header', 'Ionic Stable Header', 'Ionic Positive Header',
'Ionic Calm Header', 'Ionic Balanced Header', 'Ionic Energized Header',
'Ionic Assertive Header', 'Ionic Royal Header', 'Ionic Dark Header']
for idx, bar in enumerate(css_bars):
header_str = '<div class="bar bar-header ' + bar + '"> \n' + \
'\t <h1 class="title">${1:' + bar + '}</h1> \n' + '</div>'
snippet_str = create_snippet_string(
'ionic-header-' + bar[4:], header_str, 'text.html', header_desc[idx])
output_file = open(
output_folder + 'ionic-header-' + bar[4:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if sub_header:
output_folder = current_path + '/SubHeader/'
css_bars = ['bar-light', 'bar-stable', 'bar-positive',
'bar-calm', 'bar-balanced', 'bar-energized',
'bar-assertive', 'bar-royal', 'bar-dark']
subheader_desc = ['Ionic Light SubHeader', 'Ionic Stable SubHeader', 'Ionic Positive SubHeader',
'Ionic Calm SubHeader', 'Ionic Balanced SubHeader', 'Ionic Energized SubHeader',
'Ionic Assertive SubHeader', 'Ionic Royal SubHeader', 'Ionic Dark SubHeader']
for idx, bar in enumerate(css_bars):
strr = '<div class="bar bar-subheader ' + bar + '"> \n' + \
'\t <h2 class="title">$0</h2> \n' + '</div>'
snippet_str = create_snippet_string(
'ionic-subheader-' + bar.split('-')[1], strr, 'text.html', subheader_desc[idx])
output_file = open(
output_folder + 'ionic-subheader-' + bar.split('-')[1] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if footer:
output_folder = current_path + '/Footer/'
css_bars = ['bar-light', 'bar-stable', 'bar-positive',
'bar-calm', 'bar-balanced', 'bar-energized',
'bar-assertive', 'bar-royal', 'bar-dark']
footer_desc = ['Ionic Light Footer', 'Ionic Stable Footer', 'Ionic Positive Footer',
'Ionic Calm Footer', 'Ionic Balanced Footer', 'Ionic Energized Footer',
'Ionic Assertive Footer', 'Ionic Royal Footer', 'Ionic Dark Footer']
for idx, bar in enumerate(css_bars):
strr = '<div class="bar bar-footer ' + bar + '"> \n' + \
'\t <div class="title">${1:' + bar + '}</div> \n' + '</div>'
snippet_str = create_snippet_string(
'ionic-footer-' + bar[4:], strr, 'text.html', footer_desc[idx])
output_file = open(
output_folder + 'ionic-footer-' + bar[4:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if buttons:
output_folder = current_path + '/Buttons/'
css_buttons = ['button-light', 'button-stable', 'button-positive', 'button-calm',
'button-balanced', 'button-energized', 'button-assertive', 'button-royal', 'button-dark']
for button_color in css_buttons:
regular_button = '<button class="button ' + button_color + \
'"> \n' + '\t ${1:' + button_color + '}\n' + '</button>'
block_button = '<button class="button button-block ' + button_color + \
'"> \n' + '\t ${1:' + button_color + '}\n' + '</button>'
full_button = '<button class="button button-full ' + button_color + \
'"> \n' + '\t ${1:' + button_color + '}\n' + '</button>'
small_button = '<button class="button button-small ' + button_color + \
'"> \n' + '\t ${1:' + button_color + '}\n' + '</button>'
large_button = '<button class="button button-large ' + button_color + \
'"> \n' + '\t ${1:' + button_color + '}\n' + '</button>'
outline_button = '<button class="button button-outline ' + \
button_color + '"> \n' + \
'\t ${1:' + button_color + '}\n' + '</button>'
clear_button = '<button class="button button-clear ' + button_color + \
'"> \n' + '\t ${1:' + button_color + '}\n' + '</button>'
icon_left_button = '<button class="button icon-left ${1:ionicon} ' + \
button_color + '"> \n' + \
'\t ${2:' + button_color + '}\n' + '</button>'
icon_right_button = '<button class="button icon-right ${1:ionicon} ' + \
button_color + '"> \n' + \
'\t ${2:' + button_color + '}\n' + '</button>'
button_bar = '<div class="button-bar bar-' + \
button_color[7:] + '">$0</div>'
snippet_str = create_snippet_string(
'ionic-button-' + button_color[7:], regular_button, 'text.html', 'Ionic Button')
output_file = open(
output_folder + 'ionic-button-' + button_color[7:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string('ionic-button-block-' + button_color[
7:], block_button, 'text.html', 'Ionic Block Button')
output_file = open(
output_folder + 'ionic-button-block-' + button_color[7:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-button-full-' + button_color[7:], full_button, 'text.html', 'Ionic Full Button')
output_file = open(
output_folder + 'ionic-button-full-' + button_color[7:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string('ionic-button-small-' + button_color[
7:], small_button, 'text.html', 'Ionic Small Button')
output_file = open(
output_folder + 'ionic-button-small-' + button_color[7:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string('ionic-button-large-' + button_color[
7:], large_button, 'text.html', 'Ionic Large Button')
output_file = open(
output_folder + 'ionic-button-large-' + button_color[7:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string('ionic-button-outline-' + button_color[
7:], outline_button, 'text.html', 'Ionic Outline Button')
output_file = open(
output_folder + 'ionic-button-outline-' + button_color[7:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string('ionic-button-clear-' + button_color[
7:], clear_button, 'text.html', 'Ionic Clear Button')
output_file = open(
output_folder + 'ionic-button-clear-' + button_color[7:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string('ionic-button-icon-left-' + button_color[
7:], icon_left_button, 'text.html', 'Ionic Icon Left Button')
output_file = open(
output_folder + 'ionic-button-icon-left-' + button_color[7:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string('ionic-button-icon-right-' + button_color[
7:], icon_right_button, 'text.html', 'Ionic Icon Right Button')
output_file = open(
output_folder + 'ionic-button-icon-right-' + button_color[7:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-button-bar-' + button_color[7:], button_bar, 'text.html', 'Ionic Bar Button')
output_file = open(
output_folder + 'ionic-button-bar-' + button_color[7:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if lists:
output_folder = current_path + '/Lists/'
item_basic = '<a class="item">$0</a>'
item_divider = '<div class="item item-divider">$0</div>'
item_icon_left = '<a class="item item-icon-left" href="#">$0</a>'
item_icon_right = '<a class="item item-icon-right" href="#">$0</a>'
item_icon_left_right = '<a class="item item-icon-left item-icon-right" href="#">$0</a>'
item_button_right = '<a class="item item-button-right" href="#">$0</a>'
item_button_left = '<a class="item item-button-left" href="#">$0</a>'
item_avatar = '<a class="item item-avatar" href="#">\n' + \
'\t<img src="${1:image_source}">\n' + \
'\t<h2>${2:title}</h2>\n' + '\t<p>${3:description}</p>\n' + '</a>'
item_thumbnail_left = '<a class="item item-thumbnail-left" href="#">\n' + \
'\t<img src="${1:image_source}">\n' + \
'\t<h2>${2:title}</h2>\n' + '\t<p>${3:description}</p>\n' + '</a>'
item_thumbnail_right = '<a class="item item-thumbnail-right" href="#">\n' + \
'\t<img src="${1:image_source}">\n' + \
'\t<h2>${2:title}</h2>\n' + '\t<p>${3:description}</p>\n' + '</a>'
list_inset = '<div class="list list-inset">$0</div>'
collection_repeat = '<div class="item ${1:your_item_css_class}"\n' + '\tcollection-repeat="${2:item in items}"\n' + '\tcollection-item-width="${3:\'100%\'}"\n' + \
'\tcollection-item-height="${4:getItemHeight(item, \$index)}"\n' + \
'\tng-style="${5:{height: getItemHeight(item, \$index)}\}">$0\n' + \
'</div>'
ionicListDelegate_showReorder = '\$ionicListDelegate.showReorder(${1:true});'
ionicListDelegate_showDelete = '\$ionicListDelegate.showDelete(${1:true});'
ionicListDelegate_canSwipeItems = '\$ionicListDelegate.canSwipeItems(${1:true});'
ionicListDelegate_closeOptionButtons = '\$ionicListDelegate.closeOptionButtons();'
snippet_str = create_snippet_string(
'ionic-item', item_basic, 'text.html', 'Ionic Item')
output_file = open(
output_folder + 'ionic-item' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-item-divider', item_divider, 'text.html', 'Ionic Item Divider')
output_file = open(
output_folder + 'ionic-item-divider' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-item-icon-left', item_icon_left, 'text.html', 'Ionic Icon Left Item')
output_file = open(
output_folder + 'ionic-item-icon-left' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-item-icon-right', item_icon_right, 'text.html', 'Ionic Icon Right Item')
output_file = open(
output_folder + 'ionic-item-icon-right' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-item-icon-left-right', item_icon_left_right, 'text.html', 'Ionic Icon Left Right Item')
output_file = open(
output_folder + 'ionic-item-icon-left-right' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-item-button-right', item_button_right, 'text.html', 'Ionic Button Right Item')
output_file = open(
output_folder + 'ionic-item-button-right' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-item-button-left', item_button_left, 'text.html', 'Ionic Button Left Item')
output_file = open(
output_folder + 'ionic-item-button-left' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-item-avatar', item_avatar, 'text.html', 'Ionic Avatar Item')
output_file = open(
output_folder + 'ionic-item-avatar' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-item-thumbnail-left', item_thumbnail_left, 'text.html', 'Ionic Left Thumbnail Item')
output_file = open(
output_folder + 'ionic-item-thumbnail-left' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-item-thumbnail-right', item_thumbnail_right, 'text.html', 'Ionic Right Thumbnail Item')
output_file = open(
output_folder + 'ionic-item-thumbnail-right' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-list-inset', list_inset, 'text.html', 'Ionic Inset List')
output_file = open(
output_folder + 'ionic-list-inset' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-collection-repeat', collection_repeat, 'text.html', 'Ionic Collection Repeat')
output_file = open(
output_folder + 'ionic-collection-repeat' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'$ionicListDelegate.showReorder', ionicListDelegate_showReorder, 'source.js', 'Ionic List Delegate')
output_file = open(
output_folder + 'ionicListDelegate.showReorder' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'$ionicListDelegate.showDelete', ionicListDelegate_showDelete, 'source.js', 'Ionic List Delegate')
output_file = open(
output_folder + 'ionicListDelegate.showDelete' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'$ionicListDelegate.canSwipeItems', ionicListDelegate_canSwipeItems, 'source.js', 'Ionic List Delegate')
output_file = open(
output_folder + 'ionicListDelegate.canSwipeItems' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'$ionicListDelegate.closeOptionButtons', ionicListDelegate_closeOptionButtons, 'source.js', 'Ionic List Delegate')
output_file = open(
output_folder + 'ionicListDelegate.closeOptionButtons' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if cards:
output_folder = current_path + '/Cards/'
card = '<div class="card">\n' + '\t<div class="item item-text-wrap">\n' + \
'\t\t${1:desc}\n' + '\t</div>\n' + '</div>'
card_header = '<div class="card">\n' + '\t<div class="item item-divider">\n' + \
'\t\t${1:header}\n' + '\t</div>\n' + '\t<div class="item item-text-wrap">\n' + \
'\t\t${2:desc}\n' + '\t</div>\n' + '</div>'
card_footer = '<div class="card">\n' + '\t<div class="item item-text-wrap">\n' + \
'\t\t${1:desc}\n' + '\t</div>\n' + '\t<div class="item item-divider">\n' + \
'\t\t${2:footer}\n' + '\t</div>\n' + '</div>'
card_header_footer = '<div class="card">\n' + '\t<div class="item item-divider">\n' + \
'\t\t${1:header}\n' + '\t</div>\n' + '\t<div class="item item-text-wrap">\n' + '\t\t${2:desc}\n' + \
'\t</div>\n' + '\t<div class="item item-divider">\n' + \
'\t\t${3:footer}\n' + '\t</div>\n' + '</div>'
card_list_item = '<a href="#" class="item item-icon-left">\n' + \
'\t<i class="icon ${1:ionicon}"></i>\n' + '\t${2:desc}\n' + '</a>'
card_list = '<div class="list card">\n' + '\t$0\n' + '</div>'
item_avatar = '<div class="item item-avatar" href="#">\n' + \
'\t<img src="${1:image_source}">\n' + '\t<h2>${2:title}</h2>\n' + \
'\t<p>${3:description}</p>\n' + '</div>\n'
item_image = '<div class="item item-image">\n' + \
'\t<img src="${4:image_source}">\n' + '</div>\n'
item_link = '<a class="item ${5:item-icon-left} ${6:assertive}" href="#">\n' + \
'\t<i class="icon ${7:ionicon}"></i>\n' + '\t${8:text_link}\n' + '</a>'
card_image = item_avatar + item_image + item_link
item_body = '<div class="item item-body">\n' + \
'\t<img class="full-image" src="${4:image_source}">\n' + \
'\t<p>${5:description}</p>\n' + \
'\t<p>\n' + \
'\t\t<a href="#" class="subdued">1 Like</a>\n' + \
'\t\t<a href="#" class="subdued">5 Comments</a>\n' + \
'\t</p>\n' + \
'</div>\n\n'
item_tabs = '<div class="item tabs tabs-secondary tabs-icon-left">\n' + \
'\t<a class="tab-item" href="#">\n' + \
'\t\t<i class="icon ion-thumbsup"></i>\n' + \
'\t\tLike\n' + \
'\t</a>\n' + \
'\t<a class="tab-item" href="#">\n' + \
'\t\t<i class="icon ion-chatbox"></i>\n' + \
'\t\tComments\n' + \
'\t</a>\n' + \
'\t<a class="tab-item" href="#">\n' + \
'\t\t<i class="icon ion-share"></i>\n' + \
'\t\tShare\n' + \
'\t</a>\n' + \
'</div>\n'
card_showcase = item_avatar + item_body + item_tabs
snippet_str = create_snippet_string(
'ionic-card', card, 'text.html', 'Ionic Card')
output_file = open(output_folder + 'ionic-card' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-card-header', card_header, 'text.html', 'Ionic Header Card')
output_file = open(
output_folder + 'ionic-card-header' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-card-footer', card_footer, 'text.html', 'Ionic Footer Card')
output_file = open(
output_folder + 'ionic-card-footer' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-card-header-footer', card_header_footer, 'text.html', 'Ionic Header Footer Card')
output_file = open(
output_folder + 'ionic-card-header-footer' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-card-list-item', card_list_item, 'text.html', 'Ionic Item Card')
output_file = open(
output_folder + 'ionic-card-list-item' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-card-list', card_list, 'text.html', 'Ionic List Card')
output_file = open(
output_folder + 'ionic-card-list' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-card-image', card_image, 'text.html', 'Ionic Image Card')
output_file = open(
output_folder + 'ionic-card-image' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-card-showcase', card_showcase, 'text.html', 'Ionic Showcase Card')
output_file = open(
output_folder + 'ionic-card-showcase' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if forms:
output_folder = current_path + '/Forms/'
basic_input_html_start = '<label class="item item-input'
basic_input_html_end = '</label>'
input_placeholder = basic_input_html_start + '">\n' + \
'\t<input type="text" placeholder="${1:placeholder_1}">\n' + \
basic_input_html_end
input_inline = basic_input_html_start + '">\n' + \
'\t<span class="input-label">${1:input_name}</span>\n' + \
'\t<input type="text">\n' + \
basic_input_html_end
input_stacked = basic_input_html_start + ' item-stacked-label">\n' + \
'\t<span class="input-label">${1:input_name}</span>\n' + \
'\t<input type="text" placeholder="${2:input_placeholder}">\n' + \
basic_input_html_end
input_floating = basic_input_html_start + ' item-floating-label">\n' + \
'\t<span class="input-label">${1:input_name}</span>\n' + \
'\t<input type="text" placeholder="${2:input_placeholder}">\n' + \
basic_input_html_end
input_inset = '<div class="item item-input-inset">\n' + \
'\t<label class="item-input-wrapper">\n' + \
'\t\t<input type="text" placeholder="${1:input_placeholder}">\n' + \
'\t</label>\n' + \
'\t<button class="button button-small">\n' + \
'\t\t${2:button_name}\n' + \
'\t</button>\n' + \
'</div>\n'
input_icon = basic_input_html_start + '">\n' + \
'\t<i class="icon ion-search placeholder-icon"></i>\n' + \
'\t<input type="text" placeholder="${1:Search}">\n' + \
basic_input_html_end
input_header = '<div class="bar bar-header item-input-inset">\n' + \
'\t<label class="item-input-wrapper">\n' + \
'\t\t<i class="icon ion-ios7-search placeholder-icon"></i>\n' + \
'\t\t<input type="search" placeholder="${1:Search}">\n' + \
'\t</label>\n' + \
'\t<button class="button button-clear">\n' + \
'\t\t${2:Cancel}\n' + \
'\t</button>\n' + \
'</div>'
snippet_str = create_snippet_string(
'ionic-input-placeholder', input_placeholder, 'text.html', 'ionic-input-placeholder')
output_file = open(
output_folder + 'ionic-input-placeholder' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-input-inline', input_inline, 'text.html', 'ionic-input-inline')
output_file = open(
output_folder + 'ionic-input-inline' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-input-stacked', input_stacked, 'text.html', 'ionic-input-stacked')
output_file = open(
output_folder + 'ionic-input-stacked' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-input-floating', input_floating, 'text.html', 'ionic-input-floating')
output_file = open(
output_folder + 'ionic-input-floating' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-input-inset', input_inset, 'text.html', 'ionic-input-inset')
output_file = open(
output_folder + 'ionic-input-inset' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-input-icon', input_icon, 'text.html', 'ionic-input-icon')
output_file = open(
output_folder + 'ionic-input-icon' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'ionic-input-header', input_header, 'text.html', 'ionic-input-header')
output_file = open(
output_folder + 'ionic-input-header' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if toggle:
output_folder = current_path + '/Toggle/'
css_toggles = ['toggle-light', 'toggle-stable', 'toggle-positive',
'toggle-calm', 'toggle-balanced', 'toggle-energized',
'toggle-assertive', 'toggle-royal', 'toggle-dark']
toggle_desc = ['Ionic Light Toggle', 'Ionic Stable Toggle', 'Ionic Positive Toggle',
'Ionic Calm Toggle', 'Ionic Balanced Toggle', 'Ionic Energized Toggle',
'Ionic Assertive Toggle', 'Ionic Royal Toggle', 'Ionic Dark Toggle']
for idx, toggle_color in enumerate(css_toggles):
toggle_str = '<li class="item item-toggle">\n' + \
'\t${1:desc}\n' + \
'\t<label class="toggle ' + toggle_color + '">\n' + \
'\t\t<input type="checkbox">\n' + \
'\t\t<div class="track">\n' + \
'\t\t\t<div class="handle"></div>\n' + \
'\t\t</div>\n' + \
'\t</label>\n' + \
'</li>\n'
snippet_str = create_snippet_string('ionic-toggle-' + toggle_color[7:], toggle_str,
'text.html', toggle_desc[idx])
output_file = open(
output_folder + 'ionic-toggle-' + toggle_color[7:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if checkbox:
output_folder = current_path + '/Checkbox/'
css_checkboxs = ['checkbox-light', 'checkbox-stable', 'checkbox-positive',
'checkbox-calm', 'checkbox-balanced', 'checkbox-energized',
'checkbox-assertive', 'checkbox-royal', 'checkbox-dark']
checkbox_desc = ['Ionic Light Checkbox', 'Ionic Stable Checkbox', 'Ionic Positive Checkbox',
'Ionic Calm Checkbox', 'Ionic Balanced Checkbox', 'Ionic Energized Checkbox',
'Ionic Assertive Checkbox', 'Ionic Royal Checkbox', 'Ionic Dark Checkbox']
for idx, checkbox_color in enumerate(css_checkboxs):
checkbox_str = '<li class="item item-checkbox">\n' + \
'\t<label class="checkbox ' + checkbox_color + '">\n' + \
'\t\t<input type="checkbox">\n' + \
'\t</label>\n' + \
'\t${1:desc}\n' + \
'</li>\n'
snippet_str = create_snippet_string('ionic-checkbox-' + checkbox_color[9:], checkbox_str,
'text.html', checkbox_desc[idx])
output_file = open(
output_folder + 'ionic-checkbox-' + checkbox_color[9:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if radio_button:
output_folder = current_path + '/Radio Buttons/'
radio_str = '<label class="item item-radio">\n' + \
'\t<input type="radio" name="group">\n' + \
'\t<div class="item-content">\n' + \
'\t\t${1:desc}\n' + \
'\t</div>\n' + \
'\t<i class="radio-icon ${2:ion-checkmark}"></i>\n' + \
'</label>\n'
snippet_str = create_snippet_string(
'ionic-radio-button', radio_str, 'text.html', 'Ionic Radio Button')
output_file = open(
output_folder + 'ionic-radio-button.sublime-snippet', 'w+')
output_file.write(snippet_str)
output_file.close()
if range_control:
output_folder = current_path + '/Range/'
css_ranges = ['range-light', 'range-stable', 'range-positive',
'range-calm', 'range-balanced', 'range-energized',
'range-assertive', 'range-royal', 'range-dark']
range_desc = ['Ionic Light Range', 'Ionic Stable Range', 'Ionic Positive Range',
'Ionic Calm Range', 'Ionic Balanced Range', 'Ionic Energized Range',
'Ionic Assertive Range', 'Ionic Royal Range', 'Ionic Dark Range']
range_desc_list = ['Ionic Light List Range', 'Ionic Stable List Range', 'Ionic Positive List Range',
'Ionic Calm List Range', 'Ionic Balanced List Range', 'Ionic Energized List Range',
'Ionic Assertive List Range', 'Ionic Royal List Range', 'Ionic Dark List Range']
for idx, range_color in enumerate(css_ranges):
range_str_default = '<div class="range ' + range_color + '">\n' + \
'\t<i class="icon ${1:ion-volume-low}"></i>\n' + \
'\t<input type="range" name="volume" min="0" max="100" value="33">\n' + \
'\t<i class="icon ${2:ion-volume-high}"></i>\n' + \
'</div>\n'
snippet_str = create_snippet_string('ionic-range-' + range_color[6:], range_str_default,
'text.html', range_desc[idx])
output_file = open(
output_folder + 'ionic-range-' + range_color[6:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
range_str_in_list = '<div class="item range ' + range_color + '">\n' + \
'\t<i class="icon ${1:ion-volume-low}"></i>\n' + \
'\t<input type="range" name="volume" min="0" max="100" value="33">\n' + \
'\t<i class="icon ${2:ion-volume-high}"></i>\n' + \
'</div>\n'
snippet_str = create_snippet_string('ionic-range-item-' + range_color[6:], range_str_in_list,
'text.html', range_desc_list[idx])
output_file = open(
output_folder + 'ionic-range-item-' + range_color[6:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if select_control:
output_folder = current_path + '/Select/'
css_selects = ['item-light', 'item-stable', 'item-positive',
'item-calm', 'item-balanced', 'item-energized',
'item-assertive', 'item-royal', 'item-dark']
select_desc = ['Ionic Light Select', 'Ionic Stable Select', 'Ionic Positive Select',
'Ionic Calm Select', 'Ionic Balanced Select', 'Ionic Energized Select',
'Ionic Assertive Select', 'Ionic Royal Select', 'Ionic Dark Select']
for idx, select_color in enumerate(css_selects):
select_str = '<label class="item item-input item-select ' + select_color + '">\n' + \
'\t<div class="input-label">\n' + \
'\t\t${1:desc}\n' + \
'\t</div>\n' + \
'\t<select>\n' + \
'\t\t<option selected>${2:opt1}</option>\n' + \
'\t\t<option>${3:opt2}</option>\n' + \
            '\t</select>\n' + \
'</label>\n'
snippet_str = create_snippet_string('ionic-select-' + select_color[5:], select_str,
'text.html', select_desc[idx])
output_file = open(
output_folder + 'ionic-select-' + select_color[5:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if tabs:
output_folder = current_path + '/Tabs/'
css_tabs = ['tabs-light', 'tabs-stable', 'tabs-positive',
'tabs-calm', 'tabs-balanced', 'tabs-energized',
'tabs-assertive', 'tabs-royal', 'tabs-dark']
tabs_desc = ['Ionic Light Tabs', 'Ionic Stable Tabs', 'Ionic Positive Tabs',
'Ionic Calm Tabs', 'Ionic Balanced Tabs', 'Ionic Energized Tabs',
'Ionic Assertive Tabs', 'Ionic Royal Tabs', 'Ionic Dark Tabs']
for idx, tabs_color in enumerate(css_tabs):
tabs_str_default = '<div class="tabs ' + tabs_color + '">$0</div>\n'
snippet_str = create_snippet_string('ionic-tabs-' + tabs_color[5:], tabs_str_default,
'text.html', tabs_desc[idx])
output_file = open(
output_folder + 'ionic-tabs-' + tabs_color[5:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
tabs_str_icon_only = '<div class="tabs tabs-icon-only ' + tabs_color + '">$0</div>\n'
snippet_str = create_snippet_string('ionic-tabs-icon-only-' + tabs_color[5:], tabs_str_icon_only,
'text.html', tabs_desc[idx])
output_file = open(
output_folder + 'ionic-tabs-icon-only-' + tabs_color[5:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
tabs_str_icon_top = '<div class="tabs tabs-icon-top ' + tabs_color + '">$0</div>\n'
snippet_str = create_snippet_string('ionic-tabs-icon-top-' + tabs_color[5:], tabs_str_icon_top,
'text.html', tabs_desc[idx])
output_file = open(
output_folder + 'ionic-tabs-icon-top-' + tabs_color[5:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
tabs_str_icon_left = '<div class="tabs tabs-icon-left ' + tabs_color + '">$0</div>\n'
snippet_str = create_snippet_string('ionic-tabs-icon-left-' + tabs_color[5:], tabs_str_icon_left,
'text.html', tabs_desc[idx])
output_file = open(
output_folder + 'ionic-tabs-icon-left-' + tabs_color[5:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
tabs_str_striped = '<div class="tabs-striped tabs-color-' + tabs_color[5:] + '">$0</div>\n'
snippet_str = create_snippet_string('ionic-tabs-striped-' + tabs_color[5:], tabs_str_striped,
'text.html',tabs_desc[idx])
output_file = open(
output_folder + 'ionic-tabs-striped-' + tabs_color[5:] + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
methods = ['select(${1:index})', 'selectedIndex()']
for method in methods:
ionicTabsDelegate = '\$ionicTabsDelegate.' + method
path_name_file = output_folder + \
'ionicTabsDelegate.' + \
method.split('(')[0] + ".sublime-snippet"
tabTrigger = '$ionicTabsDelegate.' + method.split('(')[0]
content = ionicTabsDelegate
scope = 'source.js'
description = ' Ionic Tabs Delegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
# item tab
tabs_item_str_default = '<a class="tab-item" href="#">\n' + \
'\t${1:Tab}\n' + \
'</a>\n'
snippet_str = create_snippet_string('ionic-tabs-item', tabs_item_str_default,
'text.html', 'Ionic Tabs Item')
output_file = open(
output_folder + 'ionic-tabs-item' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
# item icon tab
tabs_item_str_icon = '<a class="tab-item" href="#">\n' + \
'\t<i class="icon ${1:ion-home}"></i>\n' + \
'\t$0\n' + \
'</a>\n'
snippet_str = create_snippet_string('ionic-tabs-item-icon', tabs_item_str_icon,
'text.html', 'Ionic Tabs Icon Item')
output_file = open(
output_folder + 'ionic-tabs-item-icon' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
ionicTabsDelegate = '\$ionicTabsDelegate.\$getByHandle(${1:handle})'
path_name_file = output_folder + \
'ionicTabsDelegate.getByHandle.sublime-snippet'
tabTrigger = '$ionicTabsDelegate.$getByHandle'
content = ionicTabsDelegate
scope = 'source.js'
description = ' Ionic Tabs Delegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
if actionSheet:
output_folder = current_path + '/Action Sheet/'
actionSheet_str = '\$ionicActionSheet.show({\n' + \
'\t\tbuttons: [\n' + \
'\t\t\t{ text: "${1:Button text 1}" },\n' + \
'\t\t\t{ text: "${2:Move}" }\n' + \
'\t\t],\n' + \
'\t\tdestructiveText: "${3:Delete}",\n' + \
'\t\ttitleText: "${4:Title}",\n' + \
'\t\tcancelText: "${5:Cancel}",\n' + \
'\t\tcancel: function() {\n' + \
'\t\t\t${6: //your code goes here}\n' + \
'\t\t},\n' + \
'\t\tbuttonClicked: function(index) {\n' + \
'\t\t\t${7: return true;}\n' + \
'\t\t}\n' + \
'});'
snippet_str = create_snippet_string(
'$ionicActionSheet.show', actionSheet_str, 'source.js', 'Ionic Action Sheet')
output_file = open(
output_folder + 'ionicActionSheet.show' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if backdrop:
output_folder = current_path + '/Backdrop/'
backdrop_retain = '\$ionicBackdrop.retain();'
backdrop_release = '\$ionicBackdrop.release();'
snippet_str = create_snippet_string(
'$ionicBackdrop.retain', backdrop_retain, 'source.js', 'Ionic Backdrop')
output_file = open(
output_folder + 'ionicBackdrop.retain' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
snippet_str = create_snippet_string(
'$ionicBackdrop.release', backdrop_release, 'source.js', 'Ionic Backdrop')
output_file = open(
output_folder + 'ionicBackdrop.release' + ".sublime-snippet", "w+")
output_file.write(snippet_str)
output_file.close()
if scrollDelegate:
output_folder = current_path + '/Content/'
service = '\$ionicScrollDelegate.'
resize = service + 'resize()'
scrollTop = service + 'scrollTop(${1:shouldAnimate})'
scrollBottom = service + 'scrollBottom(${1:shouldAnimate})'
scrollTo = service + 'scrollTo(${1:left}, ${2:top}, ${3:shouldAnimate})'
scrollBy = service + 'scrollBy(${1:left}, ${2:top}, ${3:shouldAnimate})'
zoomTo = service + \
'zoomTo(${1:level}, ${2:animate}, ${3:originLeft}, ${4:originTop})'
zoomBy = service + \
'zoomBy(${1:factor}, ${2:animate}, ${3:originLeft}, ${4:originTop})'
getScrollPosition = service + 'getScrollPosition()'
anchorScroll = service + 'anchorScroll(${1:shouldAnimate})'
getScrollView = service + 'getScrollView()'
getByHandle = service + '\$getByHandle(${1:handle})'
path_name_file = output_folder + \
'ionicScrollDelegate.resize' + ".sublime-snippet"
tabTrigger = '$ionicScrollDelegate.resize'
content = resize
scope = 'source.js'
description = ' Ionic ScrollDelegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
path_name_file = output_folder + \
'ionicScrollDelegate.scrollTop' + ".sublime-snippet"
tabTrigger = '$ionicScrollDelegate.scrollTop'
content = scrollTop
scope = 'source.js'
description = ' Ionic ScrollDelegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
path_name_file = output_folder + \
'ionicScrollDelegate.scrollBottom' + ".sublime-snippet"
tabTrigger = '$ionicScrollDelegate.scrollBottom'
content = scrollBottom
scope = 'source.js'
description = ' Ionic ScrollDelegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
path_name_file = output_folder + \
'ionicScrollDelegate.scrollTo' + ".sublime-snippet"
tabTrigger = '$ionicScrollDelegate.scrollTo'
content = scrollTo
scope = 'source.js'
description = ' Ionic ScrollDelegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
path_name_file = output_folder + \
'ionicScrollDelegate.scrollBy' + ".sublime-snippet"
tabTrigger = '$ionicScrollDelegate.scrollBy'
content = scrollBy
scope = 'source.js'
description = ' Ionic ScrollDelegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
path_name_file = output_folder + \
'ionicScrollDelegate.zoomTo' + ".sublime-snippet"
tabTrigger = '$ionicScrollDelegate.zoomTo'
content = zoomTo
scope = 'source.js'
description = ' Ionic ScrollDelegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
path_name_file = output_folder + \
'ionicScrollDelegate.zoomBy' + ".sublime-snippet"
tabTrigger = '$ionicScrollDelegate.zoomBy'
content = zoomBy
scope = 'source.js'
description = ' Ionic ScrollDelegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
path_name_file = output_folder + \
'ionicScrollDelegate.getScrollPosition' + ".sublime-snippet"
tabTrigger = '$ionicScrollDelegate.getScrollPosition'
content = getScrollPosition
scope = 'source.js'
description = ' Ionic ScrollDelegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
path_name_file = output_folder + \
'ionicScrollDelegate.anchorScroll' + ".sublime-snippet"
tabTrigger = '$ionicScrollDelegate.anchorScroll'
content = anchorScroll
scope = 'source.js'
description = ' Ionic ScrollDelegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
path_name_file = output_folder + \
'ionicScrollDelegate.getScrollView' + ".sublime-snippet"
tabTrigger = '$ionicScrollDelegate.getScrollView'
content = getScrollView
scope = 'source.js'
description = ' Ionic ScrollDelegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
path_name_file = output_folder + \
'ionicScrollDelegate.getByHandle' + ".sublime-snippet"
tabTrigger = '$ionicScrollDelegate.getByHandle'
content = getByHandle
scope = 'source.js'
description = ' Ionic ScrollDelegate'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
if loading:
output_folder = current_path + '/Loading/'
ionicLoading_show = '\$ionicLoading.show({\n' + \
'\ttemplate: "${1:Loading...}"\n' + \
'});'
path_name_file = output_folder + 'ionicLoading.show' + ".sublime-snippet"
tabTrigger = '$ionicLoading.show'
content = ionicLoading_show
scope = 'source.js'
description = ' Ionic Loading Service'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
ionicLoading_hide = '\$ionicLoading.hide()'
path_name_file = output_folder + 'ionicLoading.hide' + ".sublime-snippet"
tabTrigger = '$ionicLoading.hide'
content = ionicLoading_hide
scope = 'source.js'
description = ' Ionic Loading Service'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
if modal:
output_folder = current_path + '/Modal/'
ionicModal_fromTemplateUrl = '\$ionicModal.fromTemplateUrl(${1:templateUrl}, ${2:options})'
path_name_file = output_folder + \
'ionicModal.fromTemplateUrl' + ".sublime-snippet"
tabTrigger = '$ionicModal.fromTemplateUrl'
content = ionicModal_fromTemplateUrl
scope = 'source.js'
description = ' Ionic Modal Service'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
ionicModal_fromTemplate = '\$ionicModal.fromTemplate(${1:templateString}, ${2:options})'
path_name_file = output_folder + \
'ionicModal.fromTemplate' + ".sublime-snippet"
tabTrigger = '$ionicModal.fromTemplate'
content = ionicModal_fromTemplate
scope = 'source.js'
description = ' Ionic Modal Service'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
if navigation:
output_folder = current_path + '/Navigation/'
# $ionicView
events = ['loaded', 'enter', 'leave', 'beforeEnter',
'beforeLeave', 'afterEnter', 'afterLeave', 'unloaded']
for event in events:
ionicView_event = '\$scope.\$on("\$ionicView.' + \
event + '", function () {\n$0\n});'
path_name_file = output_folder + \
'ionicView.' + event + ".sublime-snippet"
tabTrigger = '$ionicView.' + event
content = ionicView_event
scope = 'source.js'
description = ' Ionic View Event'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
# ionicNavBarDelegate
methods = ['align', 'showBackButton', 'showBar', 'title', 'back']
params = ['direction', 'show', 'show', 'title', '']
for idx, method in enumerate(methods):
ionicNavBarDelegate = '\$ionicNavBarDelegate.%s(${1:%s})' % (
method, params[idx])
path_name_file = output_folder + \
'ionicNavBarDelegate.' + method + ".sublime-snippet"
tabTrigger = '$ionicNavBarDelegate.' + method
content = ionicNavBarDelegate
scope = 'source.js'
description = ' Ionic Nav Methods'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
# ionicHistory
methods = ['viewHistory', 'currentView', 'currentHistoryId',
'currentTitle', 'backView', 'backTitle',
'forwardView', 'currentStateName', 'goBack',
'clearHistory', 'clearCache', 'nextViewOptions']
params = ['', '', '', '${1:val}', '', '', '', '', '', '', '',
'\n\tdisableAnimate: ${1:true},\n \tdisableBack: ${2:true}\n']
for idx, method in enumerate(methods):
ionicHistory = '\$ionicHistory.%s(%s)' % (method, params[idx])
path_name_file = output_folder + \
'ionicHistory.' + method + ".sublime-snippet"
tabTrigger = '$ionicHistory.' + method
content = ionicHistory
scope = 'source.js'
description = ' Ionic Hist View'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
if platform:
output_folder = current_path + '/Platform/'
methods = ['onHardwareBackButton(${1:callback})', 'offHardwareBackButton(${1:callback})',
'registerBackButtonAction(${1:callback}, ${2:priority})', 'on(${1:type}, ${2:callback})',
'ready()']
for method in methods:
platformDelegate = '\$ionicPlatform.' + method
path_name_file = output_folder + \
'ionicPlatform.' + method.split('(')[0] + ".sublime-snippet"
tabTrigger = '$ionicPlatform.' + method.split('(')[0]
content = platformDelegate
scope = 'source.js'
description = ' Ionic Platform Methods'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
if popover:
output_folder = current_path + '/Popover/'
methodsDelegate = ['fromTemplate(${1:templateString}, {\n \t${2:options}\n});',
'fromTemplateUrl(${1:templateUrl}, {\n \t${2:options}\n})']
for method in methodsDelegate:
popoverDelegate = '\$ionicPopover.' + method
path_name_file = output_folder + \
'ionicPopover.' + method.split('(')[0] + ".sublime-snippet"
tabTrigger = '$ionicPopover.' + method.split('(')[0]
content = popoverDelegate
scope = 'source.js'
description = ' Ionic Popover'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
if popup:
output_folder = current_path + '/Popup/'
show = 'show({\n' + \
'\ttemplate: $1,\n' + \
'\ttitle: $2,\n' + \
'\tsubTitle: $3,\n' + \
'\tscope: \$scope,\n' + \
'\tbuttons: [$4]\n' + \
'});'
confirm = 'confirm({\n' + \
'\ttitle: $1,\n' + \
'\ttemplate: $2\n' + \
'});'
alert = 'alert({\n' + \
'\ttitle: $1,\n' + \
'\ttemplate: $2\n' + \
'});'
prompt = 'prompt({\n' + \
'\ttitle: $1,\n' + \
'\ttemplate: $2,\n' + \
'\tinputType: $3,\n' + \
'\tinputPlaceholder: $4\n' + \
'})'
methods = [show, confirm, alert, prompt]
for method in methods:
popupDelegate = '\$ionicPopup.' + method
path_name_file = output_folder + \
'ionicPopup.' + method.split('(')[0] + ".sublime-snippet"
tabTrigger = '$ionicPopup.' + method.split('(')[0]
content = popupDelegate
scope = 'source.js'
description = ' Ionic Popup'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
if side_menu:
output_folder = current_path + '/Side Menu/'
methods = ['toggleLeft()', 'toggleRight()', 'getOpenRatio()',
'isOpen()', 'isOpenLeft()', 'isOpenRight()',
'canDragContent()', 'edgeDragThreshold(${1:value})']
for method in methods:
ionicSideMenuDelegate = '\$ionicSideMenuDelegate.' + method
path_name_file = output_folder + \
'ionicSideMenuDelegate.' + \
method.split('(')[0] + ".sublime-snippet"
tabTrigger = '$ionicSideMenuDelegate.' + method.split('(')[0]
content = ionicSideMenuDelegate
scope = 'source.js'
description = ' Ionic Side Menu'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
ionicSideMenuDelegate = '\$ionicSideMenuDelegate.\$getByHandle(${1:handle})'
path_name_file = output_folder + \
'ionicSideMenuDelegate.getByHandle.sublime-snippet'
tabTrigger = '$ionicSideMenuDelegate.$getByHandle'
content = ionicSideMenuDelegate
scope = 'source.js'
description = ' Ionic Side Menu'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
if utility:
output_folder = current_path + '/Utility/'
# ionicConfigProvider
methods = ['views.transition(${1:transition})', 'views.maxCache(${1:maxNumber})',
'views.forwardCache(${1:value})', 'backButton.icon(${1:value})',
'backButton.text(${1:value})', 'backButton.previousTitleText(${1:value})',
'tabs.style(${1:value})', 'tabs.position(${1:value})',
'templates.maxPrefetch(${1:value})', 'navBar.alignTitle(${1:value})',
'navBar.positionPrimaryButtons(${1:value})', 'navBar.positionSecondaryButtons(${1:value})']
for method in methods:
ionicConfigProvider = '\$ionicConfigProvider.' + method
path_name_file = output_folder + \
'ionicConfigProvider.' + \
method.split('(')[0] + ".sublime-snippet"
tabTrigger = '$ionicConfigProvider.' + method.split('(')[0]
content = ionicConfigProvider
scope = 'source.js'
description = ' Ionic ConfigProvider'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
# ionic.Platform
methods = ['ready(${1:callback})', 'setGrade(${1:grade})',
'device()', 'isWebView()', 'isIPad()', 'isIOS()', 'isAndroid()', 'isWindowsPhone()',
'platform()', 'version()', 'exitApp()', 'showStatusBar(${1:shouldShow})', 'fullScreen()',
'isReady', 'isFullScreen', 'platforms', 'grade']
for method in methods:
ionic_Platform = 'ionic.Platform.' + method
path_name_file = output_folder + \
'ionicPlatform.' + \
method.split('(')[0] + ".sublime-snippet"
tabTrigger = 'ionicPlatform.' + method.split('(')[0]
content = ionic_Platform
scope = 'source.js'
description = ' Ionic Platform'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
# ionic.DomUtil
methods = ['requestAnimationFrame(${1:callback})', 'animationFrameThrottle(${1:callback})',
'getPositionInParent(${1:element})', 'ready(${1:callback})',
'getTextBounds(${1:textNode})', 'getChildIndex(${1:element}, ${2:type})',
'getParentWithClass(${1:element}, ${2:className})', 'getParentOrSelfWithClass(${1:element}, ${2:className})',
'rectContains(${1:x}, ${2:y}, ${3:x1}, ${4:y1}, ${5:x2}, ${6:y2})', 'blurAll()']
for method in methods:
ionic_DomUtil = 'ionic.DomUtil.' + method
path_name_file = output_folder + \
'ionicDomUtil.' + \
method.split('(')[0] + ".sublime-snippet"
tabTrigger = 'ionicDomUtil.' + method.split('(')[0]
content = ionic_DomUtil
scope = 'source.js'
description = ' Ionic DomUtil'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
# ionic.EventController
methods = ['trigger(${1:eventType}, ${2:data})',
'on(${1:type}, ${2:callback}, ${3:element})',
'off(${1:type}, ${2:callback}, ${3:element})',
'onGesture(${1:eventType}, ${2:callback}, ${3:element})',
'offGesture(${1:eventType}, ${2:callback}, ${3:element})']
for method in methods:
ionic_EventController = 'ionic.EventController.' + method
path_name_file = output_folder + \
'ionicEventController.' + \
method.split('(')[0] + ".sublime-snippet"
tabTrigger = 'ionicEventController.' + method.split('(')[0]
content = ionic_EventController
scope = 'source.js'
description = ' Ionic EventController'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
# $ionicPosition
methods = ['position(${1:element})', 'offset(${1:element})']
for method in methods:
ionicPosition = '\$ionicPosition.' + method
path_name_file = output_folder + \
'ionicPosition.' + \
method.split('(')[0] + ".sublime-snippet"
tabTrigger = '$ionicPosition.' + method.split('(')[0]
content = ionicPosition
scope = 'source.js'
description = ' Ionic ConfigProvider'
create_snippet_file(
path_name_file, tabTrigger, content, scope, description)
|
|
#!/usr/bin/env python3
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Downloads, builds (with instrumentation) and installs shared libraries."""
import argparse
import ast
import errno
import fcntl
import os
import platform
import glob
import re
import shlex
import shutil
import subprocess
import sys
SCRIPT_ABSOLUTE_PATH = os.path.dirname(os.path.abspath(__file__))
def unescape_flags(s):
"""Un-escapes build flags received from GYP.
GYP escapes build flags as if they are to be inserted directly into a command
line, wrapping each flag in double quotes. When flags are passed via
CFLAGS/LDFLAGS instead, double quotes must be dropped.
"""
if not s:
return ''
try:
return ' '.join(ast.literal_eval(s))
except (SyntaxError, ValueError):
return ' '.join(shlex.split(s))
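# Example (illustrative): a GYP-escaped value such as '["-fsanitize=address", "-g"]'
# unescapes to the plain flag string '-fsanitize=address -g'.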
def real_path(path_relative_to_gyp):
"""Returns the absolute path to a file.
GYP generates paths relative to the location of the .gyp file, which is one
level above the location of this script. This function converts them to
absolute paths.
"""
return os.path.realpath(os.path.join(SCRIPT_ABSOLUTE_PATH, '..',
path_relative_to_gyp))
class InstrumentedPackageBuilder(object):
"""Checks out and builds a single instrumented package."""
def __init__(self, args, clobber):
self._cc = args.cc
self._cxx = args.cxx
self._extra_configure_flags = unescape_flags(args.extra_configure_flags)
self._libdir = args.libdir
self._package = args.package
self._patches = [real_path(patch) for patch in (args.patch or [])]
self._pre_build = \
real_path(args.pre_build) if args.pre_build else None
self._verbose = args.verbose
self._clobber = clobber
self._working_dir = os.path.join(
real_path(args.intermediate_dir), self._package, '')
product_dir = real_path(args.product_dir)
self._destdir = os.path.join(
product_dir, 'instrumented_libraries')
self._source_archives_dir = os.path.join(
product_dir, 'instrumented_libraries', 'sources', self._package)
self._cflags = unescape_flags(args.cflags)
if args.sanitizer_ignorelist:
ignorelist_file = real_path(args.sanitizer_ignorelist)
self._cflags += ' -fsanitize-blacklist=%s' % ignorelist_file # nocheck
self._ldflags = unescape_flags(args.ldflags)
self.init_build_env()
# Initialized later.
self._source_dir = None
self._source_archives = None
def init_build_env(self):
self._build_env = os.environ.copy()
self._build_env['CC'] = self._cc
self._build_env['CXX'] = self._cxx
self._build_env['CFLAGS'] = self._cflags
self._build_env['CXXFLAGS'] = self._cflags
self._build_env['LDFLAGS'] = self._ldflags
# libappindicator1 needs this.
self._build_env['CSC'] = '/usr/bin/mono-csc'
def shell_call(self, command, env=None, cwd=None, ignore_ret_code=False):
"""Wrapper around subprocess.Popen().
Calls command with specific environment and verbosity using
subprocess.Popen().
"""
child = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=env, shell=True, cwd=cwd)
stdout = child.communicate()[0].decode('utf-8')
if ignore_ret_code:
if self._verbose:
print(stdout)
return stdout
if self._verbose or child.returncode:
print(stdout)
if child.returncode:
raise Exception('Failed to run: %s' % command)
return stdout
def maybe_download_source(self):
"""Checks out the source code (if needed).
Checks out the source code for the package, if required (i.e. unless running
in no-clobber mode). Initializes self._source_dir and self._source_archives.
"""
get_fresh_source = self._clobber or not os.path.exists(self._working_dir)
if get_fresh_source:
shutil.rmtree(self._working_dir, ignore_errors=True)
os.makedirs(self._working_dir)
      # Download one source package at a time; otherwise there will be
      # connection errors in gnutls_handshake().
lock = open('apt-source-lock', 'w')
fcntl.flock(lock, fcntl.LOCK_EX)
self.shell_call('apt-get source %s' % self._package,
cwd=self._working_dir)
fcntl.flock(lock, fcntl.LOCK_UN)
(dirpath, dirnames, filenames) = next(os.walk(self._working_dir))
if len(dirnames) != 1:
raise Exception(
'`apt-get source %s\' must create exactly one subdirectory.'
% self._package)
self._source_dir = os.path.join(dirpath, dirnames[0], '')
if len(filenames) == 0:
raise Exception('Can\'t find source archives after `apt-get source %s\'.'
% self._package)
self._source_archives = \
[os.path.join(dirpath, filename) for filename in filenames]
return get_fresh_source
def patch_source(self):
for patch in self._patches:
self.shell_call('patch -p1 -i %s' % patch, cwd=self._source_dir)
if self._pre_build:
self.shell_call(self._pre_build, cwd=self._source_dir)
def copy_source_archives(self):
"""Copies the downloaded source archives to the output dir.
For license compliance purposes, every Chromium build that includes
instrumented libraries must include their full source code.
"""
shutil.rmtree(self._source_archives_dir, ignore_errors=True)
os.makedirs(self._source_archives_dir)
for filename in self._source_archives:
shutil.copy(filename, self._source_archives_dir)
for patch in self._patches:
shutil.copy(patch, self._source_archives_dir)
def download_build_install(self):
got_fresh_source = self.maybe_download_source()
if got_fresh_source:
self.patch_source()
self.copy_source_archives()
if not os.path.exists(self.dest_libdir()):
os.makedirs(self.dest_libdir())
try:
self.build_and_install()
except Exception as exception:
print('ERROR: Failed to build package %s. Have you '
'run src/third_party/instrumented_libraries/scripts/'
'install-build-deps.sh?' % self._package)
raise
# Touch a text file to indicate package is installed.
stamp_file = os.path.join(self._destdir, '%s.txt' % self._package)
open(stamp_file, 'w').close()
# Remove downloaded package and generated temporary build files. Failed
# builds intentionally skip this step to help debug build failures.
if self._clobber:
self.shell_call('rm -rf %s' % self._working_dir)
def fix_rpaths(self, directory):
# TODO(eugenis): reimplement fix_rpaths.sh in Python.
script = real_path('scripts/fix_rpaths.sh')
self.shell_call("%s %s" % (script, directory))
def temp_dir(self):
"""Returns the directory which will be passed to `make install'."""
return os.path.join(self._source_dir, 'debian', 'instrumented_build')
def temp_libdir(self):
"""Returns the directory under temp_dir() containing the DSOs."""
return os.path.join(self.temp_dir(), self._libdir)
def dest_libdir(self):
"""Returns the final location of the DSOs."""
return os.path.join(self._destdir, self._libdir)
def cleanup_after_install(self):
"""Removes unneeded files in self.temp_libdir()."""
# .la files are not needed, nuke them.
# In case --no-static is not supported, nuke any static libraries we built.
self.shell_call(
        'find %s -name "*.la" -or -name "*.a" | xargs rm -f' % self.temp_libdir())
# .pc files are not needed.
self.shell_call('rm %s/pkgconfig -rf' % self.temp_libdir())
def make(self, args, env=None, cwd=None, ignore_ret_code=False):
"""Invokes `make'.
Invokes `make' with the specified args, using self._build_env and
self._source_dir by default.
"""
if cwd is None:
cwd = self._source_dir
if env is None:
env = self._build_env
cmd = ['make'] + args
self.shell_call(' '.join(cmd), env=env, cwd=cwd,
ignore_ret_code=ignore_ret_code)
def make_install(self, args, **kwargs):
"""Invokes `make install'."""
self.make(['install'] + args, **kwargs)
def build_and_install(self):
"""Builds and installs the DSOs.
Builds the package with ./configure + make, installs it to a temporary
location, then moves the relevant files to their permanent location.
"""
configure_cmd = './configure --libdir=/%s/ %s' % (
self._libdir, self._extra_configure_flags)
self.shell_call(configure_cmd, env=self._build_env, cwd=self._source_dir)
# Some makefiles use BUILDROOT or INSTALL_ROOT instead of DESTDIR.
args = ['DESTDIR', 'BUILDROOT', 'INSTALL_ROOT']
make_args = ['%s=%s' % (name, self.temp_dir()) for name in args]
self.make(make_args)
self.make_install(make_args)
self.post_install()
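  # Editor's note (illustrative): with the DESTDIR/BUILDROOT/INSTALL_ROOT
  # arguments built above, the two make invocations expand to roughly
  #   make DESTDIR=<tmp> BUILDROOT=<tmp> INSTALL_ROOT=<tmp>
  #   make install DESTDIR=<tmp> BUILDROOT=<tmp> INSTALL_ROOT=<tmp>
  # where <tmp> is temp_dir(), i.e. <source_dir>/debian/instrumented_build.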
def post_install(self):
self.cleanup_after_install()
self.fix_rpaths(self.temp_libdir())
# Now move the contents of the temporary destdir to their final place.
    # We only care about the contents of LIBDIR.
self.shell_call('cp %s/* %s/ -rdf' % (self.temp_libdir(),
self.dest_libdir()))
class DebianBuilder(InstrumentedPackageBuilder):
"""Builds a package using Debian's build system.
  TODO(spang): Probably the rest of the packages should also use this method.
"""
def init_build_env(self):
self._build_env = os.environ.copy()
self._build_env['CC'] = self._cc
self._build_env['CXX'] = self._cxx
self._build_env['DEB_CFLAGS_APPEND'] = self._cflags
self._build_env['DEB_CXXFLAGS_APPEND'] = self._cflags
self._build_env['DEB_LDFLAGS_APPEND'] = self._ldflags
self._build_env['DEB_BUILD_OPTIONS'] = \
'nocheck notest nodoc nostrip parallel=%d' % os.cpu_count()
def build_and_install(self):
self.build_debian_packages()
self.install_packaged_libs()
def build_debian_packages(self):
configure_cmd = 'dpkg-buildpackage -B -uc'
self.shell_call(configure_cmd, env=self._build_env, cwd=self._source_dir)
def install_packaged_libs(self):
for deb_file in self.get_deb_files():
self.shell_call("dpkg-deb -x %s %s" % (deb_file, self.temp_dir()))
dpkg_arch = self.shell_call("dpkg-architecture -qDEB_HOST_MULTIARCH").strip()
lib_dirs = [
"usr/lib/%s" % dpkg_arch,
"lib/%s" % dpkg_arch,
]
lib_paths = [path for lib_dir in lib_dirs for path in
glob.glob(os.path.join(self.temp_dir(), lib_dir, "*.so.*"))]
for lib_path in lib_paths:
dest_path = os.path.join(self.dest_libdir(), os.path.basename(lib_path))
try:
os.unlink(dest_path)
except OSError as exception:
if exception.errno != errno.ENOENT:
raise
if os.path.islink(lib_path):
if self._verbose:
print('linking %s' % os.path.basename(lib_path))
os.symlink(os.readlink(lib_path), dest_path)
elif os.path.isfile(lib_path):
if self._verbose:
print('copying %s' % os.path.basename(lib_path))
shutil.copy(lib_path, dest_path)
def get_deb_files(self):
deb_files = []
files_file = os.path.join(self._source_dir, 'debian/files')
for line in open(files_file, 'r').read().splitlines():
filename, category, section = line.split(' ')
pathname = os.path.join(self._source_dir, '..', filename)
deb_files.append(pathname)
return deb_files
class LibcurlBuilder(DebianBuilder):
def build_and_install(self):
DebianBuilder.build_and_install(self)
self.shell_call('ln -rsf %s/libcurl-gnutls.so.4 %s/libcurl.so' %
(self.dest_libdir(), self.dest_libdir()))
class LibcapBuilder(InstrumentedPackageBuilder):
def build_and_install(self):
# libcap2 doesn't have a configure script
build_args = ['CC', 'CXX', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS']
make_args = [
'%s="%s"' % (name, self._build_env[name]) for name in build_args
]
self.make(make_args)
install_args = [
'DESTDIR=%s' % self.temp_dir(),
'lib=%s' % self._libdir,
# Skip a step that requires sudo.
'RAISE_SETFCAP=no'
]
self.make_install(install_args)
self.cleanup_after_install()
self.fix_rpaths(self.temp_libdir())
# Now move the contents of the temporary destdir to their final place.
    # We only care about the contents of LIBDIR.
self.shell_call('cp %s/* %s/ -rdf' % (self.temp_libdir(),
self.dest_libdir()))
class Libpci3Builder(InstrumentedPackageBuilder):
def package_version(self):
"""Guesses libpci3 version from source directory name."""
dir_name = os.path.split(os.path.normpath(self._source_dir))[-1]
    match = re.match(r'pciutils-(\d+\.\d+\.\d+)', dir_name)
if match is None:
raise Exception(
'Unable to guess libpci3 version from directory name: %s' % dir_name)
return match.group(1)
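  # Editor's note (illustrative): a source tree unpacked as 'pciutils-3.2.1'
  # yields package_version() == '3.2.1'; any other directory name raises an
  # exception.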
def temp_libdir(self):
# DSOs have to be picked up from <source_dir>/lib, since `make install'
    # doesn't actually install them anywhere.
return os.path.join(self._source_dir, 'lib')
def build_and_install(self):
# pciutils doesn't have a configure script
# This build process follows debian/rules.
self.shell_call('mkdir -p %s-udeb/usr/bin' % self.temp_dir())
build_args = ['CC', 'CXX', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS']
make_args = [
'%s="%s"' % (name, self._build_env[name]) for name in build_args
]
make_args += [
'LIBDIR=/%s/' % self._libdir,
'PREFIX=/usr',
'SBINDIR=/usr/bin',
'IDSDIR=/usr/share/misc',
'SHARED=yes',
# pciutils fails to build due to unresolved libkmod symbols. The binary
# package has no dependencies on libkmod, so it looks like it was
# actually built without libkmod support.
'LIBKMOD=no',
]
self.make(make_args)
# `make install' is not needed.
self.fix_rpaths(self.temp_libdir())
# Now install the DSOs to their final place.
self.shell_call(
'install -m 644 %s/libpci.so* %s' % (self.temp_libdir(),
self.dest_libdir()))
self.shell_call(
'ln -sf libpci.so.%s %s/libpci.so.3' % (self.package_version(),
self.dest_libdir()))
class MesonBuilder(InstrumentedPackageBuilder):
def build_and_install(self):
meson_flags = {
'prefix': '/usr',
'libdir': self._libdir,
'sbindir': 'bin',
}
meson_cmd = [
'meson',
'build',
'.',
' '.join('--%s %s' % item for item in meson_flags.items()),
self._extra_configure_flags,
]
self.shell_call(' '.join(meson_cmd),
env=self._build_env, cwd=self._source_dir)
self.shell_call('ninja -C build', cwd=self._source_dir)
    self.shell_call('ninja -C build install',
                    env={**self._build_env, 'DESTDIR': self.temp_dir()},
                    cwd=self._source_dir)
self.post_install()
# LIBDIR is always relative to the prefix (/usr), so that needs to be added
# unlike when using configure.
def temp_libdir(self):
return os.path.join(self.temp_dir(), 'usr', self._libdir)
class NSSBuilder(InstrumentedPackageBuilder):
def build_and_install(self):
# NSS uses a build system that's different from configure/make/install. All
# flags must be passed as arguments to make.
make_args = [
# Do an optimized build.
'BUILD_OPT=1',
# CFLAGS/CXXFLAGS should not be used, as doing so overrides the flags in
# the makefile completely. The only way to append our flags is to tack
# them onto CC/CXX.
'CC="%s %s"' % (self._build_env['CC'], self._build_env['CFLAGS']),
'CXX="%s %s"' % (self._build_env['CXX'], self._build_env['CXXFLAGS']),
# We need to override ZDEFS_FLAG at least to avoid -Wl,-z,defs, which
# is not compatible with sanitizers. We also need some way to pass
# LDFLAGS without overriding the defaults. Conveniently, ZDEF_FLAG is
# always appended to link flags when building NSS on Linux, so we can
# just add our LDFLAGS here.
'ZDEFS_FLAG="-Wl,-z,nodefs %s"' % self._build_env['LDFLAGS'],
'NSPR_INCLUDE_DIR=/usr/include/nspr',
'NSPR_LIB_DIR=%s' % self.dest_libdir(),
'NSS_ENABLE_ECC=1'
]
if platform.architecture()[0] == '64bit':
make_args.append('USE_64=1')
# Make sure we don't override the default flags in the makefile.
for variable in ['CFLAGS', 'CXXFLAGS', 'LDFLAGS']:
del self._build_env[variable]
# Hardcoded paths.
temp_dir = os.path.join(self._source_dir, 'nss')
temp_libdir = os.path.join(temp_dir, 'lib')
# The build happens in <source_dir>/nss. Building fails after all
# the required DSOs have been built, so ignore the error.
self.make(make_args, cwd=temp_dir, ignore_ret_code=True)
self.fix_rpaths(temp_libdir)
# 'make install' is not supported. Copy the DSOs manually.
for (dirpath, dirnames, filenames) in os.walk(temp_libdir):
for filename in filenames:
if filename.endswith('.so'):
full_path = os.path.join(dirpath, filename)
if self._verbose:
print('download_build_install.py: installing ' + full_path)
shutil.copy(full_path, self.dest_libdir())
class StubBuilder(InstrumentedPackageBuilder):
def download_build_install(self):
self._touch(os.path.join(self._destdir, '%s.txt' % self._package))
self.shell_call('mkdir -p %s' % self.dest_libdir())
self._touch(os.path.join(self.dest_libdir(), '%s.so.0' % self._package))
def _touch(self, path):
with open(path, 'w'):
pass
def main():
parser = argparse.ArgumentParser(
description='Download, build and install an instrumented package.')
parser.add_argument('-p', '--package', required=True)
parser.add_argument(
'-i', '--product-dir', default='.',
help='Relative path to the directory with chrome binaries')
parser.add_argument(
'-m', '--intermediate-dir', default='.',
help='Relative path to the directory for temporary build files')
parser.add_argument('--extra-configure-flags', default='')
parser.add_argument('--cflags', default='')
parser.add_argument('--ldflags', default='')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--cc')
parser.add_argument('--cxx')
parser.add_argument('--patch', nargs='*')
# This should be a shell script to run before building specific libraries.
# This will be run after applying the patches above.
parser.add_argument('--pre-build', default='')
parser.add_argument('--build-method', default='destdir')
parser.add_argument('--sanitizer-ignorelist', default='')
# The LIBDIR argument to configure/make.
parser.add_argument('--libdir', default='lib')
  # Ignore all empty arguments, because in several cases GYP passes them to the
  # script, and ArgumentParser would treat them as positional arguments instead
  # of ignoring them (argparse has no option to do that).
args = parser.parse_args([arg for arg in sys.argv[1:] if len(arg) != 0])
# Clobber by default, unless the developer wants to hack on the package's
# source code.
clobber = \
(os.environ.get('INSTRUMENTED_LIBRARIES_NO_CLOBBER', '') != '1')
if args.build_method == 'destdir':
builder = InstrumentedPackageBuilder(args, clobber)
elif args.build_method == 'custom_nss':
builder = NSSBuilder(args, clobber)
elif args.build_method == 'custom_libcap':
builder = LibcapBuilder(args, clobber)
elif args.build_method == 'custom_libcurl':
builder = LibcurlBuilder(args, clobber)
elif args.build_method == 'custom_libpci3':
builder = Libpci3Builder(args, clobber)
elif args.build_method == 'debian':
builder = DebianBuilder(args, clobber)
elif args.build_method == 'meson':
builder = MesonBuilder(args, clobber)
elif args.build_method == 'stub':
builder = StubBuilder(args, clobber)
else:
raise Exception('Unrecognized build method: %s' % args.build_method)
builder.download_build_install()
if __name__ == '__main__':
main()
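# Editor's sketch (flag values below are hypothetical, not taken from any real
# build): GYP drives this script with invocations along the lines of
#   download_build_install.py -p <package> -i out/Release \
#       -m out/Release/instrumented_temp --cc=clang --cxx=clang++ \
#       --cflags='["-fsanitize=address"]' --build-method=destdir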
|
|
from __future__ import unicode_literals
from __future__ import absolute_import
from collections import namedtuple
import logging
import re
import os
import sys
from operator import attrgetter
import six
from docker.errors import APIError
from docker.utils import create_host_config, LogConfig
from docker.utils.ports import build_port_bindings, split_port
from . import __version__
from .config import DOCKER_CONFIG_KEYS, merge_environment
from .const import (
DEFAULT_TIMEOUT,
LABEL_CONTAINER_NUMBER,
LABEL_ONE_OFF,
LABEL_PROJECT,
LABEL_SERVICE,
LABEL_VERSION,
LABEL_CONFIG_HASH,
)
from .container import Container
from .legacy import check_for_legacy_containers
from .progress_stream import stream_output, StreamOutputError
from .utils import json_hash, parallel_execute
from .config.validation import VALID_NAME_CHARS
log = logging.getLogger(__name__)
DOCKER_START_KEYS = [
'cap_add',
'cap_drop',
'devices',
'dns',
'dns_search',
'env_file',
'extra_hosts',
'read_only',
'net',
'log_driver',
'log_opt',
'mem_limit',
'memswap_limit',
'pid',
'privileged',
'restart',
'volumes_from',
'security_opt',
]
class BuildError(Exception):
def __init__(self, service, reason):
self.service = service
self.reason = reason
class ConfigError(ValueError):
pass
class NeedsBuildError(Exception):
def __init__(self, service):
self.service = service
class NoSuchImageError(Exception):
pass
VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')
ServiceName = namedtuple('ServiceName', 'project service number')
ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
class Service(object):
def __init__(self, name, client=None, project='default', links=None, external_links=None, volumes_from=None, net=None, **options):
if not re.match('^%s+$' % VALID_NAME_CHARS, project):
raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
self.name = name
self.client = client
self.project = project
self.links = links or []
self.external_links = external_links or []
self.volumes_from = volumes_from or []
self.net = net or None
self.options = options
def containers(self, stopped=False, one_off=False):
containers = filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})])
if not containers:
check_for_legacy_containers(
self.client,
self.project,
[self.name],
)
return containers
def get_container(self, number=1):
"""Return a :class:`compose.container.Container` for this service. The
container must be active, and match `number`.
"""
labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
for container in self.client.containers(filters={'label': labels}):
return Container.from_ps(self.client, container)
raise ValueError("No container found for %s_%s" % (self.name, number))
def start(self, **options):
for c in self.containers(stopped=True):
self.start_container_if_stopped(c, **options)
# TODO: remove these functions, project takes care of starting/stopping,
def stop(self, **options):
for c in self.containers():
log.info("Stopping %s..." % c.name)
c.stop(**options)
def kill(self, **options):
for c in self.containers():
log.info("Killing %s..." % c.name)
c.kill(**options)
def restart(self, **options):
for c in self.containers():
log.info("Restarting %s..." % c.name)
c.restart(**options)
# end TODO
def scale(self, desired_num, timeout=DEFAULT_TIMEOUT):
"""
Adjusts the number of containers to the specified number and ensures
they are running.
- creates containers until there are at least `desired_num`
- stops containers until there are at most `desired_num` running
- starts containers until there are at least `desired_num` running
- removes all stopped containers
"""
if self.custom_container_name() and desired_num > 1:
log.warn('The "%s" service is using the custom container name "%s". '
'Docker requires each container to have a unique name. '
'Remove the custom name to scale the service.'
% (self.name, self.custom_container_name()))
if self.specifies_host_port():
log.warn('The "%s" service specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.'
% self.name)
def create_and_start(service, number):
container = service.create_container(number=number, quiet=True)
container.start()
return container
running_containers = self.containers(stopped=False)
num_running = len(running_containers)
if desired_num == num_running:
# do nothing as we already have the desired number
log.info('Desired container number already achieved')
return
if desired_num > num_running:
# we need to start/create until we have desired_num
all_containers = self.containers(stopped=True)
if num_running != len(all_containers):
# we have some stopped containers, let's start them up again
stopped_containers = sorted([c for c in all_containers if not c.is_running], key=attrgetter('number'))
num_stopped = len(stopped_containers)
if num_stopped + num_running > desired_num:
num_to_start = desired_num - num_running
containers_to_start = stopped_containers[:num_to_start]
else:
containers_to_start = stopped_containers
parallel_execute(
objects=containers_to_start,
obj_callable=lambda c: c.start(),
msg_index=lambda c: c.name,
msg="Starting"
)
num_running += len(containers_to_start)
num_to_create = desired_num - num_running
next_number = self._next_container_number()
container_numbers = [
number for number in range(
next_number, next_number + num_to_create
)
]
parallel_execute(
objects=container_numbers,
obj_callable=lambda n: create_and_start(service=self, number=n),
msg_index=lambda n: n,
msg="Creating and starting"
)
if desired_num < num_running:
num_to_stop = num_running - desired_num
sorted_running_containers = sorted(running_containers, key=attrgetter('number'))
containers_to_stop = sorted_running_containers[-num_to_stop:]
parallel_execute(
objects=containers_to_stop,
obj_callable=lambda c: c.stop(timeout=timeout),
msg_index=lambda c: c.name,
msg="Stopping"
)
self.remove_stopped()
def remove_stopped(self, **options):
containers = [c for c in self.containers(stopped=True) if not c.is_running]
parallel_execute(
objects=containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def create_container(self,
one_off=False,
do_build=True,
previous_container=None,
number=None,
quiet=False,
**override_options):
"""
Create a container for this service. If the image doesn't exist, attempt to pull
it.
"""
self.ensure_image_exists(
do_build=do_build,
)
container_options = self._get_container_create_options(
override_options,
number or self._next_container_number(one_off=one_off),
one_off=one_off,
previous_container=previous_container,
)
if 'name' in container_options and not quiet:
log.info("Creating %s..." % container_options['name'])
return Container.create(self.client, **container_options)
def ensure_image_exists(self,
do_build=True):
try:
self.image()
return
except NoSuchImageError:
pass
if self.can_be_built():
if do_build:
self.build()
else:
raise NeedsBuildError(self)
else:
self.pull()
def image(self):
try:
return self.client.inspect_image(self.image_name)
except APIError as e:
if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
raise NoSuchImageError("Image '{}' not found".format(self.image_name))
else:
raise
@property
def image_name(self):
if self.can_be_built():
return self.full_name
else:
return self.options['image']
def convergence_plan(self,
allow_recreate=True,
force_recreate=False):
if force_recreate and not allow_recreate:
raise ValueError("force_recreate and allow_recreate are in conflict")
containers = self.containers(stopped=True)
if not containers:
return ConvergencePlan('create', [])
if not allow_recreate:
return ConvergencePlan('start', containers)
if force_recreate or self._containers_have_diverged(containers):
return ConvergencePlan('recreate', containers)
stopped = [c for c in containers if not c.is_running]
if stopped:
return ConvergencePlan('start', stopped)
return ConvergencePlan('noop', containers)
def _containers_have_diverged(self, containers):
config_hash = None
try:
config_hash = self.config_hash()
except NoSuchImageError as e:
log.debug(
'Service %s has diverged: %s',
self.name, six.text_type(e),
)
return True
has_diverged = False
for c in containers:
container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
if container_config_hash != config_hash:
log.debug(
'%s has diverged: %s != %s',
c.name, container_config_hash, config_hash,
)
has_diverged = True
return has_diverged
def execute_convergence_plan(self,
plan,
do_build=True,
timeout=DEFAULT_TIMEOUT):
(action, containers) = plan
if action == 'create':
container = self.create_container(
do_build=do_build,
)
self.start_container(container)
return [container]
elif action == 'recreate':
return [
self.recreate_container(
c,
timeout=timeout
)
for c in containers
]
elif action == 'start':
for c in containers:
self.start_container_if_stopped(c)
return containers
elif action == 'noop':
for c in containers:
log.info("%s is up-to-date" % c.name)
return containers
else:
raise Exception("Invalid action: {}".format(action))
def recreate_container(self,
container,
timeout=DEFAULT_TIMEOUT):
"""Recreate a container.
The original container is renamed to a temporary name so that data
volumes can be copied to the new container, before the original
container is removed.
"""
log.info("Recreating %s..." % container.name)
try:
container.stop(timeout=timeout)
except APIError as e:
if (e.response.status_code == 500
and e.explanation
and 'no such process' in str(e.explanation)):
pass
else:
raise
# Use a hopefully unique container name by prepending the short id
self.client.rename(
container.id,
'%s_%s' % (container.short_id, container.name))
new_container = self.create_container(
do_build=False,
previous_container=container,
number=container.labels.get(LABEL_CONTAINER_NUMBER),
quiet=True,
)
self.start_container(new_container)
container.remove()
return new_container
def start_container_if_stopped(self, container):
if container.is_running:
return container
else:
log.info("Starting %s..." % container.name)
return self.start_container(container)
def start_container(self, container):
container.start()
return container
def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT):
for c in self.duplicate_containers():
log.info('Removing %s...' % c.name)
c.stop(timeout=timeout)
c.remove()
def duplicate_containers(self):
containers = sorted(
self.containers(stopped=True),
key=lambda c: c.get('Created'),
)
numbers = set()
for c in containers:
if c.number in numbers:
yield c
else:
numbers.add(c.number)
def config_hash(self):
return json_hash(self.config_dict())
def config_dict(self):
return {
'options': self.options,
'image_id': self.image()['Id'],
}
def get_dependency_names(self):
net_name = self.get_net_name()
return (self.get_linked_names() +
self.get_volumes_from_names() +
([net_name] if net_name else []))
def get_linked_names(self):
return [s.name for (s, _) in self.links]
def get_volumes_from_names(self):
return [s.name for s in self.volumes_from if isinstance(s, Service)]
def get_net_name(self):
if isinstance(self.net, Service):
return self.net.name
else:
return
def get_container_name(self, number, one_off=False):
# TODO: Implement issue #652 here
return build_container_name(self.project, self.name, number, one_off)
# TODO: this would benefit from github.com/docker/docker/pull/11943
# to remove the need to inspect every container
def _next_container_number(self, one_off=False):
containers = filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=True,
filters={'label': self.labels(one_off=one_off)})
])
numbers = [c.number for c in containers]
return 1 if not numbers else max(numbers) + 1
def _get_links(self, link_to_self):
links = []
for service, link_name in self.links:
for container in service.containers():
links.append((container.name, link_name or service.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
if link_to_self:
for container in self.containers():
links.append((container.name, self.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
for external_link in self.external_links:
if ':' not in external_link:
link_name = external_link
else:
external_link, link_name = external_link.split(':')
links.append((external_link, link_name))
return links
def _get_volumes_from(self):
volumes_from = []
for volume_source in self.volumes_from:
if isinstance(volume_source, Service):
containers = volume_source.containers(stopped=True)
if not containers:
volumes_from.append(volume_source.create_container().id)
else:
volumes_from.extend(map(attrgetter('id'), containers))
elif isinstance(volume_source, Container):
volumes_from.append(volume_source.id)
return volumes_from
def _get_net(self):
if not self.net:
return None
if isinstance(self.net, Service):
containers = self.net.containers()
if len(containers) > 0:
net = 'container:' + containers[0].id
else:
log.warning("Warning: Service %s is trying to use reuse the network stack "
"of another service that is not running." % (self.net.name))
net = None
elif isinstance(self.net, Container):
net = 'container:' + self.net.id
else:
net = self.net
return net
def _get_container_create_options(
self,
override_options,
number,
one_off=False,
previous_container=None):
add_config_hash = (not one_off and not override_options)
container_options = dict(
(k, self.options[k])
for k in DOCKER_CONFIG_KEYS if k in self.options)
container_options.update(override_options)
if self.custom_container_name() and not one_off:
container_options['name'] = self.custom_container_name()
else:
container_options['name'] = self.get_container_name(number, one_off)
if add_config_hash:
config_hash = self.config_hash()
if 'labels' not in container_options:
container_options['labels'] = {}
container_options['labels'][LABEL_CONFIG_HASH] = config_hash
log.debug("Added config hash: %s" % config_hash)
if 'detach' not in container_options:
container_options['detach'] = True
# If a qualified hostname was given, split it into an
# unqualified hostname and a domainname unless domainname
# was also given explicitly. This matches the behavior of
# the official Docker CLI in that scenario.
if ('hostname' in container_options
and 'domainname' not in container_options
and '.' in container_options['hostname']):
parts = container_options['hostname'].partition('.')
container_options['hostname'] = parts[0]
container_options['domainname'] = parts[2]
if 'ports' in container_options or 'expose' in self.options:
ports = []
all_ports = container_options.get('ports', []) + self.options.get('expose', [])
for port_range in all_ports:
internal_range, _ = split_port(port_range)
for port in internal_range:
port = str(port)
if '/' in port:
port = tuple(port.split('/'))
ports.append(port)
container_options['ports'] = ports
override_options['binds'] = merge_volume_bindings(
container_options.get('volumes') or [],
previous_container)
if 'volumes' in container_options:
container_options['volumes'] = dict(
(parse_volume_spec(v).internal, {})
for v in container_options['volumes'])
container_options['environment'] = merge_environment(
self.options.get('environment'),
override_options.get('environment'))
if previous_container:
container_options['environment']['affinity:container'] = ('=' + previous_container.id)
container_options['image'] = self.image_name
container_options['labels'] = build_container_labels(
container_options.get('labels', {}),
self.labels(one_off=one_off),
number)
# Delete options which are only used when starting
for key in DOCKER_START_KEYS:
container_options.pop(key, None)
container_options['host_config'] = self._get_container_host_config(
override_options,
one_off=one_off)
return container_options
def _get_container_host_config(self, override_options, one_off=False):
options = dict(self.options, **override_options)
port_bindings = build_port_bindings(options.get('ports') or [])
privileged = options.get('privileged', False)
cap_add = options.get('cap_add', None)
cap_drop = options.get('cap_drop', None)
log_config = LogConfig(
type=options.get('log_driver', 'json-file'),
config=options.get('log_opt', None)
)
pid = options.get('pid', None)
security_opt = options.get('security_opt', None)
dns = options.get('dns', None)
if isinstance(dns, six.string_types):
dns = [dns]
dns_search = options.get('dns_search', None)
if isinstance(dns_search, six.string_types):
dns_search = [dns_search]
restart = parse_restart_spec(options.get('restart', None))
extra_hosts = build_extra_hosts(options.get('extra_hosts', None))
read_only = options.get('read_only', None)
devices = options.get('devices', None)
return create_host_config(
links=self._get_links(link_to_self=one_off),
port_bindings=port_bindings,
binds=options.get('binds'),
volumes_from=self._get_volumes_from(),
privileged=privileged,
network_mode=self._get_net(),
devices=devices,
dns=dns,
dns_search=dns_search,
restart_policy=restart,
cap_add=cap_add,
cap_drop=cap_drop,
mem_limit=options.get('mem_limit'),
memswap_limit=options.get('memswap_limit'),
log_config=log_config,
extra_hosts=extra_hosts,
read_only=read_only,
pid_mode=pid,
security_opt=security_opt
)
def build(self, no_cache=False):
log.info('Building %s...' % self.name)
path = six.binary_type(self.options['build'])
build_output = self.client.build(
path=path,
tag=self.image_name,
stream=True,
rm=True,
pull=False,
nocache=no_cache,
dockerfile=self.options.get('dockerfile', None),
)
try:
all_events = stream_output(build_output, sys.stdout)
except StreamOutputError as e:
            raise BuildError(self, six.text_type(e))
# Ensure the HTTP connection is not reused for another
# streaming command, as the Docker daemon can sometimes
# complain about it
self.client.close()
image_id = None
for event in all_events:
if 'stream' in event:
match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
if match:
image_id = match.group(1)
if image_id is None:
raise BuildError(self, event if all_events else 'Unknown')
return image_id
def can_be_built(self):
return 'build' in self.options
@property
def full_name(self):
"""
The tag to give to images built for this service.
"""
return '%s_%s' % (self.project, self.name)
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.project),
'{0}={1}'.format(LABEL_SERVICE, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
]
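    # Editor's note (illustrative): for project 'myproj' and service 'web' the
    # method above produces filters such as
    #   ['com.docker.compose.project=myproj',
    #    'com.docker.compose.service=web',
    #    'com.docker.compose.oneoff=False']
    # assuming the usual values of the LABEL_* constants imported from .const.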
def custom_container_name(self):
return self.options.get('container_name')
def specifies_host_port(self):
for port in self.options.get('ports', []):
if ':' in str(port):
return True
return False
def pull(self):
if 'image' not in self.options:
return
repo, tag = parse_repository_tag(self.options['image'])
tag = tag or 'latest'
log.info('Pulling %s (%s:%s)...' % (self.name, repo, tag))
output = self.client.pull(
repo,
tag=tag,
stream=True,
)
stream_output(output, sys.stdout)
# Names
def build_container_name(project, service, number, one_off=False):
bits = [project, service]
if one_off:
bits.append('run')
return '_'.join(bits + [str(number)])
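# Editor's sketch (not part of the original module; project/service names are
# hypothetical): container names are '<project>_<service>_<number>', with 'run'
# inserted for one-off containers. Defined but never called.
def _build_container_name_example():
    assert build_container_name('myproj', 'web', 1) == 'myproj_web_1'
    assert build_container_name('myproj', 'web', 2, one_off=True) == 'myproj_web_run_2'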
# Images
def parse_repository_tag(s):
if ":" not in s:
return s, ""
repo, tag = s.rsplit(":", 1)
if "/" in tag:
return s, ""
return repo, tag
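# Editor's sketch (not part of the original module): the tag is only split off
# when the text after the last ':' contains no '/', so registry hosts with a
# port are left intact. Defined but never called.
def _parse_repository_tag_example():
    assert parse_repository_tag('ubuntu') == ('ubuntu', '')
    assert parse_repository_tag('ubuntu:14.04') == ('ubuntu', '14.04')
    assert parse_repository_tag('localhost:5000/ubuntu') == ('localhost:5000/ubuntu', '')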
# Volumes
def merge_volume_bindings(volumes_option, previous_container):
"""Return a list of volume bindings for a container. Container data volumes
are replaced by those from the previous container.
"""
volume_bindings = dict(
build_volume_binding(parse_volume_spec(volume))
for volume in volumes_option or []
if ':' in volume)
if previous_container:
volume_bindings.update(
get_container_data_volumes(previous_container, volumes_option))
return volume_bindings.values()
def get_container_data_volumes(container, volumes_option):
"""Find the container data volumes that are in `volumes_option`, and return
a mapping of volume bindings for those volumes.
"""
volumes = []
volumes_option = volumes_option or []
container_volumes = container.get('Volumes') or {}
image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {}
    for volume in set(volumes_option) | set(image_volumes):
volume = parse_volume_spec(volume)
# No need to preserve host volumes
if volume.external:
continue
volume_path = container_volumes.get(volume.internal)
# New volume, doesn't exist in the old container
if not volume_path:
continue
# Copy existing volume from old container
volume = volume._replace(external=volume_path)
volumes.append(build_volume_binding(volume))
return dict(volumes)
def build_volume_binding(volume_spec):
return volume_spec.internal, "{}:{}:{}".format(*volume_spec)
def parse_volume_spec(volume_config):
parts = volume_config.split(':')
if len(parts) > 3:
raise ConfigError("Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config)
if len(parts) == 1:
external = None
internal = os.path.normpath(parts[0])
else:
external = os.path.normpath(parts[0])
internal = os.path.normpath(parts[1])
mode = parts[2] if len(parts) == 3 else 'rw'
return VolumeSpec(external, internal, mode)
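# Editor's sketch (not part of the original module; POSIX paths assumed): a bare
# path becomes an internal-only volume with the default 'rw' mode, while
# 'external:internal:mode' maps onto the VolumeSpec fields directly. Defined but
# never called.
def _parse_volume_spec_example():
    assert parse_volume_spec('/data') == VolumeSpec(None, '/data', 'rw')
    assert parse_volume_spec('/host/dir:/container/dir:ro') == \
        VolumeSpec('/host/dir', '/container/dir', 'ro')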
# Labels
def build_container_labels(label_options, service_labels, number, one_off=False):
labels = label_options or {}
labels.update(label.split('=', 1) for label in service_labels)
labels[LABEL_CONTAINER_NUMBER] = str(number)
labels[LABEL_VERSION] = __version__
return labels
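# Editor's sketch (not part of the original module): service label strings are
# folded into the label dict, then the container number and compose version are
# stamped on top. Defined but never called.
def _build_container_labels_example():
    labels = build_container_labels({'custom': 'x'}, ['a=1', 'b=2'], 3)
    assert labels['custom'] == 'x' and labels['a'] == '1'
    assert labels[LABEL_CONTAINER_NUMBER] == '3'
    assert labels[LABEL_VERSION] == __version__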
# Restart policy
def parse_restart_spec(restart_config):
if not restart_config:
return None
parts = restart_config.split(':')
if len(parts) > 2:
raise ConfigError("Restart %s has incorrect format, should be "
"mode[:max_retry]" % restart_config)
if len(parts) == 2:
name, max_retry_count = parts
else:
name, = parts
max_retry_count = 0
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
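# Editor's sketch (not part of the original module): 'mode[:max_retry]' strings
# become the dict shape the Docker API expects. Defined but never called.
def _parse_restart_spec_example():
    assert parse_restart_spec(None) is None
    assert parse_restart_spec('always') == {'Name': 'always', 'MaximumRetryCount': 0}
    assert parse_restart_spec('on-failure:5') == {'Name': 'on-failure', 'MaximumRetryCount': 5}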
# Extra hosts
def build_extra_hosts(extra_hosts_config):
if not extra_hosts_config:
return {}
if isinstance(extra_hosts_config, list):
extra_hosts_dict = {}
for extra_hosts_line in extra_hosts_config:
if not isinstance(extra_hosts_line, six.string_types):
raise ConfigError(
"extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
extra_hosts_config
)
host, ip = extra_hosts_line.split(':')
extra_hosts_dict.update({host.strip(): ip.strip()})
extra_hosts_config = extra_hosts_dict
if isinstance(extra_hosts_config, dict):
return extra_hosts_config
raise ConfigError(
"extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
extra_hosts_config
)
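# Editor's sketch (not part of the original module; host names and IPs are made
# up): both the list form and the mapping form of extra_hosts collapse to a
# host -> IP dict. Defined but never called.
def _build_extra_hosts_example():
    assert build_extra_hosts(None) == {}
    assert build_extra_hosts(['somehost:162.242.195.82']) == {'somehost': '162.242.195.82'}
    assert build_extra_hosts({'otherhost': '50.31.209.229'}) == {'otherhost': '50.31.209.229'}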
|
|
import traceback
import pyglet
from geoplotlib.core import GeoplotlibApp
class AppConfig:
def __init__(self):
self.reset()
def reset(self):
self.layers = []
self.bbox = None
self.savefig = None
self.tiles_provider = 'mapquest'
self.smoothing = False
self.map_alpha = 196
screen = pyglet.canvas.get_display().get_default_screen()
self.screen_w = int(screen.width * .9)
self.screen_h = int(screen.height * .9)
_global_config = AppConfig()
def _runapp(app_config):
app = GeoplotlibApp(app_config)
try:
app.start()
except:
traceback.print_exc()
finally:
app.close()
_global_config.reset()
def show():
"""Launch geoplotlib"""
_runapp(_global_config)
def savefig(fname):
"""Launch geoplotlib, saves a screeshot and terminates"""
_global_config.savefig = fname
_runapp(_global_config)
def inline(width=900):
"""display the map inline in ipython
:param width: image width for the browser
"""
from IPython.display import Image, HTML, display, clear_output
import random
import string
import urllib
import os
while True:
fname = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(32))
if not os.path.isfile(fname + '.png'):
break
savefig(fname)
if os.path.isfile(fname + '.png'):
with open(fname + '.png', 'rb') as fin:
base64 = urllib.quote(fin.read().encode("base64"))
image_html = "<img style='width: %dpx; margin: 0px; float: left; border: 1px solid black;' src='data:image/png;base64,%s' />" % (width, base64)
display(HTML(image_html))
os.remove(fname + '.png')
def dot(data, color=None, point_size=2, f_tooltip=None):
"""Create a dot density map
:param data: data access object
:param color: color
:param point_size: point size
:param f_tooltip: function to return a tooltip string for a point
"""
from geoplotlib.layers import DotDensityLayer
_global_config.layers.append(DotDensityLayer(data, color=color, point_size=point_size, f_tooltip=f_tooltip))
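# Editor's sketch (not part of the original module): a minimal dot-density map,
# assuming a CSV file with 'lat' and 'lon' columns readable by
# geoplotlib.utils.read_csv. Defined but never called.
def _example_dot_usage():
    from geoplotlib.utils import read_csv
    data = read_csv('points.csv')  # hypothetical file
    dot(data, point_size=3)
    show()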
def scatter(data, color=None, point_size=2, f_tooltip=None):
"""Deprecated: use dot
"""
import warnings
warnings.warn("deprecated, use geoplotlib.dot", DeprecationWarning)
dot(data, color, point_size, f_tooltip)
def hist(data, cmap='hot', alpha=220, colorscale='sqrt', binsize=16, show_tooltip=False,
scalemin=0, scalemax=None, f_group=None):
"""Create a 2D histogram
:param data: data access object
:param cmap: colormap name
:param alpha: color alpha
:param colorscale: scaling [lin, log, sqrt]
:param binsize: size of the hist bins
:param show_tooltip: if True, will show the value of bins on mouseover
:param scalemin: min value for displaying a bin
:param scalemax: max value for a bin
:param f_group: function to apply to samples in the same bin. Default is to count
"""
from geoplotlib.layers import HistogramLayer
_global_config.layers.append(HistogramLayer(data, cmap=cmap, alpha=alpha, colorscale=colorscale,
binsize=binsize, show_tooltip=show_tooltip, scalemin=scalemin,
scalemax=scalemax, f_group=f_group))
def graph(data, src_lat, src_lon, dest_lat, dest_lon, linewidth=1, alpha=220, color='hot'):
"""Create a graph drawing a line between each pair of (src_lat, src_lon) and (dest_lat, dest_lon)
:param data: data access object
:param src_lat: field name of source latitude
:param src_lon: field name of source longitude
:param dest_lat: field name of destination latitude
:param dest_lon: field name of destination longitude
:param linewidth: line width
:param alpha: color alpha
:param color: color or colormap
"""
from geoplotlib.layers import GraphLayer
_global_config.layers.append(GraphLayer(data, src_lat, src_lon, dest_lat, dest_lon, linewidth, alpha, color))
def shapefiles(fname, f_tooltip=None, color=None, linewidth=3, shape_type='full'):
"""
    Load and draw shapefiles
:param fname: full path to the shapefile
:param f_tooltip: function to generate a tooltip on mouseover
:param color: color
:param linewidth: line width
:param shape_type: either full or bbox
"""
from geoplotlib.layers import ShapefileLayer
_global_config.layers.append(ShapefileLayer(fname, f_tooltip, color, linewidth, shape_type))
def voronoi(data, line_color=None, line_width=2, f_tooltip=None, cmap=None, max_area=1e4, alpha=220):
"""
    Draw the Voronoi tessellation of the points
:param data: data access object
:param line_color: line color
:param line_width: line width
:param f_tooltip: function to generate a tooltip on mouseover
:param cmap: color map
:param max_area: scaling constant to determine the color of the voronoi areas
:param alpha: color alpha
"""
from geoplotlib.layers import VoronoiLayer
_global_config.layers.append(VoronoiLayer(data, line_color, line_width, f_tooltip, cmap, max_area, alpha))
def delaunay(data, line_color=None, line_width=2, cmap=None, max_lenght=100):
"""
Draw a delaunay triangulation of the points
:param data: data access object
:param line_color: line color
:param line_width: line width
:param cmap: color map
:param max_lenght: scaling constant for coloring the edges
"""
from geoplotlib.layers import DelaunayLayer
_global_config.layers.append(DelaunayLayer(data, line_color, line_width, cmap, max_lenght))
def convexhull(data, col, fill=True, point_size=4):
"""
Convex hull for a set of points
:param data: points
:param col: color
:param fill: whether to fill the convexhull polygon or not
:param point_size: size of the points on the convexhull. Points are not rendered if None
"""
from geoplotlib.layers import ConvexHullLayer
_global_config.layers.append(ConvexHullLayer(data, col, fill, point_size))
def kde(data, bw, cmap='hot', method='hist', scaling='sqrt', alpha=220,
cut_below=None, clip_above=None, binsize=1, cmap_levels=10):
"""
Kernel density estimation visualization
:param data: data access object
:param bw: kernel bandwidth (in screen coordinates)
:param cmap: colormap
    :param method: if 'kde', uses KDEMultivariate from statsmodels, which gives a more accurate but much slower estimate.
        If 'hist', estimates the density by applying Gaussian smoothing to a 2D histogram, which is much faster but less accurate
:param scaling: colorscale, lin log or sqrt
:param alpha: color alpha
:param cut_below: densities below cut_below are not drawn
:param clip_above: defines the max value for the colorscale
:param binsize: size of the bins for hist estimator
:param cmap_levels: discretize colors into cmap_levels levels
"""
from geoplotlib.layers import KDELayer
_global_config.layers.append(KDELayer(data, bw, cmap, method, scaling, alpha,
cut_below, clip_above, binsize, cmap_levels))
def markers(data, marker, f_tooltip=None, marker_preferred_size=32):
"""
Draw markers
:param data: data access object
:param marker: full filename of the marker image
:param f_tooltip: function to generate a tooltip on mouseover
:param marker_preferred_size: size in pixel for the marker images
"""
from geoplotlib.layers import MarkersLayer
_global_config.layers.append(MarkersLayer(data, marker, f_tooltip, marker_preferred_size))
def geojson(filename, color='b', linewidth=1, fill=False, f_tooltip=None):
"""
Draw features described in geojson format (http://geojson.org/)
:param filename: filename of the geojson file
:param color: color for the shapes. If callable, it will be invoked for each feature, passing the properties element
:param linewidth: line width
:param fill: if fill=True the feature polygon is filled, otherwise just the border is rendered
:param f_tooltip: function to generate a tooltip on mouseover. It will be invoked for each feature, passing the properties element
"""
from geoplotlib.layers import GeoJSONLayer
_global_config.layers.append(GeoJSONLayer(filename, color=color, linewidth=linewidth, fill=fill, f_tooltip=f_tooltip))
def clear():
"""
Remove all existing layers
"""
_global_config.layers = []
def tiles_provider(tiles_provider):
"""
Set the tile provider
:param tiles_provider: either one of the built-in providers
['watercolor', 'toner', 'toner-lite', 'mapquest', 'darkmatter','positron']
or a custom provider in the form
{'url': lambda zoom, xtile, ytile: 'someurl' % (zoom, xtile, ytile),
'tiles_dir': 'mytiles',
'attribution': 'my attribution'
    }
"""
_global_config.tiles_provider = tiles_provider
def add_layer(layer):
"""
Add a layer
:param layer: a BaseLayer object
"""
_global_config.layers.append(layer)
def set_bbox(bbox):
"""
Set the map bounding box
:param bbox: a BoundingBox object
"""
_global_config.bbox = bbox
def set_smoothing(smoothing):
"""
    Enable OpenGL line smoothing (antialiasing)
:param smoothing: smoothing enabled or disabled
"""
_global_config.smoothing = smoothing
def set_map_alpha(alpha):
"""
Alpha color of the map tiles
:param alpha: int between 0 and 255. 0 is completely dark, 255 is full brightness
"""
if alpha < 0 or alpha > 255:
raise Exception('invalid alpha ' + str(alpha))
_global_config.map_alpha = alpha
def set_window_size(w, h):
"""
Set the geoplotlib window size
:param w: window width
:param h: window height
"""
_global_config.screen_w = w
_global_config.screen_h = h
|
|
"""Contains the implementation of the X11 X Windows System color group."""
__all__ = (
'X11',
)
import enum
from colors import base
class X11(base.ColorGroup):
"""
The X11 color group.
In computing, on the X Window System, X11 color names are represented in a
simple text file, which maps certain strings to RGB color values. It is
shipped with every X11 installation, hence the name, and is usually located
in <X11root>/lib/X11/rgb.txt. The web colors list is descended from it but
differs for certain color names.
Color names are not standardized by Xlib or the X11 protocol. The list does
not show continuity either in selected color values or in color names, and
some color triplets have multiple names. Despite this, graphic designers
and others got used to them, making it practically impossible to introduce
a different list. In earlier releases of X11 (prior to the introduction of
Xcms), server implementors were encouraged to modify the RGB values in the
reference color database to account for gamma correction.
See Also:
        `Wikipedia <https://en.wikipedia.org/wiki/X11_color_names>`
"""
AliceBlue = base.RGBColor(240, 248, 255)
AntiqueWhite = base.RGBColor(250, 235, 215)
AntiqueWhite1 = base.RGBColor(255, 239, 219)
AntiqueWhite2 = base.RGBColor(238, 223, 204)
AntiqueWhite3 = base.RGBColor(205, 192, 176)
AntiqueWhite4 = base.RGBColor(139, 131, 120)
Aqua = base.RGBColor( 0, 255, 255)
Aqua1 = base.RGBColor( 0, 255, 255)
Aqua2 = base.RGBColor( 0, 238, 238)
Aqua3 = base.RGBColor( 0, 205, 205)
Aqua4 = base.RGBColor( 0, 139, 139)
Aquamarine = base.RGBColor(127, 255, 212)
Aquamarine1 = base.RGBColor(127, 255, 212)
Aquamarine2 = base.RGBColor(118, 238, 198)
Aquamarine3 = base.RGBColor(102, 205, 170)
Aquamarine4 = base.RGBColor( 69, 139, 116)
Azure = base.RGBColor(240, 255, 255)
Azure1 = base.RGBColor(240, 255, 255)
Azure2 = base.RGBColor(224, 238, 238)
Azure3 = base.RGBColor(193, 205, 205)
Azure4 = base.RGBColor(131, 139, 139)
Beige = base.RGBColor(245, 245, 220)
Bisque = base.RGBColor(255, 228, 196)
Bisque1 = base.RGBColor(255, 228, 196)
Bisque2 = base.RGBColor(238, 213, 183)
Bisque3 = base.RGBColor(205, 183, 158)
Bisque4 = base.RGBColor(139, 125, 107)
Black = base.RGBColor( 0, 0, 0)
BlanchedAlmond = base.RGBColor(255, 235, 205)
Blue = base.RGBColor( 0, 0, 255)
Blue1 = base.RGBColor( 0, 0, 255)
Blue2 = base.RGBColor( 0, 0, 238)
Blue3 = base.RGBColor( 0, 0, 205)
Blue4 = base.RGBColor( 0, 0, 139)
BlueViolet = base.RGBColor(138, 43, 226)
Brown = base.RGBColor(165, 42, 42)
Brown1 = base.RGBColor(255, 64, 64)
Brown2 = base.RGBColor(238, 59, 59)
Brown3 = base.RGBColor(205, 51, 51)
Brown4 = base.RGBColor(139, 35, 35)
Burlywood = base.RGBColor(222, 184, 135)
Burlywood1 = base.RGBColor(255, 211, 155)
Burlywood2 = base.RGBColor(238, 197, 145)
Burlywood3 = base.RGBColor(205, 170, 125)
Burlywood4 = base.RGBColor(139, 115, 85)
CadetBlue = base.RGBColor( 95, 158, 160)
CadetBlue1 = base.RGBColor(152, 245, 255)
CadetBlue2 = base.RGBColor(142, 229, 238)
CadetBlue3 = base.RGBColor(122, 197, 205)
CadetBlue4 = base.RGBColor( 83, 134, 139)
Chartreuse = base.RGBColor(127, 255, 0)
Chartreuse1 = base.RGBColor(127, 255, 0)
Chartreuse2 = base.RGBColor(118, 238, 0)
Chartreuse3 = base.RGBColor(102, 205, 0)
Chartreuse4 = base.RGBColor( 69, 139, 0)
Chocolate = base.RGBColor(210, 105, 30)
Chocolate1 = base.RGBColor(255, 127, 36)
Chocolate2 = base.RGBColor(238, 118, 33)
Chocolate3 = base.RGBColor(205, 102, 29)
Chocolate4 = base.RGBColor(139, 69, 19)
Coral = base.RGBColor(255, 127, 80)
Coral1 = base.RGBColor(255, 114, 86)
Coral2 = base.RGBColor(238, 106, 80)
Coral3 = base.RGBColor(205, 91, 69)
Coral4 = base.RGBColor(139, 62, 47)
Cornflower = base.RGBColor(100, 149, 237)
Cornsilk = base.RGBColor(255, 248, 220)
Cornsilk1 = base.RGBColor(255, 248, 220)
Cornsilk2 = base.RGBColor(238, 232, 205)
Cornsilk3 = base.RGBColor(205, 200, 177)
Cornsilk4 = base.RGBColor(139, 136, 120)
Crimson = base.RGBColor(220, 20, 60)
Cyan = base.RGBColor( 0, 255, 255)
Cyan1 = base.RGBColor( 0, 255, 255)
Cyan2 = base.RGBColor( 0, 238, 238)
Cyan3 = base.RGBColor( 0, 205, 205)
Cyan4 = base.RGBColor( 0, 139, 139)
DarkBlue = base.RGBColor( 0, 0, 139)
DarkCyan = base.RGBColor( 0, 139, 139)
DarkGoldenrod = base.RGBColor(184, 134, 11)
DarkGoldenrod1 = base.RGBColor(255, 185, 15)
DarkGoldenrod2 = base.RGBColor(238, 173, 14)
DarkGoldenrod3 = base.RGBColor(205, 149, 12)
DarkGoldenrod4 = base.RGBColor(139, 101, 8)
DarkGray = base.RGBColor(169, 169, 169)
DarkGrey = base.RGBColor(169, 169, 169)
DarkGreen = base.RGBColor( 0, 100, 0)
DarkKhaki = base.RGBColor(189, 183, 107)
DarkMagenta = base.RGBColor(139, 0, 139)
DarkOliveGreen = base.RGBColor( 85, 107, 47)
DarkOliveGreen1 = base.RGBColor(202, 255, 112)
DarkOliveGreen2 = base.RGBColor(188, 238, 104)
DarkOliveGreen3 = base.RGBColor(162, 205, 90)
DarkOliveGreen4 = base.RGBColor(110, 139, 61)
DarkOrange = base.RGBColor(255, 140, 0)
DarkOrange1 = base.RGBColor(255, 127, 0)
DarkOrange2 = base.RGBColor(238, 118, 0)
DarkOrange3 = base.RGBColor(205, 102, 0)
DarkOrange4 = base.RGBColor(139, 69, 0)
DarkOrchid = base.RGBColor(153, 50, 204)
DarkOrchid1 = base.RGBColor(191, 62, 255)
DarkOrchid2 = base.RGBColor(178, 58, 238)
DarkOrchid3 = base.RGBColor(154, 50, 205)
DarkOrchid4 = base.RGBColor(104, 34, 139)
DarkRed = base.RGBColor(139, 0, 0)
DarkSalmon = base.RGBColor(233, 150, 122)
DarkSeaGreen = base.RGBColor(143, 188, 143)
DarkSeaGreen1 = base.RGBColor(193, 255, 193)
DarkSeaGreen2 = base.RGBColor(180, 238, 180)
DarkSeaGreen3 = base.RGBColor(155, 205, 155)
DarkSeaGreen4 = base.RGBColor(105, 139, 105)
DarkSlateBlue = base.RGBColor( 72, 61, 139)
DarkSlateGray = base.RGBColor( 47, 79, 79)
DarkSlateGray1 = base.RGBColor(151, 255, 255)
DarkSlateGray2 = base.RGBColor(141, 238, 238)
DarkSlateGray3 = base.RGBColor(121, 205, 205)
DarkSlateGray4 = base.RGBColor( 82, 139, 139)
DarkSlateGrey = base.RGBColor( 47, 79, 79)
DarkSlateGrey1 = base.RGBColor(151, 255, 255)
DarkSlateGrey2 = base.RGBColor(141, 238, 238)
DarkSlateGrey3 = base.RGBColor(121, 205, 205)
DarkSlateGrey4 = base.RGBColor( 82, 139, 139)
DarkTurquoise = base.RGBColor( 0, 206, 209)
DarkViolet = base.RGBColor(148, 0, 211)
DeepPink = base.RGBColor(255, 20, 147)
DeepPink1 = base.RGBColor(255, 20, 147)
DeepPink2 = base.RGBColor(238, 18, 137)
DeepPink3 = base.RGBColor(205, 16, 118)
DeepPink4 = base.RGBColor(139, 10, 80)
DeepSkyBlue = base.RGBColor( 0, 191, 255)
DeepSkyBlue1 = base.RGBColor( 0, 191, 255)
DeepSkyBlue2 = base.RGBColor( 0, 178, 238)
DeepSkyBlue3 = base.RGBColor( 0, 154, 205)
DeepSkyBlue4 = base.RGBColor( 0, 104, 139)
DimGray = base.RGBColor(105, 105, 105)
DimGrey = base.RGBColor(105, 105, 105)
DodgerBlue = base.RGBColor( 30, 144, 255)
DodgerBlue1 = base.RGBColor( 30, 144, 255)
DodgerBlue2 = base.RGBColor( 28, 134, 238)
DodgerBlue3 = base.RGBColor( 24, 116, 205)
DodgerBlue4 = base.RGBColor( 16, 78, 139)
Firebrick = base.RGBColor(178, 34, 34)
Firebrick1 = base.RGBColor(255, 48, 48)
Firebrick2 = base.RGBColor(238, 44, 44)
Firebrick3 = base.RGBColor(205, 38, 38)
Firebrick4 = base.RGBColor(139, 26, 26)
FloralWhite = base.RGBColor(255, 250, 240)
ForestGreen = base.RGBColor( 34, 139, 34)
Fuchsia = base.RGBColor(255, 0, 255)
Fuchsia1 = base.RGBColor(255, 0, 255)
Fuchsia2 = base.RGBColor(238, 0, 238)
Fuchsia3 = base.RGBColor(205, 0, 205)
Fuchsia4 = base.RGBColor(139, 0, 139)
Gainsboro = base.RGBColor(220, 220, 220)
GhostWhite = base.RGBColor(248, 248, 255)
Gold = base.RGBColor(255, 215, 0)
Gold1 = base.RGBColor(255, 215, 0)
Gold2 = base.RGBColor(238, 201, 0)
Gold3 = base.RGBColor(205, 173, 0)
Gold4 = base.RGBColor(139, 117, 0)
Goldenrod = base.RGBColor(218, 165, 32)
Goldenrod1 = base.RGBColor(255, 193, 37)
Goldenrod2 = base.RGBColor(238, 180, 34)
Goldenrod3 = base.RGBColor(205, 155, 29)
Goldenrod4 = base.RGBColor(139, 105, 20)
Gray = base.RGBColor(190, 190, 190)
Grey = base.RGBColor(190, 190, 190)
X11Gray = base.RGBColor(190, 190, 190)
X11Grey = base.RGBColor(190, 190, 190)
WebGray = base.RGBColor(128, 128, 128)
WebGrey = base.RGBColor(128, 128, 128)
Green = base.RGBColor( 0, 255, 0)
Green1 = base.RGBColor( 0, 255, 0)
Green2 = base.RGBColor( 0, 238, 0)
Green3 = base.RGBColor( 0, 205, 0)
Green4 = base.RGBColor( 0, 139, 0)
X11Green = base.RGBColor( 0, 255, 0)
X11Green1 = base.RGBColor( 0, 255, 0)
X11Green2 = base.RGBColor( 0, 238, 0)
X11Green3 = base.RGBColor( 0, 205, 0)
X11Green4 = base.RGBColor( 0, 139, 0)
WebGreen = base.RGBColor( 0, 128, 0)
GreenYellow = base.RGBColor(173, 255, 47)
Honeydew = base.RGBColor(240, 255, 240)
Honeydew1 = base.RGBColor(240, 255, 240)
Honeydew2 = base.RGBColor(224, 238, 224)
Honeydew3 = base.RGBColor(193, 205, 193)
Honeydew4 = base.RGBColor(131, 139, 131)
HotPink = base.RGBColor(255, 105, 180)
HotPink1 = base.RGBColor(255, 110, 180)
HotPink2 = base.RGBColor(238, 106, 167)
HotPink3 = base.RGBColor(205, 96, 144)
HotPink4 = base.RGBColor(139, 58, 98)
IndianRed = base.RGBColor(205, 92, 92)
IndianRed1 = base.RGBColor(255, 106, 106)
IndianRed2 = base.RGBColor(238, 99, 99)
IndianRed3 = base.RGBColor(205, 85, 85)
IndianRed4 = base.RGBColor(139, 58, 58)
Indigo = base.RGBColor( 75, 0, 130)
Ivory = base.RGBColor(255, 255, 240)
Ivory1 = base.RGBColor(255, 255, 240)
Ivory2 = base.RGBColor(238, 238, 224)
Ivory3 = base.RGBColor(205, 205, 193)
Ivory4 = base.RGBColor(139, 139, 131)
Khaki = base.RGBColor(240, 230, 140)
Khaki1 = base.RGBColor(255, 246, 143)
Khaki2 = base.RGBColor(238, 230, 133)
Khaki3 = base.RGBColor(205, 198, 115)
Khaki4 = base.RGBColor(139, 134, 78)
Lavender = base.RGBColor(230, 230, 250)
LavenderBlush = base.RGBColor(255, 240, 245)
LavenderBlush1 = base.RGBColor(255, 240, 245)
LavenderBlush2 = base.RGBColor(238, 224, 229)
LavenderBlush3 = base.RGBColor(205, 193, 197)
LavenderBlush4 = base.RGBColor(139, 131, 134)
LawnGreen = base.RGBColor(124, 252, 0)
LemonChiffon = base.RGBColor(255, 250, 205)
LemonChiffon1 = base.RGBColor(255, 250, 205)
LemonChiffon2 = base.RGBColor(238, 233, 191)
LemonChiffon3 = base.RGBColor(205, 201, 165)
LemonChiffon4 = base.RGBColor(139, 137, 112)
LightBlue = base.RGBColor(173, 216, 230)
LightBlue1 = base.RGBColor(191, 239, 255)
LightBlue2 = base.RGBColor(178, 223, 238)
LightBlue3 = base.RGBColor(154, 192, 205)
LightBlue4 = base.RGBColor(104, 131, 139)
LightCoral = base.RGBColor(240, 128, 128)
LightCyan = base.RGBColor(224, 255, 255)
LightCyan1 = base.RGBColor(224, 255, 255)
LightCyan2 = base.RGBColor(209, 238, 238)
LightCyan3 = base.RGBColor(180, 205, 205)
LightCyan4 = base.RGBColor(122, 139, 139)
LightGoldenrod = base.RGBColor(250, 250, 210)
LightGoldenrod1 = base.RGBColor(255, 236, 139)
LightGoldenrod2 = base.RGBColor(238, 220, 130)
LightGoldenrod3 = base.RGBColor(205, 190, 112)
LightGoldenrod4 = base.RGBColor(139, 129, 76)
LightGray = base.RGBColor(211, 211, 211)
LightGrey = base.RGBColor(211, 211, 211)
LightGreen = base.RGBColor(144, 238, 144)
LightPink = base.RGBColor(255, 182, 193)
LightPink1 = base.RGBColor(255, 174, 185)
LightPink2 = base.RGBColor(238, 162, 173)
LightPink3 = base.RGBColor(205, 140, 149)
LightPink4 = base.RGBColor(139, 95, 101)
LightSalmon = base.RGBColor(255, 160, 122)
LightSalmon1 = base.RGBColor(255, 160, 122)
LightSalmon2 = base.RGBColor(238, 149, 114)
LightSalmon3 = base.RGBColor(205, 129, 98)
LightSalmon4 = base.RGBColor(139, 87, 66)
LightSeaGreen = base.RGBColor( 32, 178, 170)
LightSkyBlue = base.RGBColor(135, 206, 250)
LightSkyBlue1 = base.RGBColor(176, 226, 255)
LightSkyBlue2 = base.RGBColor(164, 211, 238)
LightSkyBlue3 = base.RGBColor(141, 182, 205)
LightSkyBlue4 = base.RGBColor( 96, 123, 139)
LightSlateGray = base.RGBColor(119, 136, 153)
LightSlateGrey = base.RGBColor(119, 136, 153)
LightSteelBlue = base.RGBColor(176, 196, 222)
LightSteelBlue1 = base.RGBColor(202, 225, 255)
LightSteelBlue2 = base.RGBColor(188, 210, 238)
LightSteelBlue3 = base.RGBColor(162, 181, 205)
LightSteelBlue4 = base.RGBColor(110, 123, 139)
LightYellow = base.RGBColor(255, 255, 224)
LightYellow1 = base.RGBColor(255, 255, 224)
LightYellow2 = base.RGBColor(238, 238, 209)
LightYellow3 = base.RGBColor(205, 205, 180)
LightYellow4 = base.RGBColor(139, 139, 122)
Lime = base.RGBColor( 0, 255, 0)
Lime1 = base.RGBColor( 0, 255, 0)
Lime2 = base.RGBColor( 0, 238, 0)
Lime3 = base.RGBColor( 0, 205, 0)
Lime4 = base.RGBColor( 0, 139, 0)
LimeGreen = base.RGBColor( 50, 205, 50)
Linen = base.RGBColor(250, 240, 230)
Magenta = base.RGBColor(255, 0, 255)
Magenta1 = base.RGBColor(255, 0, 255)
Magenta2 = base.RGBColor(238, 0, 238)
Magenta3 = base.RGBColor(205, 0, 205)
Magenta4 = base.RGBColor(139, 0, 139)
Maroon = base.RGBColor(176, 48, 96)
Maroon1 = base.RGBColor(255, 52, 179)
Maroon2 = base.RGBColor(238, 48, 167)
Maroon3 = base.RGBColor(205, 41, 144)
Maroon4 = base.RGBColor(139, 28, 98)
X11Maroon = base.RGBColor(176, 48, 96)
X11Maroon1 = base.RGBColor(255, 52, 179)
X11Maroon2 = base.RGBColor(238, 48, 167)
X11Maroon3 = base.RGBColor(205, 41, 144)
X11Maroon4 = base.RGBColor(139, 28, 98)
WebMaroon = base.RGBColor(127, 0, 0)
MediumAquamarine = base.RGBColor(102, 205, 170)
MediumBlue = base.RGBColor( 0, 0, 205)
MediumOrchid = base.RGBColor(186, 85, 211)
MediumOrchid1 = base.RGBColor(224, 102, 255)
MediumOrchid2 = base.RGBColor(209, 95, 238)
MediumOrchid3 = base.RGBColor(180, 82, 205)
MediumOrchid4 = base.RGBColor(122, 55, 139)
MediumPurple = base.RGBColor(147, 112, 219)
MediumPurple1 = base.RGBColor(171, 130, 255)
MediumPurple2 = base.RGBColor(159, 121, 238)
MediumPurple3 = base.RGBColor(137, 104, 205)
MediumPurple4 = base.RGBColor( 93, 71, 139)
MediumSeaGreen = base.RGBColor( 60, 179, 113)
MediumSlateBlue = base.RGBColor(123, 104, 238)
MediumSpringGreen = base.RGBColor( 0, 250, 154)
MediumTurquoise = base.RGBColor( 72, 209, 204)
MediumVioletRed = base.RGBColor(199, 21, 133)
MidnightBlue = base.RGBColor( 25, 25, 112)
MintCream = base.RGBColor(245, 255, 250)
MistyRose = base.RGBColor(255, 228, 225)
MistyRose1 = base.RGBColor(255, 228, 225)
MistyRose2 = base.RGBColor(238, 213, 210)
MistyRose3 = base.RGBColor(205, 183, 181)
MistyRose4 = base.RGBColor(139, 125, 123)
Moccasin = base.RGBColor(255, 228, 181)
NavajoWhite = base.RGBColor(255, 222, 173)
NavajoWhite1 = base.RGBColor(255, 222, 173)
NavajoWhite2 = base.RGBColor(238, 207, 161)
NavajoWhite3 = base.RGBColor(205, 179, 139)
NavajoWhite4 = base.RGBColor(139, 121, 94)
NavyBlue = base.RGBColor( 0, 0, 128)
Navy = base.RGBColor( 0, 0, 128)
OldLace = base.RGBColor(253, 245, 230)
Olive = base.RGBColor(128, 128, 0)
OliveDrab = base.RGBColor(107, 142, 35)
OliveDrab1 = base.RGBColor(192, 255, 62)
OliveDrab2 = base.RGBColor(179, 238, 58)
OliveDrab3 = base.RGBColor(154, 205, 50)
OliveDrab4 = base.RGBColor(105, 139, 34)
Orange = base.RGBColor(255, 165, 0)
Orange1 = base.RGBColor(255, 165, 0)
Orange2 = base.RGBColor(238, 154, 0)
Orange3 = base.RGBColor(205, 133, 0)
Orange4 = base.RGBColor(139, 90, 0)
OrangeRed = base.RGBColor(255, 69, 0)
OrangeRed1 = base.RGBColor(255, 69, 0)
OrangeRed2 = base.RGBColor(238, 64, 0)
OrangeRed3 = base.RGBColor(205, 55, 0)
OrangeRed4 = base.RGBColor(139, 37, 0)
Orchid = base.RGBColor(218, 112, 214)
Orchid1 = base.RGBColor(255, 131, 250)
Orchid2 = base.RGBColor(238, 122, 233)
Orchid3 = base.RGBColor(205, 105, 201)
Orchid4 = base.RGBColor(139, 71, 137)
PaleGoldenrod = base.RGBColor(238, 232, 170)
PaleGreen = base.RGBColor(152, 251, 152)
PaleGreen1 = base.RGBColor(154, 255, 154)
PaleGreen2 = base.RGBColor(144, 238, 144)
PaleGreen3 = base.RGBColor(124, 205, 124)
PaleGreen4 = base.RGBColor( 84, 139, 84)
PaleTurquoise = base.RGBColor(175, 238, 238)
PaleTurquoise1 = base.RGBColor(187, 255, 255)
PaleTurquoise2 = base.RGBColor(174, 238, 238)
PaleTurquoise3 = base.RGBColor(150, 205, 205)
PaleTurquoise4 = base.RGBColor(102, 139, 139)
PaleVioletRed = base.RGBColor(219, 112, 147)
PaleVioletRed1 = base.RGBColor(255, 130, 171)
PaleVioletRed2 = base.RGBColor(238, 121, 159)
PaleVioletRed3 = base.RGBColor(205, 104, 137)
PaleVioletRed4 = base.RGBColor(139, 71, 93)
PapayaWhip = base.RGBColor(255, 239, 213)
PeachPuff = base.RGBColor(255, 218, 185)
PeachPuff1 = base.RGBColor(255, 218, 185)
PeachPuff2 = base.RGBColor(238, 203, 173)
PeachPuff3 = base.RGBColor(205, 175, 149)
PeachPuff4 = base.RGBColor(139, 119, 101)
Peru = base.RGBColor(205, 133, 63)
Pink = base.RGBColor(255, 192, 203)
Pink1 = base.RGBColor(255, 181, 197)
Pink2 = base.RGBColor(238, 169, 184)
Pink3 = base.RGBColor(205, 145, 158)
Pink4 = base.RGBColor(139, 99, 108)
Plum = base.RGBColor(221, 160, 221)
Plum1 = base.RGBColor(255, 187, 255)
Plum2 = base.RGBColor(238, 174, 238)
Plum3 = base.RGBColor(205, 150, 205)
Plum4 = base.RGBColor(139, 102, 139)
PowderBlue = base.RGBColor(176, 224, 230)
Purple = base.RGBColor(160, 32, 240)
Purple1 = base.RGBColor(155, 48, 255)
Purple2 = base.RGBColor(145, 44, 238)
Purple3 = base.RGBColor(125, 38, 205)
Purple4 = base.RGBColor( 85, 26, 139)
X11Purple = base.RGBColor(160, 32, 240)
X11Purple1 = base.RGBColor(155, 48, 255)
X11Purple2 = base.RGBColor(145, 44, 238)
X11Purple3 = base.RGBColor(125, 38, 205)
X11Purple4 = base.RGBColor( 85, 26, 139)
WebPurple = base.RGBColor(127, 0, 127)
RebeccaPurple = base.RGBColor(102, 51, 153)
Red = base.RGBColor(255, 0, 0)
Red1 = base.RGBColor(255, 0, 0)
Red2 = base.RGBColor(238, 0, 0)
Red3 = base.RGBColor(205, 0, 0)
Red4 = base.RGBColor(139, 0, 0)
RosyBrown = base.RGBColor(188, 143, 143)
RosyBrown1 = base.RGBColor(255, 193, 193)
RosyBrown2 = base.RGBColor(238, 180, 180)
RosyBrown3 = base.RGBColor(205, 155, 155)
RosyBrown4 = base.RGBColor(139, 105, 105)
RoyalBlue = base.RGBColor( 65, 105, 225)
RoyalBlue1 = base.RGBColor( 72, 118, 255)
RoyalBlue2 = base.RGBColor( 67, 110, 238)
RoyalBlue3 = base.RGBColor( 58, 95, 205)
RoyalBlue4 = base.RGBColor( 39, 64, 139)
SaddleBrown = base.RGBColor(139, 69, 19)
Salmon = base.RGBColor(250, 128, 114)
Salmon1 = base.RGBColor(255, 140, 105)
Salmon2 = base.RGBColor(238, 130, 98)
Salmon3 = base.RGBColor(205, 112, 84)
Salmon4 = base.RGBColor(139, 76, 57)
SandyBrown = base.RGBColor(244, 164, 96)
SeaGreen = base.RGBColor( 46, 139, 87)
SeaGreen1 = base.RGBColor( 84, 255, 159)
SeaGreen2 = base.RGBColor( 78, 238, 148)
SeaGreen3 = base.RGBColor( 67, 205, 128)
SeaGreen4 = base.RGBColor( 46, 139, 87)
Seashell = base.RGBColor(255, 245, 238)
Seashell1 = base.RGBColor(255, 245, 238)
Seashell2 = base.RGBColor(238, 229, 222)
Seashell3 = base.RGBColor(205, 197, 191)
Seashell4 = base.RGBColor(139, 134, 130)
Sienna = base.RGBColor(160, 82, 45)
Sienna1 = base.RGBColor(255, 130, 71)
Sienna2 = base.RGBColor(238, 121, 66)
Sienna3 = base.RGBColor(205, 104, 57)
Sienna4 = base.RGBColor(139, 71, 38)
Silver = base.RGBColor(192, 192, 192)
SkyBlue = base.RGBColor(135, 206, 235)
SkyBlue1 = base.RGBColor(135, 206, 255)
SkyBlue2 = base.RGBColor(126, 192, 238)
SkyBlue3 = base.RGBColor(108, 166, 205)
SkyBlue4 = base.RGBColor( 74, 112, 139)
SlateBlue = base.RGBColor(106, 90, 205)
SlateBlue1 = base.RGBColor(131, 111, 255)
SlateBlue2 = base.RGBColor(122, 103, 238)
SlateBlue3 = base.RGBColor(105, 89, 205)
SlateBlue4 = base.RGBColor( 71, 60, 139)
SlateGray = base.RGBColor(112, 128, 144)
SlateGray1 = base.RGBColor(198, 226, 255)
SlateGray2 = base.RGBColor(185, 211, 238)
SlateGray3 = base.RGBColor(159, 182, 205)
SlateGray4 = base.RGBColor(108, 123, 139)
SlateGrey = base.RGBColor(112, 128, 144)
SlateGrey1 = base.RGBColor(198, 226, 255)
SlateGrey2 = base.RGBColor(185, 211, 238)
SlateGrey3 = base.RGBColor(159, 182, 205)
SlateGrey4 = base.RGBColor(108, 123, 139)
Snow = base.RGBColor(255, 250, 250)
Snow1 = base.RGBColor(255, 250, 250)
Snow2 = base.RGBColor(238, 233, 233)
Snow3 = base.RGBColor(205, 201, 201)
Snow4 = base.RGBColor(139, 137, 137)
SpringGreen = base.RGBColor( 0, 255, 127)
SpringGreen1 = base.RGBColor( 0, 255, 127)
SpringGreen2 = base.RGBColor( 0, 238, 118)
SpringGreen3 = base.RGBColor( 0, 205, 102)
SpringGreen4 = base.RGBColor( 0, 139, 69)
SteelBlue = base.RGBColor( 70, 130, 180)
SteelBlue1 = base.RGBColor( 99, 184, 255)
SteelBlue2 = base.RGBColor( 92, 172, 238)
SteelBlue3 = base.RGBColor( 79, 148, 205)
SteelBlue4 = base.RGBColor( 54, 100, 139)
Tan = base.RGBColor(210, 180, 140)
Tan1 = base.RGBColor(255, 165, 79)
Tan2 = base.RGBColor(238, 154, 73)
Tan3 = base.RGBColor(205, 133, 63)
Tan4 = base.RGBColor(139, 90, 43)
Teal = base.RGBColor( 0, 128, 128)
Thistle = base.RGBColor(216, 191, 216)
Thistle1 = base.RGBColor(255, 225, 255)
Thistle2 = base.RGBColor(238, 210, 238)
Thistle3 = base.RGBColor(205, 181, 205)
Thistle4 = base.RGBColor(139, 123, 139)
Tomato = base.RGBColor(255, 99, 71)
Tomato1 = base.RGBColor(255, 99, 71)
Tomato2 = base.RGBColor(238, 92, 66)
Tomato3 = base.RGBColor(205, 79, 57)
Tomato4 = base.RGBColor(139, 54, 38)
Turquoise = base.RGBColor( 64, 224, 208)
Turquoise1 = base.RGBColor( 0, 245, 255)
Turquoise2 = base.RGBColor( 0, 229, 238)
Turquoise3 = base.RGBColor( 0, 197, 205)
Turquoise4 = base.RGBColor( 0, 134, 139)
Violet = base.RGBColor(238, 130, 238)
VioletRed = base.RGBColor(208, 32, 144)
VioletRed1 = base.RGBColor(255, 62, 150)
VioletRed2 = base.RGBColor(238, 58, 140)
VioletRed3 = base.RGBColor(205, 50, 120)
VioletRed4 = base.RGBColor(139, 34, 82)
Wheat = base.RGBColor(245, 222, 179)
Wheat1 = base.RGBColor(255, 231, 186)
Wheat2 = base.RGBColor(238, 216, 174)
Wheat3 = base.RGBColor(205, 186, 150)
Wheat4 = base.RGBColor(139, 126, 102)
White = base.RGBColor(255, 255, 255)
WhiteSmoke = base.RGBColor(245, 245, 245)
Yellow = base.RGBColor(255, 255, 0)
Yellow1 = base.RGBColor(255, 255, 0)
Yellow2 = base.RGBColor(238, 238, 0)
Yellow3 = base.RGBColor(205, 205, 0)
Yellow4 = base.RGBColor(139, 139, 0)
YellowGreen = base.RGBColor(154, 205, 50)
Gray0 = base.RGBColor( 0, 0, 0)
Gray1 = base.RGBColor( 3, 3, 3)
Gray2 = base.RGBColor( 5, 5, 5)
Gray3 = base.RGBColor( 8, 8, 8)
Gray4 = base.RGBColor( 10, 10, 10)
Gray5 = base.RGBColor( 13, 13, 13)
Gray6 = base.RGBColor( 15, 15, 15)
Gray7 = base.RGBColor( 18, 18, 18)
Gray8 = base.RGBColor( 20, 20, 20)
Gray9 = base.RGBColor( 23, 23, 23)
Gray10 = base.RGBColor( 26, 26, 26)
Gray11 = base.RGBColor( 28, 28, 28)
Gray12 = base.RGBColor( 31, 31, 31)
Gray13 = base.RGBColor( 33, 33, 33)
Gray14 = base.RGBColor( 36, 36, 36)
Gray15 = base.RGBColor( 38, 38, 38)
Gray16 = base.RGBColor( 41, 41, 41)
Gray17 = base.RGBColor( 43, 43, 43)
Gray18 = base.RGBColor( 46, 46, 46)
Gray19 = base.RGBColor( 48, 48, 48)
Gray20 = base.RGBColor( 51, 51, 51)
Gray21 = base.RGBColor( 54, 54, 54)
Gray22 = base.RGBColor( 56, 56, 56)
Gray23 = base.RGBColor( 59, 59, 59)
Gray24 = base.RGBColor( 61, 61, 61)
Gray25 = base.RGBColor( 64, 64, 64)
Gray26 = base.RGBColor( 66, 66, 66)
Gray27 = base.RGBColor( 69, 69, 69)
Gray28 = base.RGBColor( 71, 71, 71)
Gray29 = base.RGBColor( 74, 74, 74)
Gray30 = base.RGBColor( 77, 77, 77)
Gray31 = base.RGBColor( 79, 79, 79)
Gray32 = base.RGBColor( 82, 82, 82)
Gray33 = base.RGBColor( 84, 84, 84)
Gray34 = base.RGBColor( 87, 87, 87)
Gray35 = base.RGBColor( 89, 89, 89)
Gray36 = base.RGBColor( 92, 92, 92)
Gray37 = base.RGBColor( 94, 94, 94)
Gray38 = base.RGBColor( 97, 97, 97)
Gray39 = base.RGBColor( 99, 99, 99)
Gray40 = base.RGBColor(102, 102, 102)
Gray41 = base.RGBColor(105, 105, 105)
Gray42 = base.RGBColor(107, 107, 107)
Gray43 = base.RGBColor(110, 110, 110)
Gray44 = base.RGBColor(112, 112, 112)
Gray45 = base.RGBColor(115, 115, 115)
Gray46 = base.RGBColor(117, 117, 117)
Gray47 = base.RGBColor(120, 120, 120)
Gray48 = base.RGBColor(122, 122, 122)
Gray49 = base.RGBColor(125, 125, 125)
Gray50 = base.RGBColor(127, 127, 127)
Gray51 = base.RGBColor(130, 130, 130)
Gray52 = base.RGBColor(133, 133, 133)
Gray53 = base.RGBColor(135, 135, 135)
Gray54 = base.RGBColor(138, 138, 138)
Gray55 = base.RGBColor(140, 140, 140)
Gray56 = base.RGBColor(143, 143, 143)
Gray57 = base.RGBColor(145, 145, 145)
Gray58 = base.RGBColor(148, 148, 148)
Gray59 = base.RGBColor(150, 150, 150)
Gray60 = base.RGBColor(153, 153, 153)
Gray61 = base.RGBColor(156, 156, 156)
Gray62 = base.RGBColor(158, 158, 158)
Gray63 = base.RGBColor(161, 161, 161)
Gray64 = base.RGBColor(163, 163, 163)
Gray65 = base.RGBColor(166, 166, 166)
Gray66 = base.RGBColor(168, 168, 168)
Gray67 = base.RGBColor(171, 171, 171)
Gray68 = base.RGBColor(173, 173, 173)
Gray69 = base.RGBColor(176, 176, 176)
Gray70 = base.RGBColor(179, 179, 179)
Gray71 = base.RGBColor(181, 181, 181)
Gray72 = base.RGBColor(184, 184, 184)
Gray73 = base.RGBColor(186, 186, 186)
Gray74 = base.RGBColor(189, 189, 189)
Gray75 = base.RGBColor(191, 191, 191)
Gray76 = base.RGBColor(194, 194, 194)
Gray77 = base.RGBColor(196, 196, 196)
Gray78 = base.RGBColor(199, 199, 199)
Gray79 = base.RGBColor(201, 201, 201)
Gray80 = base.RGBColor(204, 204, 204)
Gray81 = base.RGBColor(207, 207, 207)
Gray82 = base.RGBColor(209, 209, 209)
Gray83 = base.RGBColor(212, 212, 212)
Gray84 = base.RGBColor(214, 214, 214)
Gray85 = base.RGBColor(217, 217, 217)
Gray86 = base.RGBColor(219, 219, 219)
Gray87 = base.RGBColor(222, 222, 222)
Gray88 = base.RGBColor(224, 224, 224)
Gray89 = base.RGBColor(227, 227, 227)
Gray90 = base.RGBColor(229, 229, 229)
Gray91 = base.RGBColor(232, 232, 232)
Gray92 = base.RGBColor(235, 235, 235)
Gray93 = base.RGBColor(237, 237, 237)
Gray94 = base.RGBColor(240, 240, 240)
Gray95 = base.RGBColor(242, 242, 242)
Gray96 = base.RGBColor(245, 245, 245)
Gray97 = base.RGBColor(247, 247, 247)
Gray98 = base.RGBColor(250, 250, 250)
Gray99 = base.RGBColor(252, 252, 252)
Gray100 = base.RGBColor(255, 255, 255)
Grey0 = base.RGBColor( 0, 0, 0)
Grey1 = base.RGBColor( 3, 3, 3)
Grey2 = base.RGBColor( 5, 5, 5)
Grey3 = base.RGBColor( 8, 8, 8)
Grey4 = base.RGBColor( 10, 10, 10)
Grey5 = base.RGBColor( 13, 13, 13)
Grey6 = base.RGBColor( 15, 15, 15)
Grey7 = base.RGBColor( 18, 18, 18)
Grey8 = base.RGBColor( 20, 20, 20)
Grey9 = base.RGBColor( 23, 23, 23)
Grey10 = base.RGBColor( 26, 26, 26)
Grey11 = base.RGBColor( 28, 28, 28)
Grey12 = base.RGBColor( 31, 31, 31)
Grey13 = base.RGBColor( 33, 33, 33)
Grey14 = base.RGBColor( 36, 36, 36)
Grey15 = base.RGBColor( 38, 38, 38)
Grey16 = base.RGBColor( 41, 41, 41)
Grey17 = base.RGBColor( 43, 43, 43)
Grey18 = base.RGBColor( 46, 46, 46)
Grey19 = base.RGBColor( 48, 48, 48)
Grey20 = base.RGBColor( 51, 51, 51)
Grey21 = base.RGBColor( 54, 54, 54)
Grey22 = base.RGBColor( 56, 56, 56)
Grey23 = base.RGBColor( 59, 59, 59)
Grey24 = base.RGBColor( 61, 61, 61)
Grey25 = base.RGBColor( 64, 64, 64)
Grey26 = base.RGBColor( 66, 66, 66)
Grey27 = base.RGBColor( 69, 69, 69)
Grey28 = base.RGBColor( 71, 71, 71)
Grey29 = base.RGBColor( 74, 74, 74)
Grey30 = base.RGBColor( 77, 77, 77)
Grey31 = base.RGBColor( 79, 79, 79)
Grey32 = base.RGBColor( 82, 82, 82)
Grey33 = base.RGBColor( 84, 84, 84)
Grey34 = base.RGBColor( 87, 87, 87)
Grey35 = base.RGBColor( 89, 89, 89)
Grey36 = base.RGBColor( 92, 92, 92)
Grey37 = base.RGBColor( 94, 94, 94)
Grey38 = base.RGBColor( 97, 97, 97)
Grey39 = base.RGBColor( 99, 99, 99)
Grey40 = base.RGBColor(102, 102, 102)
Grey41 = base.RGBColor(105, 105, 105)
Grey42 = base.RGBColor(107, 107, 107)
Grey43 = base.RGBColor(110, 110, 110)
Grey44 = base.RGBColor(112, 112, 112)
Grey45 = base.RGBColor(115, 115, 115)
Grey46 = base.RGBColor(117, 117, 117)
Grey47 = base.RGBColor(120, 120, 120)
Grey48 = base.RGBColor(122, 122, 122)
Grey49 = base.RGBColor(125, 125, 125)
Grey50 = base.RGBColor(127, 127, 127)
Grey51 = base.RGBColor(130, 130, 130)
Grey52 = base.RGBColor(133, 133, 133)
Grey53 = base.RGBColor(135, 135, 135)
Grey54 = base.RGBColor(138, 138, 138)
Grey55 = base.RGBColor(140, 140, 140)
Grey56 = base.RGBColor(143, 143, 143)
Grey57 = base.RGBColor(145, 145, 145)
Grey58 = base.RGBColor(148, 148, 148)
Grey59 = base.RGBColor(150, 150, 150)
Grey60 = base.RGBColor(153, 153, 153)
Grey61 = base.RGBColor(156, 156, 156)
Grey62 = base.RGBColor(158, 158, 158)
Grey63 = base.RGBColor(161, 161, 161)
Grey64 = base.RGBColor(163, 163, 163)
Grey65 = base.RGBColor(166, 166, 166)
Grey66 = base.RGBColor(168, 168, 168)
Grey67 = base.RGBColor(171, 171, 171)
Grey68 = base.RGBColor(173, 173, 173)
Grey69 = base.RGBColor(176, 176, 176)
Grey70 = base.RGBColor(179, 179, 179)
Grey71 = base.RGBColor(181, 181, 181)
Grey72 = base.RGBColor(184, 184, 184)
Grey73 = base.RGBColor(186, 186, 186)
Grey74 = base.RGBColor(189, 189, 189)
Grey75 = base.RGBColor(191, 191, 191)
Grey76 = base.RGBColor(194, 194, 194)
Grey77 = base.RGBColor(196, 196, 196)
Grey78 = base.RGBColor(199, 199, 199)
Grey79 = base.RGBColor(201, 201, 201)
Grey80 = base.RGBColor(204, 204, 204)
Grey81 = base.RGBColor(207, 207, 207)
Grey82 = base.RGBColor(209, 209, 209)
Grey83 = base.RGBColor(212, 212, 212)
Grey84 = base.RGBColor(214, 214, 214)
Grey85 = base.RGBColor(217, 217, 217)
Grey86 = base.RGBColor(219, 219, 219)
Grey87 = base.RGBColor(222, 222, 222)
Grey88 = base.RGBColor(224, 224, 224)
Grey89 = base.RGBColor(227, 227, 227)
Grey90 = base.RGBColor(229, 229, 229)
Grey91 = base.RGBColor(232, 232, 232)
Grey92 = base.RGBColor(235, 235, 235)
Grey93 = base.RGBColor(237, 237, 237)
Grey94 = base.RGBColor(240, 240, 240)
Grey95 = base.RGBColor(242, 242, 242)
Grey96 = base.RGBColor(245, 245, 245)
Grey97 = base.RGBColor(247, 247, 247)
Grey98 = base.RGBColor(250, 250, 250)
Grey99 = base.RGBColor(252, 252, 252)
Grey100 = base.RGBColor(255, 255, 255)
|
|
#!/usr/bin/env python
# Copyright (c) 2015 Arista Networks, Inc. All rights reserved.
# Arista Networks, Inc. Confidential and Proprietary.
""" This program provides an agent that sends and responds to
health-check packets in order to determine the liveliness of the
configured MPLS tunnels. """
import eossdk
import eossdk_utils
import functools
import json
import os
import pyinotify
import scapy
import scapy.fields
import scapy.layers.l2
import scapy.layers.inet
import scapy.packet
import scapy.route
import scapy.sendrecv
import socket
import struct
import sys
import time
# Requires: RPMs for scapy and EosSdk, as well as the eossdk_utils.py
# script (for debugging). Tunnel configuration is read from the JSON
# config file passed to the agent (see process_config below).
# The main agent is located in the MplsTunnelLivenessAgent class below.
POLL_TIME = 1 # how often to send a liveness packet in seconds
TIMEOUT_TIME = 5 # seconds before a tunnel is declared dead
STARTUP_GRACEPERIOD = 0 # seconds after startup before we start checking a tunnel
# Make sure your IP tables are up to date on the switch:
# > sudo iptables -I INPUT -p UDP --dport 17171 -j ACCEPT
UDP_PORT = 17171
MAX_PKT_SIZE = 2048 # The maximum payload size of our packet
MAX_INT = 0xffffffff # The maximum size of a 4 byte unsigned int
class Message(object):
""" A Message is the payload of the health-check packets that this
agent sends out and receives. It consists of two parts. The first
is a header that contains an number that identifies which tunnel
the sender sent this message out of. The header also contains a
numeric id of the packet, and finally, a number describing how many
'entries' are in the second part of the packet. This second part is
a list of 0 or more 'tunnel status entries'. Each entry contains a
numeric tunnel identifier and a boolean describing whether the
sending switch thinks that tunnel is alive or not."""
# Header consists of (version, pid, sender's tunnel key, msg id,
# num status entries), as integers, in little-endian:
header_format = '<IIIII'
header_len = struct.calcsize(header_format)
tunnel_entry_format = '<I?' # tunnel_key, bool
tunnel_entry_len = struct.calcsize(tunnel_entry_format)
def __init__(self, pid, egress_tunnel_key, msg_id, tunnel_liveness):
self.pid = pid
self.egress_tunnel_key = egress_tunnel_key
self.msg_id = msg_id
# Mapping from tunnel_key to boolean whether this is alive or not
self.tunnel_liveness = tunnel_liveness
def serialize(self):
      # First pack the header: version, pid, our tunnel key, msg id,
      # and the number of status entries that follow
ret = struct.pack(Message.header_format, 1, self.pid, self.egress_tunnel_key,
self.msg_id, len(self.tunnel_liveness))
for tunnel_key, is_alive in self.tunnel_liveness.iteritems():
ret += struct.pack(Message.tunnel_entry_format, tunnel_key, is_alive)
if len(ret) > MAX_PKT_SIZE:
assert False, "Packet %s too large to send!" % self.__str__()
return ret
def __str__(self):
return "Message(sender_pid=%d, egress_tunnel_key=%d, id=%d, %r)" % (
self.pid, self.egress_tunnel_key, self.msg_id, self.tunnel_liveness)
@staticmethod
def deserialize(buf):
""" Given a buffer, create and return a Message from the
buffer's contents. If the buffer does not contain a valid
Message, this returns None.
"""
if len(buf) < Message.header_len:
return None
version, pid, egress_tunnel_key, msg_id, num_entries = struct.unpack(
Message.header_format, buf[:Message.header_len])
if version != 1:
return None
msg_len = Message.header_len + Message.tunnel_entry_len * num_entries
if len(buf) < msg_len:
return None
liveness = {}
for i in xrange(Message.header_len, msg_len,
Message.tunnel_entry_len):
# Unpack each status entry reported in this packet
key, is_alive = struct.unpack(Message.tunnel_entry_format,
buf[i: i + Message.tunnel_entry_len])
liveness[key] = is_alive
return Message(pid, egress_tunnel_key, msg_id, liveness)
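# Added illustrative sketch (not part of the original agent): a Message
# round-trips through serialize()/deserialize(). All values below are made up.
#
#   >>> m = Message(pid=1234, egress_tunnel_key=7, msg_id=42,
#   ...             tunnel_liveness={3: True, 9: False})
#   >>> buf = m.serialize()            # 20-byte header + two 5-byte entries
#   >>> Message.deserialize(buf).tunnel_liveness
#   {3: True, 9: False}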
class EgressTunnel(object):
""" Contains the configuration and status of this switch's outgoing
tunnels. """
def __init__(self, label, nexthop_ip_addr):
# Configurable attributes
self.mpls_label = label
self.nexthop_ip = nexthop_ip_addr
# Dynamic attributes:
# The bridging MAC of the nexthop:
self.nexthop_eth_addr = None
# The interface the nexthop_eth_addr lives on:
self.egress_intf = None
# ... and the MAC address of that interface:
self.egress_intf_eth_addr = None
self.last_update_time = 0
self.is_alive = True
class RemoteTunnelStatus(object):
""" Tracks the status of a remote tunnel (a tunnel where the packet
sender is the remote switch). """
def __init__(self):
self.last_rx_msg_id = 0
self.last_update_time = time.time()
class RemoteSwitch(object):
""" This object stores the configuration for our outgoing tunnels to
this remote switch, as well as a status collection containing our view on
the liveness of that switch's tunnels to us. """
def __init__(self, dest_ip):
# Configuration
# The IP address of the remote switch
self.destination_ip = dest_ip
# The following dictionary keeps track of our outgoing tunnels
      # to this switch. It is a mapping from an integer tunnel_key to an
      # EgressTunnel()
self.egress_tunnels = {}
# Status
self.last_tx_msg_id = 0
self.last_rx_msg_id = 0
self.pid = 0
# The `remote_tunnel_status` variable keeps track of whether their
# tunnels are alive or not. It is a mapping from an integer
# tunnel_key to a RemoteTunnelStatus() object. Note that these
      # keys correspond to the remote switch's tunnel collection, and
      # are not the same as the keys of the `egress_tunnels` variable above.
self.remote_tunnel_status = {}
def liveness_dict(self, cur_time):
ret = {}
for key, tunnel_status in self.remote_tunnel_status.items():
time_delta = cur_time - tunnel_status.last_update_time
if time_delta > (TIMEOUT_TIME * 10):
# Stop sending tunnels that we haven't heard from in a
# really long time.
del self.remote_tunnel_status[key]
elif time_delta > TIMEOUT_TIME:
# Tunnel is dead!
ret[key] = False
else:
ret[key] = True
return ret
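# Added worked example: with TIMEOUT_TIME = 5, a remote tunnel last heard from
# 3 seconds ago is reported as alive (True), one last heard from 8 seconds ago
# is reported as dead (False), and one silent for more than 50 seconds
# (TIMEOUT_TIME * 10) is dropped from the returned dict entirely, so we stop
# echoing its status back to the sender.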
class MPLS(scapy.packet.Packet):
""" Create an MPLS header that can be used with scapy packets """
name = "MPLS"
fields_desc = [ scapy.fields.BitField("label", 9, 20),
scapy.fields.BitField("cos", 0, 3),
scapy.fields.BitField("s", 1, 1),
scapy.fields.ByteField("ttl", 0) ]
scapy.packet.bind_layers(scapy.layers.l2.Ether, MPLS, type=0x8847)
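# Added sketch of how this header is used (assumes standard scapy layering):
# stacking MPLS between Ether and IP yields an MPLS-encapsulated frame, which
# is exactly what send_packet() below builds.
#
#   >>> pkt = (scapy.layers.l2.Ether() / MPLS(label=100, ttl=64) /
#   ...        scapy.layers.inet.IP(dst="10.0.0.1") /
#   ...        scapy.layers.inet.UDP(dport=UDP_PORT))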
class InotifyHandler(pyinotify.ProcessEvent):
""" A helper class handles inotify updates """
parent = None
def my_init(self, **kwargs):
self.parent = kwargs['parent']
def process_IN_MODIFY(self, event):
self.parent.process_config()
class MplsTunnelLivenessAgent(eossdk_utils.EosSdkAgent,
eossdk.AgentHandler,
eossdk.FdHandler,
eossdk.TimeoutHandler):
""" This agent is responsible for tracking the liveness of specified
MPLS tunnels. """
def __init__(self, sdk, config_file="MplsTunnelLivenessConfig.json"):
""" Create the agent. Requires an eossdk handle, as well as the
input configuration """
self.agent_mgr = sdk.get_agent_mgr()
self.eth_intf_mgr = sdk.get_eth_intf_mgr()
self.ip_intf_mgr = sdk.get_ip_intf_mgr()
self.mac_table_mgr = sdk.get_mac_table_mgr()
self.neighbor_table_mgr = sdk.get_neighbor_table_mgr()
self.tracer = eossdk.Tracer("MplsTunnelLivenessAgent")
eossdk_utils.EosSdkAgent.__init__(self)
eossdk.AgentHandler.__init__(self, self.agent_mgr)
eossdk.TimeoutHandler.__init__(self, sdk.get_timeout_mgr())
eossdk.FdHandler.__init__(self)
self.tracer.trace0("MPLS tunnel liveness agent constructed")
self.initialized = False
self.pid = os.getpid()
# The l3 interface we should grab our "SRC IP" from. Read from
# the config:
self.src_intf = None
self.src_ip = None # Resolved after reading from config
# A UDP socket that receives liveness packets from other
# agents. Created during on_initialized
self.rx_sock = None
# A mapping from remote switch IP to RemoteSwitch()
self.remote_switches = {}
self.config_file = config_file
self.wm = pyinotify.WatchManager()
handler = functools.partial(InotifyHandler, parent=self)
# pylint: disable-msg=E1101
self.wm.watch_transient_file(config_file, pyinotify.IN_MODIFY, handler)
# pylint: enable-msg=E1101
self.notifier = pyinotify.AsyncNotifier(self.wm,
InotifyHandler(parent=self))
self.notifier.coalesce_events(True)
self.inotify_fd = self.wm.get_fd()
self.watch_readable(self.inotify_fd, True)
# Read our initial configuration
self.process_config()
def on_initialized(self):
""" Update our configured egress tunnels. Start all tunnels as
alive, with a last_update_time of now + any grace
      period. Calculate the output interface for each tunnel based
      on that tunnel's nexthop MAC address. """
self.initialized = True
self.tracer.trace2("Looking up the IP address for interface " + self.src_intf)
src_ips = self.ip_intf_mgr.ip_addrs(eossdk.IntfId(self.src_intf))
if not src_ips:
assert False, "No IP addresses assigned to %s" % self.src_intf
self.src_ip = src_ips[0].addr().to_string()
self.tracer.trace2("Using src IP address " + self.src_ip)
self.tracer.trace2("Create the socket that receives remote probes")
self.rx_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.rx_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.rx_sock.bind((self.src_ip, UDP_PORT))
self.rx_sock.setblocking(0)
self.watch_readable(self.rx_sock.fileno(), True)
self.resolve_config()
def handle_tunnel_alive(self, dst_ip, tunnel_key, tunnel):
self.tracer.trace3("Tunnel %d to %s came back!" % (tunnel_key, dst_ip))
      # Do any other logic here (e.g. alert another agent that
      # tunnel.mpls_label is usable again)
def handle_tunnel_death(self, dst_ip, tunnel_key, tunnel):
self.tracer.trace3("Tunnel %d to %s died!" % (tunnel_key, dst_ip))
      # Do any other logic here (e.g. alert another agent that
      # tunnel.mpls_label is no longer a valid tunnel)
def on_timeout(self):
""" Time to send some packets to our neighbors! Our poller
fired, so we should send out our heartbeat packets. We also
check if we haven't heard about any of our tunnels recently, and
if so, mark them as dead. """
cur_time = time.time()
for host in self.remote_switches.itervalues():
liveness_dict = host.liveness_dict(cur_time)
host.last_tx_msg_id += 1
if host.last_tx_msg_id > MAX_INT:
host.last_tx_msg_id = 1
for key, tunnel in host.egress_tunnels.iteritems():
msg = Message(self.pid, key, host.last_tx_msg_id, liveness_dict)
self.send_packet(host.destination_ip, tunnel, msg)
if tunnel.is_alive and (
time.time() - tunnel.last_update_time > TIMEOUT_TIME):
               # There have been no updates to this tunnel at all
# within our timeout period.
tunnel.is_alive = False
self.handle_tunnel_death(host.destination_ip, key, tunnel)
# Schedule us to be called again in the future
self.timeout_time_is(eossdk.now() + POLL_TIME)
def on_readable(self, fd):
""" We got a packet on our UDP port! Read the packet, update our
views of the remote tunnel's liveness, and then parse the
      packet's payload to inspect what the remote switch thinks of
*our* tunnel liveness. If any liveness changed, then fire our
handlers. """
if fd == self.inotify_fd:
self.tracer.trace6("Inotify fd %d is readable" % self.inotify_fd)
self.notifier.handle_read()
return
if fd != self.rx_sock.fileno():
assert False, "Unknown socket became readable %d" % fd
data, addr = self.rx_sock.recvfrom(MAX_PKT_SIZE)
src_ip = addr[0]
self.tracer.trace6("Received message from %r" % src_ip)
if not data:
self.tracer.trace7("Received empty message, ignoring.")
return
msg = Message.deserialize(data)
if not msg:
self.tracer.trace7("Received invalid message, ignoring! "
"First 500 bytes of pkt: %r" % data[:500])
return
self.tracer.trace8("Got message %s" % str(msg))
if src_ip not in self.remote_switches:
self.tracer.trace7("Got packet from unknown host: %r" % src_ip)
return
remote_switch = self.remote_switches[src_ip]
remote_tunnel_status = remote_switch.remote_tunnel_status.setdefault(
msg.egress_tunnel_key, RemoteTunnelStatus())
if msg.pid != remote_switch.pid:
         # This is either the first message we've received from
         # them, or their remote switch restarted. In either case, the
         # msg IDs they are sending will have been reset.
remote_switch.pid = msg.pid
remote_switch.last_rx_msg_id = 0
remote_tunnel_status.last_rx_msg_id = 0
      # First, record that we got a packet from the sender's tunnel
      # named in the packet.
if self.is_new_id(remote_tunnel_status.last_rx_msg_id, msg.msg_id):
# Do we care about packets coming in out of order?
remote_tunnel_status.last_update_time = time.time()
remote_tunnel_status.last_rx_msg_id = msg.msg_id
# Then inspect the body of the packet that tells me which of
# my tunnel statuses the remote switch has seen.
if not self.is_new_id(remote_switch.last_rx_msg_id, msg.msg_id):
         # We've already seen newer messages. Ignore this one.
self.tracer.trace7("Got old message with id: %d (currently at %d)"
% (msg.msg_id, remote_switch.last_rx_msg_id))
return
remote_switch.last_rx_msg_id = msg.msg_id
for tunnel_key, is_alive in msg.tunnel_liveness.iteritems():
if tunnel_key not in remote_switch.egress_tunnels:
# They are telling us about one of our egress tunnels that
# we have no record of...
self.tracer.trace0("Got tunnel status for an unknown key: %r" %
tunnel_key)
continue
tunnel = remote_switch.egress_tunnels[tunnel_key]
tunnel.last_update_time = time.time()
# Check if the remote switch thinks our egress tunnel is
# up or down. If it changed, call our handlers!
if tunnel.is_alive == is_alive:
self.tracer.trace9("No change to liveness for tunnel %d" % tunnel_key)
continue
elif is_alive:
tunnel.is_alive = True
self.handle_tunnel_alive(src_ip, tunnel_key, tunnel)
else:
tunnel.is_alive = False
self.handle_tunnel_death(src_ip, tunnel_key, tunnel)
def resolve_egress_tunnel(self, tunnel):
self.tracer.trace8("Resolve the nexthop IP %s to an ethernet address" %
tunnel.nexthop_ip)
neighbor_key = eossdk.NeighborKey(
eossdk.IpAddr(tunnel.nexthop_ip), eossdk.IntfId())
neighbor_entry = self.neighbor_table_mgr.neighbor_entry_status(neighbor_key)
if neighbor_entry == eossdk.NeighborEntry():
self.tracer.trace8("Checking static ARP entries")
neighbor_entry = self.neighbor_table_mgr.neighbor_entry(neighbor_key)
if neighbor_entry == eossdk.NeighborEntry():
self.tracer.trace0("IP address %r has no ARP entry" %
tunnel.nexthop_ip)
assert False, "Unlearned nexthop IP %s" % tunnel.nexthop_ip
nexthop_eth_addr = neighbor_entry.eth_addr()
self.tracer.trace5("IP %s lives on %s" %
(tunnel.nexthop_ip, nexthop_eth_addr.to_string()))
tunnel.nexthop_eth_addr = nexthop_eth_addr.to_string()
self.tracer.trace8("Now resolving that MAC entry to an interface.")
# TODO: Is this necessary if we send it out of the "fabric"
# interface?
vlan_id = 1
mac_entry = self.mac_table_mgr.mac_entry(vlan_id, nexthop_eth_addr)
if mac_entry.intf() == eossdk.IntfId():
self.tracer.trace0("Mac entry %r not on any interface" %
tunnel.nexthop_eth_addr)
assert False, "Unlearned nexthop MAC %s" % tunnel.nexthop_eth_addr
intf = mac_entry.intf().to_string()
# Convert the interface names to the kernel interface names
intf = intf.replace("Ethernet", "et")
intf = intf.replace("Port-Channel", "po")
self.tracer.trace5("MAC entry %s is learned on inteface %r" %
(tunnel.nexthop_eth_addr, intf))
tunnel.egress_intf = intf
self.tracer.trace8("Looking up that interface's MAC address")
egress_eth_addr = self.eth_intf_mgr.eth_addr(mac_entry.intf())
if egress_eth_addr == eossdk.EthAddr():
assert False, "Interface %s has no MAC address" % intf
self.tracer.trace5("Intf %s has MAC address %s" %
(intf, egress_eth_addr.to_string()))
tunnel.egress_intf_eth_addr = egress_eth_addr.to_string()
def send_packet(self, dst_ip, tunnel, msg):
""" Wrap `msg` in a UDP-over-MPLS packet, using `dst_ip` and the tunnel's
MPLS label, and send the packet out of the tunnel's egress interface."""
self.tracer.trace8("Sending message %s" % str(msg))
payload = msg.serialize()
pkt = scapy.layers.l2.Ether(src=tunnel.egress_intf_eth_addr,
dst=tunnel.nexthop_eth_addr)
pkt = (pkt / MPLS(label=tunnel.mpls_label, ttl=64) /
scapy.layers.inet.IP(src=self.src_ip,
dst=dst_ip) /
scapy.layers.inet.UDP(dport=UDP_PORT) /
(payload))
      # In the real world we might make this non-blocking, but for now
      # we assume packets always get sent in one go. Also, it might be
# worth maintaining our own socket to the egress interface to
# save us the work of creating/tearing down raw sockets
# constantly.
scapy.sendrecv.sendp(pkt, iface=tunnel.egress_intf, verbose=0)
def process_config(self):
self.tracer.trace1("Processing configuration change on %s" %
self.config_file)
with open(self.config_file) as f:
cfg = json.loads(f.read())
if not self.initialized:
# Write the src_intf only once.
self.src_intf = cfg["src_intf"]
# Clear out the previous config:
self.remote_switches = {}
# And signify that we are a new process by changing our
      # advertised pid. It would be preferable to update only the
# newly configured tunnels, but that's more complicated for now.
self.pid -= 1
for rs in cfg["remote_switches"]:
dst_ip = rs["destination_ip"]
dst = RemoteSwitch(dst_ip)
for tunnel_key_str, tunnel_info in rs["tunnels"].iteritems():
tunnel_key = int(tunnel_key_str)
dst.egress_tunnels[tunnel_key] = EgressTunnel(
tunnel_info["label"], tunnel_info["nexthop_ip"])
self.remote_switches[dst_ip] = dst
if self.initialized:
self.resolve_config()
def resolve_config(self):
self.tracer.trace2("Resolving all of our configured tunnels")
for host in self.remote_switches.itervalues():
for tunnel in host.egress_tunnels.itervalues():
tunnel.last_update_time = time.time() + STARTUP_GRACEPERIOD
self.resolve_egress_tunnel(tunnel)
self.timeout_time_is(eossdk.now() + POLL_TIME)
def is_new_id(self, last_seen_id, new_id):
# Returns True if the new_id is larger than the last_seen_id, or
# the new_id has wrapped around.
return (last_seen_id < new_id) or ((last_seen_id - new_id) > (MAX_INT / 2))
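# Added worked example for is_new_id() (ids are 4-byte unsigned ints):
#   is_new_id(5, 6)        -> True   (normal increment)
#   is_new_id(6, 5)        -> False  (old or duplicate message)
#   is_new_id(MAX_INT, 1)  -> True   (counter wrapped around, since
#                                     MAX_INT - 1 > MAX_INT / 2)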
def main(args):
sdk = eossdk.Sdk()
_ = MplsTunnelLivenessAgent(sdk)
sdk.main_loop(args)
if __name__ == "__main__":
main(sys.argv)
|
|
================================================================================
html_help.py
================================================================================
import time
import Zam
class html_table:
def __init__(self, rows, columns, indent, style):
self.__matrix = Zam.matrix(rows, columns, '')
self.__indent = indent
self.__style = style
self.__table_option = ''
self.__row_option = ''
self.__column_option = ''
def mutate(self, row, column, text):
assert type(text) is str
self.__matrix[row][column] = text
return self
def access(self, row, column):
return self.__matrix[row][column]
def table_option(self, string):
assert type(string) is str
self.__table_option = string
return self
def row_option(self, string):
assert type(string) is str
self.__row_option = string
return self
def column_option(self, string):
assert type(string) is str
self.__column_option = string
return self
def html(self):
html = self.__style * self.__indent + '<table'
if self.__table_option:
html += ' ' + self.__table_option
html += '>\n'
for row in self.__matrix:
html += self.__style * (self.__indent + 1) + '<tr'
if self.__row_option:
html += ' ' + self.__row_option
html += '>\n'
for item in row:
html += self.__style * (self.__indent + 2) + '<td'
if self.__column_option:
html += ' ' + self.__column_option
html += '>\n'
html += ''.join([self.__style * (self.__indent + 3) + line + '\n' for line in item.splitlines()])
html += self.__style * (self.__indent + 2) + '</td>\n'
html += self.__style * (self.__indent + 1) + '</tr>\n'
return html + self.__style * self.__indent + '</table>'
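# Added usage sketch: 'indent' is the starting indent level and 'style' is the
# string repeated once per level (e.g. four spaces). Values here are made up.
#
#   >>> t = html_table(1, 2, 0, '    ')
#   >>> t.mutate(0, 0, 'name').mutate(0, 1, 'value')
#   >>> print t.html()      # emits nested <table>/<tr>/<td> markup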
class html_month:
def __init__(self, year, month, indent, style):
self.matrix = matrix = self.__make_matrix(year, month)
self.__table = html_table(len(matrix) + 1, 7, indent, style)
for index, item in enumerate(('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday')):
self.__table.mutate(0, index, '<b>' + item + '</b>')
for row in range(len(matrix)):
for column in range(7):
if matrix[row][column]:
self.__table.mutate(row + 1, column, '<b>' + str(matrix[row][column]).zfill(2) + '</b>\n<hr>\n')
def __make_matrix(self, year, month):
rows = [Zam.array(7, 0)]
row = 0
now = time.localtime(time.mktime(time.strptime(str(year).zfill(2) + ' ' + str(month).zfill(2) + ' 01', '%y %m %d')) + 14400)
self.__first_day = (now.tm_wday + 1) % 7
once = False
while now.tm_mon == month:
if once:
if now.tm_wday == 6:
rows.append(Zam.array(7, 0))
row += 1
else:
once = True
rows[row][(now.tm_wday + 1) % 7] = now.tm_mday
self.__days_in_month = now.tm_mday
now = time.localtime(time.mktime(now) + 86400)
return rows
def mutate(self, day, text):
row, column = self.__get_pos(day)
self.__table.mutate(row, column, self.__table.access(row, column)[:15] + text)
return self
def access(self, day):
row, column = self.__get_pos(day)
return self.__table.access(row, column)[15:]
def __get_pos(self, day):
assert 1 <= day <= self.__days_in_month
pos = self.__first_day - 1 + day
return pos / 7 + 1, pos % 7
def table_option(self, string):
self.__table.table_option(string)
return self
def row_option(self, string):
self.__table.row_option(string)
return self
def column_option(self, string):
self.__table.column_option(string)
return self
def html(self):
return self.__table.html()
================================================================================
Zam.py
================================================================================
# Name & Description
# ==================
'''Support module for array and matrix use.
This module provides two classes that emulate one and two
dimensional lists with fixed sizes but mutable internals.'''
# Data & Imports
# ==============
__all__ = ['array', 'matrix']
__version__ = '1.1'
import sys
# Public Names
# ============
class array(object):
'''array(length) -> new array
array(length, value) -> initialized from value'''
def __init__(self, length, value=None):
'''x.__init__(...) initializes x'''
self.__data = range(length)
for index in range(length):
self.__data[index] = value
def __repr__(self):
'''x.__repr__() <==> repr(x)'''
return repr(self.__data)
def __len__(self):
'''x.__len__() <==> len(x)'''
return len(self.__data)
def __getitem__(self, key):
'''x.__getitem__(y) <==> x[y]'''
return self.__data[key]
def __setitem__(self, key, value):
'''x.__setitem__(i, y) <==> x[i]=y'''
self.__data[key] = value
def __delitem__(self, key):
'''x.__delitem__(y) <==> del x[y]'''
self.__data[key] = None
def __iter__(self):
'''x.__iter__() <==> iter(x)'''
return iter(self.__data)
def __contains__(self, value):
'''x.__contains__(y) <==> y in x'''
return value in self.__data
class matrix(object):
'''matrix(rows, columns) -> new matrix
matrix(rows, columns, value) -> initialized from value'''
def __init__(self, rows, columns, value=None):
'''x.__init__(...) initializes x'''
self.__data = array(rows)
for index in range(rows):
self.__data[index] = array(columns, value)
def __repr__(self):
'''x.__repr__() <==> repr(x)'''
return repr(self.__data)
def __len__(self):
'''x.__len__() <==> len(x)'''
return len(self.__data)
def __getitem__(self, key):
'''x.__getitem__(y) <==> x[y]'''
return self.__data[key]
def __setitem__(self, key, value):
'''x.__setitem__(i, y) <==> x[i]=y'''
self.__data[key] = array(len(self.__data[key]), value)
def __delitem__(self, key):
'''x.__delitem__(y) <==> del x[y]'''
self.__data[key] = array(len(self.__data[key]))
def __iter__(self):
'''x.__iter__() <==> iter(x)'''
return iter(self.__data)
def __contains__(self, value):
'''x.__contains__(y) <==> y in x'''
for item in self.__data:
if value in item:
return True
return False
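# Added usage sketch: both containers have a fixed length but mutable
# contents; deleting an item resets it instead of shrinking the container.
#
#   >>> a = array(3, 0)         # -> [0, 0, 0]
#   >>> a[1] = 5                # -> [0, 5, 0]
#   >>> del a[1]                # -> [0, None, 0]
#   >>> m = matrix(2, 2, '')    # 2x2 grid of empty strings
#   >>> m[0][1] = 'x'
#   >>> 'x' in m
#   True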
# Private Names
# =============
def main():
print 'Content-Type: text/plain'
print
print file(sys.argv[0]).read()
# Execute Main
# ============
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2014 Brainly.com sp. z o.o.
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Global imports:
import copy
import mock
import os
import pymisc
import sys
import unittest
import dns
from pymisc.script import ScriptConfiguration
from collections import namedtuple
# To perform local imports first we need to fix PYTHONPATH:
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.abspath(pwd + '/../../modules/'))
# Local imports:
import file_paths as paths
import check_zonesync
class TestArgumentParsing(unittest.TestCase):
def test_verbose_flag(self):
ret = check_zonesync.parse_command_line(script_name="test-scriptname",
args=["-c", "/path/to/test",
"--verbose"])
self.assertEqual({'config_file': '/path/to/test',
'std_err': False,
'verbose': True},
ret)
def test_stderr_flag(self):
ret = check_zonesync.parse_command_line(script_name="other-scriptname",
args=["-c", "/path/to/test", "-s"])
self.assertEqual({'config_file': '/path/to/test',
'std_err': True,
'verbose': False},
ret)
def test_minimal_invocation(self):
ret = check_zonesync.parse_command_line(script_name="other-scriptname",
args=["-c", "/path/to/other/test"])
self.assertEqual({'config_file': '/path/to/other/test',
'std_err': False,
'verbose': False},
ret)
@mock.patch('dns.tsigkeyring.from_text')
@mock.patch('dns.zone.from_xfr')
@mock.patch('dns.query.xfr')
class TestZoneDataFetch(unittest.TestCase):
def test_input_argument_sanity_checking(self, DNSQueryXfrMock,
DNSZoneFromXfrMock,
DNSKeyringFromTextMock):
with self.assertRaises(pymisc.script.FatalException):
check_zonesync.fetch_domain_data(zone_file="/path/to/zonefile",
host="example.com")
def test_zone_file_parsing(self, DNSQueryXfrMock,
DNSZoneFromXfrMock,
DNSKeyringFromTextMock):
with self.assertRaises(check_zonesync.ZoneParseFailed):
check_zonesync.fetch_domain_data(zone_file=paths.TEST_ZONE_BAD)
check_zonesync.fetch_domain_data(zone_file=paths.TEST_ZONE_GOOD)
def test_zone_axfr_parsing_with_key(self, DNSQueryXfrMock,
DNSZoneFromXfrMock,
DNSKeyringFromTextMock):
DNSKeyringFromTextMock.return_value = "test-keyring"
check_zonesync.fetch_domain_data(host="example-server.com",
zone_name="example.com",
port=53,
key_id="example.com-key_id",
key_data="1234567890",
key_algo="sample-algo",
)
DNSQueryXfrMock.assert_called_once_with(zone='example.com',
keyalgorithm='sample-algo',
keyring='test-keyring',
where='example-server.com',
keyname='example.com-key_id',
port=53)
def test_zone_axfr_parsing_without_key(self, DNSQueryXfrMock,
DNSZoneFromXfrMock,
DNSKeyringFromTextMock):
check_zonesync.fetch_domain_data(host="example-server.com",
zone_name="example.com",
port=53,
)
DNSQueryXfrMock.assert_called_once_with(zone='example.com',
where='example-server.com',
port=53,
keyalgorithm=None,
keyring=None,
keyname=None,)
class TestZoneComparing(unittest.TestCase):
def setUp(self):
self.reference_zone = dns.zone.from_file(paths.TEST_ZONE_GOOD)
def test_zones_are_the_same(self):
test_zone = dns.zone.from_file(paths.TEST_ZONE_GOOD)
ret = check_zonesync.compare_domain_data(self.reference_zone, test_zone)
self.assertEqual(ret.record_types, set([]))
self.assertEqual(ret.full, [])
def test_soa_differs(self):
test_zone = dns.zone.from_file(paths.TEST_ZONE_GOOD_SOA_DIFFERS)
ret = check_zonesync.compare_domain_data(self.reference_zone, test_zone)
self.assertEqual(ret.record_types, set(["SOA"]))
self.assertEqual(len(ret.full), 2)
def test_records_missing(self):
test_zone = dns.zone.from_file(paths.TEST_ZONE_GOOD_DELETED_RECORD)
ret = check_zonesync.compare_domain_data(self.reference_zone, test_zone)
self.assertEqual(ret.record_types, set(["CNAME"]))
self.assertEqual(len(ret.full), 1)
def test_records_changed(self):
test_zone = dns.zone.from_file(paths.TEST_ZONE_GOOD_CHANGED_RECORD)
ret = check_zonesync.compare_domain_data(self.reference_zone, test_zone)
self.assertEqual(ret.record_types, set(["A"]))
self.assertEqual(len(ret.full), 2)
def test_records_added(self):
test_zone = dns.zone.from_file(paths.TEST_ZONE_GOOD_ADDED_RECORD)
ret = check_zonesync.compare_domain_data(self.reference_zone, test_zone)
self.assertEqual(ret.record_types, set(['MX']))
self.assertEqual(len(ret.full), 1)
class TestHostDataVerification(unittest.TestCase):
def setUp(self):
self.host_name = "example-host"
self.zone = "example.com"
self.hash = {"ip": "1.2.3.4",
"port": 53,
"key-id": "example-key",
"key-data": "1234567890abcdef",
"master": True,
}
def test_verify_host_data_correct(self):
msg = check_zonesync._verify_host_data(host_hash=self.hash,
zone=self.zone,
host_name=self.host_name)
self.assertEqual(msg, [])
def test_verify_host_data_bad_ip(self):
self.hash["ip"] = "1000.1.2.3.4"
msg = check_zonesync._verify_host_data(host_hash=self.hash,
zone=self.zone,
host_name=self.host_name)
self.assertEqual(len(msg), 1)
def test_verify_host_data_missing_ip(self):
del self.hash["ip"]
msg = check_zonesync._verify_host_data(host_hash=self.hash,
zone=self.zone,
host_name=self.host_name)
self.assertEqual(len(msg), 1)
def test_verify_host_data_malformed_port(self):
self.hash["port"] = "53."
msg = check_zonesync._verify_host_data(host_hash=self.hash,
zone=self.zone,
host_name=self.host_name)
self.assertEqual(len(msg), 1)
def test_verify_host_data_incomplete_key_set(self):
del self.hash["key-data"]
msg = check_zonesync._verify_host_data(host_hash=self.hash,
zone=self.zone,
host_name=self.host_name)
self.assertEqual(len(msg), 1)
def test_verify_host_data_without_keys(self):
del self.hash["key-id"]
del self.hash["key-data"]
msg = check_zonesync._verify_host_data(host_hash=self.hash,
zone=self.zone,
host_name=self.host_name)
self.assertEqual(msg, [])
def test_verify_host_data_malformed_key_id(self):
self.hash["key-id"] = 'this is not a proper key'
msg = check_zonesync._verify_host_data(host_hash=self.hash,
zone=self.zone,
host_name=self.host_name)
self.assertEqual(len(msg), 1)
def test_verify_host_data_malformed_key_data(self):
self.hash["key-data"] = 'aasfasdfdasf-asdfdas dasf'
msg = check_zonesync._verify_host_data(host_hash=self.hash,
zone=self.zone,
host_name=self.host_name)
self.assertEqual(len(msg), 1)
class TestConfigurationVerification(unittest.TestCase):
def setUp(self):
# Load test configuration
ScriptConfiguration.load_config(paths.TEST_CONFIG_FILE)
self.conf_hash = ScriptConfiguration.get_config()
self.mocks = {}
for patched in ['check_zonesync._verify_host_data',
'os.access',
'os.path.exists', ]:
patcher = mock.patch(patched)
self.mocks[patched] = patcher.start()
self.addCleanup(patcher.stop)
self.mocks["check_zonesync._verify_host_data"].return_value = []
self.mocks["os.access"].return_value = True
self.mocks["os.path.exists"].return_value = True
def test_proper_configuration(self):
msg = check_zonesync._verify_conf(self.conf_hash)
self.assertEqual(msg, [])
def test_malformed_timeout(self):
self.conf_hash['timeout'] = 'not an integer'
msg = check_zonesync._verify_conf(self.conf_hash)
self.assertEqual(len(msg), 1)
def test_empty_zone_list(self):
self.conf_hash['zones'] = {}
msg = check_zonesync._verify_conf(self.conf_hash)
self.assertEqual(len(msg), 1)
def test_malformed_zone_name(self):
self.conf_hash['zones']['a bad zone'] = self.conf_hash[
'zones'].pop('test1.zone.pl')
msg = check_zonesync._verify_conf(self.conf_hash)
self.assertEqual(len(msg), 1)
def test_zone_without_masters(self):
del self.conf_hash['zones']['test1.zone.pl']['zonehosts']['master1']
del self.conf_hash['zones']['test1.zone.pl']['zonehosts']['master2']
msg = check_zonesync._verify_conf(self.conf_hash)
self.assertEqual(len(msg), 1)
def test_zone_without_datafile(self):
del self.conf_hash['zones']['test1.zone.pl']['zonedata']
del self.conf_hash['zones']['test1.zone.pl']['zonehosts']['master1']
# Removing slaves is also necessary
del self.conf_hash['zones']['test1.zone.pl']['zonehosts']['slavehost1']
del self.conf_hash['zones']['test1.zone.pl']['zonehosts']['slavehost2']
del self.conf_hash['zones']['test1.zone.pl']['zonehosts']['slavehost3']
msg = check_zonesync._verify_conf(self.conf_hash)
self.assertEqual(len(msg), 1)
def test_zone_file_does_not_exists(self):
self.mocks["os.path.exists"].return_value = False
msg = check_zonesync._verify_conf(self.conf_hash)
self.assertEqual(len(msg), 1)
class TestMainLogic(unittest.TestCase):
def setUp(self):
self.mocks = {}
for patched in ['check_zonesync.ScriptStatus',
'check_zonesync.ScriptTimeout',
'check_zonesync.ScriptLock',
'check_zonesync._verify_conf',
'check_zonesync.fetch_domain_data',
'check_zonesync.compare_domain_data',
'check_zonesync.sys.exit',
'logging.error',
'logging.info',
'logging.warn', ]:
patcher = mock.patch(patched)
self.mocks[patched] = patcher.start()
self.addCleanup(patcher.stop)
def terminate_script(exit_status):
raise SystemExit(exit_status)
self.mocks["check_zonesync.sys.exit"].side_effect = terminate_script
self.mocks["check_zonesync._verify_conf"].return_value = []
def terminate_script(*unused):
raise SystemExit(216)
self.mocks["check_zonesync.ScriptStatus"].notify_immediate.side_effect = \
terminate_script
self.mocks["check_zonesync.ScriptStatus"].notify_agregated.side_effect = \
terminate_script
self.mocks["check_zonesync.fetch_domain_data"].return_value = "fooBar"
self.zonediff_ret = namedtuple('ZoneDiff', ['full', 'record_types'])
self.zonediff_ret.record_types = set()
self.zonediff_ret.full = []
self.mocks["check_zonesync.compare_domain_data"].return_value = self.zonediff_ret
def test_all_ok(self):
with self.assertRaises(SystemExit):
check_zonesync.main(paths.TEST_CONFIG_FILE)
self.assertFalse(
self.mocks["check_zonesync.ScriptStatus"].notify_immediate.called)
self.mocks["check_zonesync.ScriptStatus"].notify_agregated.assert_called_once_with()
def test_configuration_issues(self):
self.mocks["check_zonesync._verify_conf"].return_value = \
["There is a problem with configuration"]
with self.assertRaises(SystemExit):
check_zonesync.main(paths.TEST_CONFIG_FILE)
self.mocks["check_zonesync.ScriptStatus"].notify_immediate.assert_called_once_with(
'unknown',
'Configuration file contains errors: There is a problem with configuration')
def test_zone_data_parsing_problems(self):
def raise_parserror(zone_name, zone_file, *unused_p, **unused_kw):
if not unused_p and not unused_kw:
raise check_zonesync.ZoneParseFailed
else:
# With this test data it should not happen
self.fail("check_zonesync.fetch_domain_data called with non-file"
" arguments")
self.mocks["check_zonesync.fetch_domain_data"].side_effect = raise_parserror
with self.assertRaises(SystemExit):
check_zonesync.main(paths.TEST_CONFIG_FILE)
self.assertFalse(self.mocks["check_zonesync.ScriptStatus"].notify_immediate.called)
self.mocks["check_zonesync.ScriptStatus"].notify_agregated.assert_called_once_with()
self.mocks["check_zonesync.ScriptStatus"].update.assert_has_calls(
mock.call('critical', 'Failed to load zone file for zone test1.zone.pl: .'))
def test_axfr_zone_transfer_problems(self):
def raise_transfererror(zone_name=None,
zone_file=None,
host=None,
port=None,
key_id=None,
key_data=None,
key_algo=None):
if zone_name == 'test1.zone.pl' and \
zone_file == '/tmp/example.com.zone':
# We are not testing this here:
pass
else:
raise check_zonesync.ZoneTransferFailed()
self.mocks["check_zonesync.fetch_domain_data"].side_effect = raise_transfererror
with self.assertRaises(SystemExit):
check_zonesync.main(paths.TEST_CONFIG_FILE)
self.assertFalse(self.mocks["check_zonesync.ScriptStatus"].notify_immediate.called)
self.mocks["check_zonesync.ScriptStatus"].notify_agregated.assert_called_once_with()
all_updates_were_critical = all([x[0][0] == 'critical'
for x in self.mocks["check_zonesync.ScriptStatus"].update.call_args_list])
self.assertTrue(
all_updates_were_critical and
len(self.mocks["check_zonesync.ScriptStatus"].update.call_args_list) > 0)
def test_no_masterhost_and_zonedata(self):
def raise_transfererror(zone_name=None,
zone_file=None,
host=None,
port=None,
key_id=None,
key_data=None,
key_algo=None):
if zone_name == 'test1.zone.pl' and \
zone_file == '/tmp/example.com.zone':
# We are not testing this here:
pass
elif host in ['master1', 'master2']:
# These we want out:
raise check_zonesync.ZoneTransferFailed()
else:
pass
self.mocks["check_zonesync.fetch_domain_data"].side_effect = \
raise_transfererror
with self.assertRaises(SystemExit):
check_zonesync.main(paths.TEST_NOZONEFILE_CONFIG_FILE)
self.assertFalse(
self.mocks["check_zonesync.ScriptStatus"].notify_immediate.called)
self.mocks["check_zonesync.ScriptStatus"].notify_agregated.assert_called_once_with()
all_updates_were_critical = all([x[0][0] == 'critical' for x in
self.mocks["check_zonesync.ScriptStatus"].update.call_args_list])
self.assertTrue(
all_updates_were_critical and
len(self.mocks["check_zonesync.ScriptStatus"].update.call_args_list) > 0)
def test_soa_records_differ(self):
self.zonediff_ret.record_types = set(['SOA', ])
self.zonediff_ret.full = ['foo', 'bar', ]
with self.assertRaises(SystemExit):
check_zonesync.main(paths.TEST_CONFIG_FILE)
self.assertFalse(self.mocks["check_zonesync.ScriptStatus"].notify_immediate.called)
self.mocks["check_zonesync.ScriptStatus"].notify_agregated.assert_called_once_with()
        all_updates_were_warn = all([x[0][0] == 'warn'
                        for x in self.mocks["check_zonesync.ScriptStatus"].update.call_args_list])
        self.assertTrue(all_updates_were_warn and
len(self.mocks["check_zonesync.ScriptStatus"].update.call_args_list) > 0)
def test_records_differ(self):
self.zonediff_ret.record_types = set(['SOA', 'A'])
self.zonediff_ret.full = ['foo', 'bar', ]
with self.assertRaises(SystemExit):
check_zonesync.main(paths.TEST_CONFIG_FILE)
self.assertFalse(self.mocks["check_zonesync.ScriptStatus"].notify_immediate.called)
self.mocks["check_zonesync.ScriptStatus"].notify_agregated.assert_called_once_with()
all_updates_were_critical = all([x[0][0] == 'critical'
for x in self.mocks["check_zonesync.ScriptStatus"].update.call_args_list])
self.assertTrue(all_updates_were_critical and
len(self.mocks["check_zonesync.ScriptStatus"].update.call_args_list) > 0)
if __name__ == '__main__':
unittest.main()
|
|
import collections
import os
import sys
import numpy
try:
from PIL import Image
available = True
except ImportError as e:
available = False
_import_error = e
import chainer
from chainer.dataset.convert import concat_examples
from chainer.dataset import download
from chainer import function
from chainer.functions.activation.relu import relu
from chainer.functions.activation.softmax import softmax
from chainer.functions.array.reshape import reshape
from chainer.functions.math.average import average
from chainer.functions.noise.dropout import dropout
from chainer.functions.normalization.local_response_normalization import (
local_response_normalization)
from chainer.functions.pooling.average_pooling_2d import average_pooling_2d
from chainer.functions.pooling.max_pooling_2d import max_pooling_2d
from chainer.initializers import constant
from chainer.initializers import uniform
from chainer import link
from chainer.links.connection.convolution_2d import Convolution2D
from chainer.links.connection.inception import Inception
from chainer.links.connection.linear import Linear
from chainer.serializers import npz
from chainer.utils import argument
from chainer.utils import imgproc
from chainer.variable import Variable
class GoogLeNet(link.Chain):
"""A pre-trained GoogLeNet model provided by BVLC.
When you specify the path of the pre-trained chainer model serialized as
a ``.npz`` file in the constructor, this chain model automatically
initializes all the parameters with it.
This model would be useful when you want to extract a semantic feature
vector per image, or fine-tune the model on a different dataset.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
GoogLeNet, which is also called Inception-v1, is an architecture of
    convolutional neural network proposed in 2014. This model is relatively
    lightweight and requires a small memory footprint during training compared
    with modern architectures such as ResNet. Therefore, if you fine-tune your
    network based on a model pre-trained on ImageNet and need to train it with
    a large batch size, GoogLeNet may be useful. On the other hand, if you just
    want an off-the-shelf classifier, we recommend ResNet50 or other
    models, since they are more accurate than GoogLeNet.
The original model is provided here:
`<https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically downloads the caffemodel from the internet.
Note that in this case the converted chainer model is stored
on ``$CHAINER_DATASET_ROOT/pfnet/chainer/models`` directory,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
            as an environment variable. The converted chainer model is
automatically used from the second time.
If the argument is specified as ``None``, all the parameters
are not initialized by the pre-trained model, but the default
initializer used in BVLC, i.e.,
``chainer.initializers.LeCunUniform(scale=1.0)``.
Note that, in Caffe, when weight_filler is specified as
"xavier" type without variance_norm parameter, the weights are
initialized by Uniform(-s, s), where
:math:`s = \\sqrt{\\frac{3}{fan_{in}}}` and :math:`fan_{in}` is the
number of input units. This corresponds to LeCunUniform in Chainer
but not GlorotUniform.
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto'):
super(GoogLeNet, self).__init__()
if pretrained_model:
# As a sampling process is time-consuming,
# we employ a zero initializer for faster computation.
kwargs = {'initialW': constant.Zero()}
else:
# employ default initializers used in BVLC. For more detail, see
# https://github.com/chainer/chainer/pull/2424#discussion_r109642209
kwargs = {'initialW': uniform.LeCunUniform(scale=1.0)}
with self.init_scope():
self.conv1 = Convolution2D(3, 64, 7, stride=2, pad=3, **kwargs)
self.conv2_reduce = Convolution2D(64, 64, 1, **kwargs)
self.conv2 = Convolution2D(64, 192, 3, stride=1, pad=1, **kwargs)
self.inc3a = Inception(192, 64, 96, 128, 16, 32, 32)
self.inc3b = Inception(256, 128, 128, 192, 32, 96, 64)
self.inc4a = Inception(480, 192, 96, 208, 16, 48, 64)
self.inc4b = Inception(512, 160, 112, 224, 24, 64, 64)
self.inc4c = Inception(512, 128, 128, 256, 24, 64, 64)
self.inc4d = Inception(512, 112, 144, 288, 32, 64, 64)
self.inc4e = Inception(528, 256, 160, 320, 32, 128, 128)
self.inc5a = Inception(832, 256, 160, 320, 32, 128, 128)
self.inc5b = Inception(832, 384, 192, 384, 48, 128, 128)
self.loss3_fc = Linear(1024, 1000, **kwargs)
self.loss1_conv = Convolution2D(512, 128, 1, **kwargs)
self.loss1_fc1 = Linear(2048, 1024, **kwargs)
self.loss1_fc2 = Linear(1024, 1000, **kwargs)
self.loss2_conv = Convolution2D(528, 128, 1, **kwargs)
self.loss2_fc1 = Linear(2048, 1024, **kwargs)
self.loss2_fc2 = Linear(1024, 1000, **kwargs)
if pretrained_model == 'auto':
_retrieve(
'bvlc_googlenet.npz',
'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel',
self)
elif pretrained_model:
npz.load_npz(pretrained_model, self)
@property
def functions(self):
return collections.OrderedDict([
('conv1', [self.conv1, relu]),
('pool1', [_max_pooling_2d, _local_response_normalization]),
('conv2_reduce', [self.conv2_reduce, relu]),
('conv2', [self.conv2, relu, _local_response_normalization]),
('pool2', [_max_pooling_2d]),
('inception_3a', [self.inc3a]),
('inception_3b', [self.inc3b]),
('pool3', [_max_pooling_2d]),
('inception_4a', [self.inc4a]),
('inception_4b', [self.inc4b]),
('inception_4c', [self.inc4c]),
('inception_4d', [self.inc4d]),
('inception_4e', [self.inc4e]),
('pool4', [_max_pooling_2d]),
('inception_5a', [self.inc5a]),
('inception_5b', [self.inc5b]),
('pool5', [_average_pooling_2d_k7]),
('loss3_fc', [_dropout, self.loss3_fc]),
('prob', [softmax]),
# Since usually the following outputs are not used, they are put
# after 'prob' to be skipped for efficiency.
('loss1_fc2', [_average_pooling_2d_k5, self.loss1_conv, relu,
self.loss1_fc1, relu, self.loss1_fc2]),
('loss2_fc2', [_average_pooling_2d_k5, self.loss2_conv, relu,
self.loss2_fc1, relu, self.loss2_fc2])
])
@property
def available_layers(self):
return list(self.functions.keys())
@classmethod
def convert_caffemodel_to_npz(cls, path_caffemodel, path_npz):
"""Converts a pre-trained caffemodel to a chainer model.
Args:
path_caffemodel (str): Path of the pre-trained caffemodel.
path_npz (str): Path of the converted chainer model.
"""
# As CaffeFunction uses shortcut symbols,
# we import CaffeFunction here.
from chainer.links.caffe.caffe_function import CaffeFunction
caffemodel = CaffeFunction(path_caffemodel)
chainermodel = cls(pretrained_model=None)
_transfer_googlenet(caffemodel, chainermodel)
npz.save_npz(path_npz, chainermodel, compression=False)
def forward(self, x, layers=None, **kwargs):
"""forward(self, x, layers=['prob'])
Computes all the feature maps specified by ``layers``.
.. warning::
``train`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('train', train)``.
See :func:`chainer.using_config`.
Args:
x (~chainer.Variable): Input variable. It should be prepared by
``prepare`` function.
layers (list of str): The list of layer names you want to extract.
Returns:
            Dictionary of ~chainer.Variable: A dictionary in which
the key contains the layer name and the value contains
the corresponding feature map variable.
"""
if layers is None:
layers = ['prob']
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
h = x
activations = {}
inception_4a_cache = None
inception_4d_cache = None
target_layers = set(layers)
for key, funcs in self.functions.items():
if len(target_layers) == 0:
break
if key == 'loss1_fc2':
h = inception_4a_cache
elif key == 'loss2_fc2':
h = inception_4d_cache
for func in funcs:
h = func(h)
if key in target_layers:
activations[key] = h
target_layers.remove(key)
if key == 'inception_4a':
inception_4a_cache = h
elif key == 'inception_4d':
inception_4d_cache = h
return activations
def extract(self, images, layers=None, size=(224, 224), **kwargs):
"""extract(self, images, layers=['pool5'], size=(224, 224))
Extracts all the feature maps of given images.
        The difference from directly executing ``forward`` is that
        this method accepts images as input and automatically
        transforms them to a proper variable. That is,
        it can be seen as a shortcut method that implicitly calls the
        ``prepare`` and ``forward`` functions.
        Unlike the ``predict`` method, this method does not override
``chainer.config.train`` and ``chainer.config.enable_backprop``
configuration. If you want to extract features without updating
model parameters, you need to manually set configuration when
calling this method as follows:
.. code-block:: python
# model is an instance of `GoogLeNet`
with chainer.using_config('train', False):
with chainer.using_config('enable_backprop', False):
feature = model.extract([image])
.. warning::
``train`` and ``volatile`` arguments are not supported
anymore since v2. Instead, users should configure
training and volatile modes with ``train`` and
``enable_backprop``, respectively.
            Note that the default behavior of this method is different
            between v1 and later versions. Specifically,
            the default values of the ``train`` and ``volatile`` arguments
            in v1 were ``False`` and ``OFF``, while that of
            ``chainer.config.train`` is ``True``.
            Therefore, users need to explicitly switch ``train``
            to ``False`` to run the code in test mode and turn off
            computational graph construction.
See the `upgrade guide <https://docs.chainer.org/en/stable\
/upgrade_v2.html#training-mode-is-configured-by-a-thread-local-flag>`_.
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
layers (list of str): The list of layer names you want to extract.
size (pair of ints): The resolution of resized images used as
an input of CNN. All the given images are not resized
if this argument is ``None``, but the resolutions of
all the images should be the same.
Returns:
            Dictionary of ~chainer.Variable: A dictionary in which
the key contains the layer name and the value contains
the corresponding feature map variable.
"""
if layers is None:
layers = ['pool5']
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config',
volatile='volatile argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
x = concat_examples([prepare(img, size=size) for img in images])
x = Variable(self.xp.asarray(x))
return self(x, layers=layers)
def predict(self, images, oversample=True):
"""Computes all the probabilities of given images.
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
When you specify a color image as a :class:`numpy.ndarray`,
make sure that color order is RGB.
oversample (bool): If ``True``, it averages results across
center, corners, and mirrors. Otherwise, it uses only the
center.
Returns:
~chainer.Variable: Output that contains the class probabilities
of given images.
"""
x = concat_examples([prepare(img, size=(256, 256)) for img in images])
if oversample:
x = imgproc.oversample(x, crop_dims=(224, 224))
else:
x = x[:, :, 16:240, 16:240]
# Use no_backprop_mode to reduce memory consumption
with function.no_backprop_mode(), chainer.using_config('train', False):
x = Variable(self.xp.asarray(x))
y = self(x, layers=['prob'])['prob']
if oversample:
n = y.data.shape[0] // 10
y_shape = y.data.shape[1:]
y = reshape(y, (n, 10) + y_shape)
y = average(y, axis=1)
return y
def prepare(image, size=(224, 224)):
"""Converts the given image to the numpy array for GoogLeNet.
    Note that you have to call this function before ``forward``
    because the pre-trained GoogLeNet model requires resizing the given
    image, converting RGB to BGR, subtracting the mean,
    and permuting the dimensions before calling it.
Args:
image (PIL.Image or numpy.ndarray): Input image.
If an input is ``numpy.ndarray``, its shape must be
``(height, width)``, ``(height, width, channels)``,
or ``(channels, height, width)``, and
the order of the channels must be RGB.
size (pair of ints): Size of converted images.
If ``None``, the given image is not resized.
Returns:
numpy.ndarray: The converted output array.
"""
if not available:
raise ImportError('PIL cannot be loaded. Install Pillow!\n'
'The actual import error is as follows:\n' +
str(_import_error))
dtype = chainer.get_dtype()
if isinstance(image, numpy.ndarray):
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0, :, :]
elif image.shape[0] == 3:
image = image.transpose((1, 2, 0))
image = Image.fromarray(image.astype(numpy.uint8))
image = image.convert('RGB')
if size:
image = image.resize(size)
image = numpy.asarray(image, dtype=dtype)
image = image[:, :, ::-1]
image -= numpy.array([104.0, 117.0, 123.0], dtype=dtype) # BGR
image = image.transpose((2, 0, 1))
return image
def _transfer_inception(src, dst, names):
for name in names:
chain = getattr(dst, 'inc{}'.format(name))
src_prefix = 'inception_{}/'.format(name)
chain.conv1.W.data[:] = src[src_prefix + '1x1'].W.data
chain.conv1.b.data[:] = src[src_prefix + '1x1'].b.data
chain.proj3.W.data[:] = src[src_prefix + '3x3_reduce'].W.data
chain.proj3.b.data[:] = src[src_prefix + '3x3_reduce'].b.data
chain.conv3.W.data[:] = src[src_prefix + '3x3'].W.data
chain.conv3.b.data[:] = src[src_prefix + '3x3'].b.data
chain.proj5.W.data[:] = src[src_prefix + '5x5_reduce'].W.data
chain.proj5.b.data[:] = src[src_prefix + '5x5_reduce'].b.data
chain.conv5.W.data[:] = src[src_prefix + '5x5'].W.data
chain.conv5.b.data[:] = src[src_prefix + '5x5'].b.data
chain.projp.W.data[:] = src[src_prefix + 'pool_proj'].W.data
chain.projp.b.data[:] = src[src_prefix + 'pool_proj'].b.data
def _transfer_googlenet(src, dst):
# 1 #################################################################
dst.conv1.W.data[:] = src['conv1/7x7_s2'].W.data
dst.conv1.b.data[:] = src['conv1/7x7_s2'].b.data
# 2 #################################################################
dst.conv2_reduce.W.data[:] = src['conv2/3x3_reduce'].W.data
dst.conv2_reduce.b.data[:] = src['conv2/3x3_reduce'].b.data
dst.conv2.W.data[:] = src['conv2/3x3'].W.data
dst.conv2.b.data[:] = src['conv2/3x3'].b.data
# 3, 4, 5 ###########################################################
_transfer_inception(src, dst, ['3a', '3b',
'4a', '4b', '4c', '4d', '4e',
'5a', '5b'])
# outputs ############################################################
dst.loss1_conv.W.data[:] = src['loss1/conv'].W.data
dst.loss1_conv.b.data[:] = src['loss1/conv'].b.data
dst.loss1_fc1.W.data[:] = src['loss1/fc'].W.data
dst.loss1_fc1.b.data[:] = src['loss1/fc'].b.data
dst.loss1_fc2.W.data[:] = src['loss1/classifier'].W.data
dst.loss1_fc2.b.data[:] = src['loss1/classifier'].b.data
dst.loss2_conv.W.data[:] = src['loss2/conv'].W.data
dst.loss2_conv.b.data[:] = src['loss2/conv'].b.data
dst.loss2_fc1.W.data[:] = src['loss2/fc'].W.data
dst.loss2_fc1.b.data[:] = src['loss2/fc'].b.data
dst.loss2_fc2.W.data[:] = src['loss2/classifier'].W.data
dst.loss2_fc2.b.data[:] = src['loss2/classifier'].b.data
dst.loss3_fc.W.data[:] = src['loss3/classifier'].W.data
dst.loss3_fc.b.data[:] = src['loss3/classifier'].b.data
def _max_pooling_2d(x):
return max_pooling_2d(x, ksize=3, stride=2)
def _local_response_normalization(x):
return local_response_normalization(x, n=5, k=1, alpha=1e-4 / 5)
def _average_pooling_2d_k5(x):
return average_pooling_2d(x, ksize=5, stride=3)
def _average_pooling_2d_k7(x):
return average_pooling_2d(x, ksize=7, stride=1)
def _dropout(x):
return dropout(x, ratio=0.4)
def _make_npz(path_npz, url, model):
path_caffemodel = download.cached_download(url)
sys.stderr.write(
        'Now loading caffemodel (usually it may take a few minutes)\n')
sys.stderr.flush()
GoogLeNet.convert_caffemodel_to_npz(path_caffemodel, path_npz)
npz.load_npz(path_npz, model)
return model
def _retrieve(name_npz, url, model):
root = download.get_dataset_directory('pfnet/chainer/models/')
path = os.path.join(root, name_npz)
return download.cache_or_load_file(
path, lambda path: _make_npz(path, url, model),
lambda path: npz.load_npz(path, model))
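# --- Editor's usage sketch (not part of the original module) -----------------
# A minimal, hedged example of pulling a 'pool5' feature vector with the
# GoogLeNet class defined above. The image filename 'cat.png' and the helper
# name are illustrative assumptions; any RGB image accepted by `prepare` works
# (requires Pillow, as checked by `available` at the top of this module).
def _demo_extract_pool5(image_path='cat.png'):
    model = GoogLeNet(pretrained_model='auto')  # downloads and caches the weights
    img = Image.open(image_path)
    # Disable training mode and graph construction, as recommended in the
    # `extract` docstring above.
    with chainer.using_config('train', False), \
            chainer.using_config('enable_backprop', False):
        feature = model.extract([img], layers=['pool5'])['pool5']
    return feature  # chainer.Variable of shape (1, 1024, 1, 1)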
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Pseudorandom number kernels."""
import math
import numpy as np
import tvm
import tvm.topi
from ... import tir
from ...tir import ir_builder
# Threefry PRNG with splitting based on
# - J. K. Salmon, M. A. Moraes, R. O. Dror and D. E. Shaw, "Parallel random numbers: As easy as 1,
# 2, 3," SC '11: Proceedings of 2011 International Conference for High Performance Computing,
# Networking, Storage and Analysis, Seattle, WA, 2011, pp. 1-12, doi: 10.1145/2063384.2063405.
# - Claessen, K. ; Palka, M. (2013) "Splittable Pseudorandom Number Generators using Cryptographic
#   Hashing". Proceedings of Haskell Symposium 2013 pp. 47-58.
# - Ferguson, Niels, et al. "The Skein hash function family." Submission to NIST (round 3) 7.7.5
# (2010): 3.
# Threefry is a counter based PRNG: given a unique input, it generates a unique random number. As
# there is no state to maintain, we can apply it to a sequence of numbers (0..N) to generate a
# sequence of random numbers in parallel. In order to make the PRNG splittable (that is we can
# generate a sequence of random numbers in one place, and another sequence in another), we add a
# path and key in addition to the counter. The path allows us to encode a sequence of splits (a 0 in
# the path indicates the left result of a split, a 1 indicates the right). To avoid continuously
# growing the path, we can compress an existing path into the key portion of the generator by
# hashing the current key, path, and counter to create the new key (this same technique is used if
# we run out of room for the counter). The key is initialized with a unique initial state.
#
# Random numbers are generated by applying the Threefry hash to the current key, path, and counter.
# This module uses encoding e4 from the appendix of "Splittable Pseudorandom Number Generators using
# Cryptographic Hashing" (confusingly, the definition in the paper uses e3 to define the encoding
# function). This encoding uses a 10 element uint64 tensor where each byte means the following:
# .. code-block:
# gen:
# words: 0 1 2 3 | 4 5 | 6 7 | 8 9
# usage: key | path | counter | position of next step in path encoded in binary
# ex: 0b00010 -> next path entry goes one from the right
# Right now, counter only uses the rightmost word.
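# A plain-Python sketch of the e4 layout described above, for illustration
# only; real generator states should come from tvm.relay.random.threefry_key,
# not be built by hand. The key words below are arbitrary example values.
#   index: 0..3 = key, 4..5 = path, 6..7 = counter, 8..9 = next path position
_EXAMPLE_FRESH_GEN = [
    0x0123456789ABCDEF, 0x0, 0x0, 0x0,  # key words (arbitrary example seed)
    0x0, 0x0,                           # empty path
    0x0, 0x0,                           # counter starts at zero
    1 << 63, 0x0,                       # next path bit goes in the leftmost position
]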
# Threefry rotation constants from the Skein paper ("The Skein Hash Function Family"
# https://www.schneier.com/wp-content/uploads/2015/01/skein.pdf)
_ROTATIONS = {
4: [[14, 16], [52, 57], [23, 40], [5, 37], [25, 33], [46, 12], [58, 22], [32, 32]],
8: [
[46, 36, 19, 37],
[33, 27, 14, 42],
[17, 49, 36, 39],
[44, 9, 54, 56],
[39, 30, 34, 24],
[13, 50, 10, 17],
[25, 29, 39, 43],
[8, 35, 56, 22],
],
16: [
[24, 13, 8, 47, 8, 17, 22, 37],
[38, 19, 10, 55, 49, 18, 23, 52],
[33, 4, 51, 13, 34, 41, 59, 17],
[5, 20, 48, 41, 47, 28, 16, 25],
[41, 9, 37, 31, 12, 47, 44, 30],
[16, 34, 56, 51, 4, 53, 42, 41],
[31, 44, 47, 46, 19, 42, 44, 25],
[9, 48, 35, 52, 23, 31, 37, 20],
],
}
# Threefry permutation constants from the Skein paper ("The Skein Hash Function Family"
# https://www.schneier.com/wp-content/uploads/2015/01/skein.pdf)
_PERMUTATIONS = {
4: [0, 3, 2, 1],
8: [2, 1, 4, 7, 6, 5, 0, 3],
16: [0, 9, 2, 13, 6, 11, 4, 15, 10, 7, 12, 3, 14, 5, 8, 1],
}
def _threefry(
irb, key_buf, key_offset, counter_buf, counter_offset, out_buf, out_offset, out_shape
):
"""IRBuilder code for running Threefry
Parameters
----------
irb: IRBuilder
IRBuilder that this code will be generated for.
key_buf: BufferVar
Buffer to read the key from.
    key_offset: number
        Threefry will read from :code:`key_buf[key_offset:key_offset+4]`
    counter_buf: BufferVar
        Buffer to read the counter from.
    counter_offset: number
        Threefry will read from :code:`counter_buf[counter_offset:counter_offset+4]`
    out_buf: BufferVar
        Buffer to write the generated random states to.
    out_offset: number
        Threefry will write to :code:`out_buf[out_offset:out_offset+4*product(out_shape)]`
out_shape: number
Determines the number of output states to generate. :code:`state[i]` will correspond to
counter+i.
"""
nrounds = 20
nwords = 4
iwidth = 64
assert nrounds % 4 == 0
assert nwords in [4, 8, 16]
# The paper has constants for 32 bit threefry, but we keep the implementation simple by only
# using 64-bit words.
assert key_buf.dtype == "uint64", "threefry only supports 64-bit keys"
assert key_buf.dtype == counter_buf.dtype, "threefry key and counter must be the same dtype"
def mix(a, b, rotation):
x = a + b # wrapping
y = x ^ ((b << rotation) | (b >> (iwidth - rotation)))
return [x, y]
# temporary buffer for holding the results of _PERMUTATIONS
tmp = irb.allocate(out_buf.dtype, out_shape * nwords, name="tmp", scope="global")
tmp_offset = 0
# Initialize entire key. It is composed of the original key with one
# element appended. The appended element is the xor of all key words plus a
# constant.
full_key = irb.allocate("uint64", nwords + 1, name="full_key", scope="global")
for i in range(nwords):
full_key[i] = key_buf[key_offset + i]
# initial key constant, full_key[nwords] is equivalent to k_{N_W} in the Skein paper.
full_key[nwords] = tvm.tir.const(0x1BD11BDAA9FC1A22, dtype="uint64")
for i in range(nwords):
full_key[nwords] ^= key_buf[key_offset + i]
with irb.for_range(0, out_shape, dtype="uint64", name="i") as i:
for j in range(nwords):
out_buf[out_offset + i * nwords + j] = counter_buf[counter_offset + j] + i
def key_schedule(s, i):
# Threefry uses no tweak, so the key schedule is simple
if i == nwords - 1:
return full_key[(s + i) % (nwords + 1)] + tvm.tir.const(s, dtype="uint64")
return full_key[(s + i) % (nwords + 1)]
with irb.for_range(0, out_shape, name="l") as l: # pylint: disable=invalid-name
for i in range(nrounds // 4):
for j in range(nwords):
out_buf[out_offset + l * nwords + j] += key_schedule(i, j) # wrapping
for k in range(4):
for j in range(nwords // 2):
(
out_buf[out_offset + l * nwords + j * 2 + 0],
out_buf[out_offset + l * nwords + j * 2 + 1],
) = mix(
out_buf[out_offset + l * nwords + j * 2 + 0],
out_buf[out_offset + l * nwords + j * 2 + 1],
_ROTATIONS[nwords][(i * 4 + k) % 8][j],
)
for j in range(nwords):
tmp[tmp_offset + l * nwords + j] = out_buf[
out_offset + l * nwords + _PERMUTATIONS[nwords][j]
]
# number of rounds is even, so out always contains the result
(out_buf, tmp) = (tmp, out_buf)
(out_offset, tmp_offset) = (tmp_offset, out_offset)
def threefry_generate(gen, out_shape):
"""Generate a series of random values
Notes
-----
This function uses the counter portion of the generator state to generate a series of random
numbers in parallel. Random number `i` is generated by applying Threefry to the current
generator state with the counter portion incremented by `i`. This means that each random number
is generated independently from each other random number, so we can compute them in parallel.
If there is not enough room left in the counter to generate the desired shape of random values,
then a new generator is created by applying Threefry to the current key, path, and counter.
This new generator will have a reset counter.
Warning
-------
    Threefry requires that unsigned integer arithmetic wraps on overflow. Currently TVM has no
guarantee of this, so threefry contains an internal assert to check wrapping behavior. This
assert may or may not run depending on your platform, so it is recommended you run
:py:func:`threefry_test_wrapping` to verify wrapping behavior.
Parameters
----------
gen : Tensor[10, uint64]
        Generator state. Can be created with :py:func:`tvm.relay.random.threefry_key`. This should
not be reused in another function, otherwise random numbers will be repeated.
out_shape : Sequence[int]
Output shape of the random numbers.
Returns
-------
new_gen : Tensor[10, uint64]
The new generator state to be used in subsequent calls.
rand : Tensor[out_shape, uint64]
Tensor of random numbers with shape `out_shape`.
"""
out_len = tir.const(1)
for s in out_shape:
out_len *= s
assert (
out_len.value <= 2 ** 64 - 1
), f"Can only generate up to 2^64 random numbers, but {out_len} were requested."
def gen_ir(gen_ptr, out_gen_ptr, out_array_ptr):
irb = ir_builder.create()
gen = irb.buffer_ptr(gen_ptr)
out_gen = irb.buffer_ptr(out_gen_ptr)
out_array = irb.buffer_ptr(out_array_ptr)
# Check that unsigned arithmetic wraps, as it is required to implement threefry correctly.
irb.emit(
tvm.tir.AssertStmt(
tvm.tir.const(0xFFFFFFFFFFFFFFFF, "uint64") + tvm.tir.const(1, "uint64")
== tvm.tir.const(0, "uint64"),
tvm.tir.StringImm(
"Unsigned integer arithmetic is not wrapping, but threefry requires wrapping."
),
tvm.tir.Evaluate(0),
)
)
# Create a temporary array to hold the generator state we will use to create the random
# numbers. We cannot use gen because we may need to update the key + path if there is not
# enough room in the counter.
tmp = irb.allocate(gen.dtype, 10, name="tmp", scope="global")
# TODO(tkonolige): for now we only use the last word of the counter for counting. It is too
# much work to figure out how to do 128 bit addition.
# Max value for counter should be 2**64-2 because we need to reserve a special value to
# indicate the counter is used up.
with irb.if_scope(gen[7] < tir.const(2 ** 64 - 1, dtype=gen.dtype) - out_len):
for i in range(10):
tmp[i] = gen[i]
with irb.else_scope():
# no room left in the counter, we have to change the path or key
with irb.if_scope(gen[8] == 0 and gen[9] == 0):
# out of room in the path, have to generate new key
# The paper says the counter that we will be hashing should be a special value of
# all ones. We need to allocate some space for it because we cannot overwrite gen.
tmp_counter = irb.allocate(gen.dtype, 2, name="tmp_counter", scope="global")
tmp_counter[0] = tir.const(0xFFFFFFFFFFFFFFFF, dtype=gen.dtype)
tmp_counter[1] = tir.const(0xFFFFFFFFFFFFFFFF, dtype=gen.dtype)
_threefry(irb, gen, 0, tmp_counter, 0, tmp, 0, 1)
tmp[4] = tir.const(0, dtype=gen.dtype) # zero path, i.e. no path
tmp[5] = tir.const(0, dtype=gen.dtype)
tmp[6] = tir.const(0, dtype=gen.dtype) # zero counter
tmp[7] = tir.const(0, dtype=gen.dtype)
tmp[8] = tir.const(1 << 63, dtype=gen.dtype) # one in the leftmost position
tmp[9] = tir.const(0, dtype=gen.dtype)
with irb.else_scope():
tmp[0] = gen[0]
tmp[1] = gen[1]
tmp[2] = gen[2]
tmp[3] = gen[3]
tmp[4] = gen[4] | gen[8] # add a 1 to the path
tmp[5] = gen[5] | gen[9]
tmp[6] = tir.const(0, dtype=gen.dtype) # zero counter
tmp[7] = tir.const(0, dtype=gen.dtype)
_shift_right(irb, gen[8], gen[9], tmp, 8, tmp, 9)
# Compute random values
if out_len.value >= 4:
_threefry(irb, tmp, 0, tmp, 4, out_array, 0, out_len // 4)
if out_len.value % 4 != 0:
remaining = irb.allocate(gen.dtype, 4, name="remaining", scope="global")
tmp[7] = tmp[7] + tir.Cast(gen.dtype, out_len // 4 * 4) # increment counter
_threefry(irb, tmp, 0, tmp, 4, remaining, 0, 1)
with irb.for_range(0, out_len % 4, dtype="uint64", name="i") as i:
out_array[out_len // 4 * 4 + i] = remaining[i]
# Update generator state
out_gen[0] = tmp[0] # key stays the same
out_gen[1] = tmp[1]
out_gen[2] = tmp[2]
out_gen[3] = tmp[3]
out_gen[4] = tmp[4] # path stays the same
out_gen[5] = tmp[5]
out_gen[6] = tir.const(0, dtype=gen.dtype) # unused, leave it as 0
if out_len.value % 4 != 0:
# increment counter for the remaining
# as we will generate 4 random numbers for the remaining, increase 4 here.
# the main increment was done before the second _threefry.
out_gen[7] = tmp[7] + tir.Cast(gen.dtype, 4)
else:
out_gen[7] = tmp[7] + tir.Cast(gen.dtype, out_len) # increment counter
out_gen[8] = tmp[8] # path unchanged, so no update here
out_gen[9] = tmp[9]
return irb.get()
out_gen = tvm.tir.decl_buffer((10,), name="out_gen", dtype="uint64")
out_array = tvm.tir.decl_buffer(out_shape, name="out_array", dtype="uint64")
return tvm.te.extern(
[out_gen.shape, out_array.shape],
[gen],
lambda ins, outs: gen_ir(ins[0], outs[0], outs[1]),
out_buffers=[out_gen, out_array],
name="threefry_generate",
tag="threefry_generate",
)
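# --- Editor's usage sketch (not part of the original module) -----------------
# A minimal host-side driver for `threefry_generate`, assuming an "llvm"
# target is available. The seed words and output length are illustrative; the
# build/run pattern mirrors `threefry_test_wrapping` below.
def _demo_threefry_generate():
    gen_ph = tvm.te.placeholder((10,), name="gen", dtype="uint64")
    new_gen, rand = threefry_generate(gen_ph, (8,))
    s = tvm.te.create_schedule(new_gen.op)
    f = tvm.build(s, [gen_ph, new_gen, rand], target="llvm")
    # Fresh generator state: 4 key words, empty path, zero counter, and the
    # next-path-position marker in the leftmost bit of word 8.
    state = tvm.nd.array(
        np.array([1, 2, 3, 4, 0, 0, 0, 0, 1 << 63, 0], dtype="uint64"))
    out_state = tvm.nd.array(np.zeros((10,), dtype="uint64"))
    out_rand = tvm.nd.array(np.zeros((8,), dtype="uint64"))
    f(state, out_state, out_rand)
    return out_state.numpy(), out_rand.numpy()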
def _shift_right(irb, a, b, out_a, a_off, out_b, b_off):
"""Binary shift a 128bit number composed of two 64 bit words right by one."""
with irb.if_scope(a == 1):
out_a[a_off] = tir.const(0, dtype=a.dtype)
out_b[b_off] = tir.const(0x8000000000000000, dtype=a.dtype)
with irb.else_scope():
with irb.if_scope(a == 0):
out_a[a_off] = tir.const(0, dtype=a.dtype)
out_b[b_off] = b >> 1
with irb.else_scope():
out_a[a_off] = a >> 1
out_b[b_off] = tir.const(0, dtype=a.dtype)
def threefry_split(gen):
"""Split a single generator state into two new ones
Notes
-----
    The new generator is created by appending a one (for the right output) or a zero (for the left
    output) to the end of the path portion of the generator. If there is no longer any room in the
    path, then we create a new key portion of the generator by applying Threefry to the old key,
path, and counter. i.e. :code:`new_key = threefry(old_key, [old_path, old_counter])`. This
resets the path portion of the new generator.
Parameters
----------
gen : Tensor[10, uint64]
        Generator state. Can be created with :py:func:`tvm.relay.random.threefry_key`. This should
not be reused in another function, otherwise random numbers will be repeated.
Returns
-------
out_gen_left : Tensor[10, uint64]
New generator state that is distinct from `out_gen_right`.
out_gen_right : Tensor[10, uint64]
New generator state that is distinct from `out_gen_left`.
"""
def gen_ir(gen_ptr, out_left_ptr, out_right_ptr):
irb = ir_builder.create()
gen = irb.buffer_ptr(gen_ptr)
out_left = irb.buffer_ptr(out_left_ptr)
out_right = irb.buffer_ptr(out_right_ptr)
with irb.if_scope(gen[8] == 0 and gen[9] == 0):
# Generate new key because we have run out of room to extend the path
_threefry(irb, gen, 0, gen, 4, out_left, 0, 1)
out_left[4] = tir.const(0, dtype=gen.dtype)
out_left[5] = tir.const(0, dtype=gen.dtype)
out_left[6] = tir.const(0, dtype=gen.dtype) # counter gets zeroed
out_left[7] = tir.const(0, dtype=gen.dtype) # counter gets zeroed
out_left[8] = tir.const(
1 << 62, dtype=gen.dtype
) # one in the second from the leftmost position
out_left[9] = tir.const(0, dtype=gen.dtype)
out_right[0] = out_left[0]
out_right[1] = out_left[1]
out_right[2] = out_left[2]
out_right[3] = out_left[3]
out_right[4] = tir.const(1 << 63, dtype=gen.dtype) # one in the leftmost position
out_right[5] = tir.const(0, dtype=gen.dtype)
out_right[6] = tir.const(0, dtype=gen.dtype)
out_right[7] = tir.const(0, dtype=gen.dtype)
out_right[8] = tir.const(
1 << 62, dtype=gen.dtype
) # one in the second from the leftmost position
out_right[9] = tir.const(0, dtype=gen.dtype)
with irb.else_scope():
out_left[0] = gen[0]
out_left[1] = gen[1]
out_left[2] = gen[2]
out_left[3] = gen[3]
out_left[4] = gen[4] # adding a zero here, but its already zero padded
out_left[5] = gen[5]
out_left[6] = gen[6]
out_left[7] = gen[7]
# move path position over one bit
_shift_right(irb, gen[8], gen[9], out_left, 8, out_left, 9)
out_right[0] = gen[0]
out_right[1] = gen[1]
out_right[2] = gen[2]
out_right[3] = gen[3]
out_right[4] = gen[4] | gen[8] # add a one to the path
out_right[5] = gen[5] | gen[9]
out_right[6] = gen[6]
out_right[7] = gen[7]
_shift_right(irb, gen[8], gen[9], out_right, 8, out_right, 9)
return irb.get()
out_left = tvm.tir.decl_buffer((10,), name="out_left", dtype="uint64")
out_right = tvm.tir.decl_buffer((10,), name="out_right", dtype="uint64")
return tvm.te.extern(
[out_left.shape, out_right.shape],
[gen],
lambda ins, outs: gen_ir(ins[0], outs[0], outs[1]),
out_buffers=[out_left, out_right],
name="threefry_split",
tag="threefry_split",
)
def threefry_test_wrapping(target, device):
"""Test that unsigned arithmetic wraps on overflow.
Parameters
----------
target : tvm.target.Target
Target to run against
device : tvm.runtime.Device
Context to run the test on
Returns
-------
is_wrapping : bool
Whether or not unsigned integer arithmetic is wrapping for this target, context pair. True
indicates that threefry will work on this platform.
"""
if isinstance(target, str):
target = tvm.target.Target(target)
def gen_ir(out_ptr):
irb = ir_builder.create()
out = irb.buffer_ptr(out_ptr)
if "gpu" in target.keys:
thread_x = tvm.te.thread_axis("threadIdx.x")
irb.scope_attr(thread_x, "thread_extent", 1)
out[0] = tvm.tir.const(0xFFFFFFFFFFFFFFFF, "uint64") + tvm.tir.const(1, "uint64")
return irb.get()
out = tvm.tir.decl_buffer((1,), dtype="uint64")
f = tvm.te.extern(
[out.shape], [], lambda ins, outs: gen_ir(outs[0]), dtype="uint64", out_buffers=[out]
)
s = tvm.te.create_schedule([f.op])
out_ary = tvm.nd.array(np.ones((1,), "uint64"), device)
tvm.build(s, [f], target=target)(out_ary)
return out_ary.numpy()[0] == 0
def uniform(gen, low, high, out_shape, out_dtype):
"""Draw samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval [low, high)
(includes low, but excludes high). In other words, any value within the
given interval is equally likely to be drawn by uniform.
Parameters
----------
gen : ThreefryKey
        Generator state. Can be created with :py:func:`tvm.relay.random.threefry_key`. This should not be
reused in another function, otherwise random numbers will be repeated.
low : Tensor[(), out_dtype]
Lower boundary of the output interval. All values generated will be
greater than or equal to low.
high : Tensor[(), out_dtype]
Upper boundary of the output interval. All values generated will be
less than high.
out_shape : Sequence[int]
Output shape of the random numbers.
out_dtype : str
The output dtype.
Returns
-------
new_gen : ThreefryKey
New generator state that is distinct from `gen`.
out : Tensor[out_shape, out_dtype]
Tensor of random numbers with shape `out_shape` and type `out_dtype`.
"""
new_gen, random_bits = threefry_generate(gen, out_shape)
assert out_dtype in ("float32", "float64"), (
"Only support float32 or float64 for now, got %s" % out_dtype
)
if out_dtype == "float32":
random_dtype = "uint32"
nbits = 32
nfraction = 23
elif out_dtype == "float64":
random_dtype = "uint64"
nbits = 64
nfraction = 52
nexp = nbits - nfraction - 1
random_bits = random_bits.astype(random_dtype)
fraction = tvm.topi.right_shift(
random_bits, tvm.tir.const(nbits - nfraction, dtype=random_dtype)
)
exponent = tvm.topi.left_shift(
tvm.topi.full(out_shape, random_dtype, (1 << (nexp - 1)) - 1),
tvm.tir.const(nfraction, dtype=random_dtype),
)
mantissa = tvm.topi.bitwise_or(fraction, exponent).astype(random_dtype)
standard_uniform_values = tvm.topi.reinterpret(mantissa, out_dtype) - tvm.tir.const(
1, dtype=out_dtype
)
uniform_values = tvm.topi.add(tvm.topi.multiply(standard_uniform_values, high - low), low)
return new_gen, uniform_values
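# --- Editor's reference sketch (not part of the original module) -------------
# The same bit trick as `uniform` above, written in plain NumPy for the
# float32 case: keep 23 random mantissa bits, force the exponent of 1.0,
# reinterpret as a float in [1, 2), then shift and scale. The helper name is
# an illustrative assumption; it expects a numpy array of random bits.
def _uniform_from_bits_reference(random_bits_u32, low, high):
    fraction = random_bits_u32.astype(np.uint32) >> np.uint32(32 - 23)
    exponent = np.uint32(((1 << 7) - 1) << 23)  # exponent bits of 1.0f
    standard = (fraction | exponent).view(np.float32) - np.float32(1.0)
    return standard * (high - low) + low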
def normal(gen, mean, scale, out_shape, out_dtype):
"""Draw samples from a normal distribution.
    The algorithm is based on the Box-Muller transform.
Parameters
----------
gen : ThreefryKey
        Generator state. Can be created with :py:func:`tvm.relay.random.threefry_key`. This should not be
reused in another function, otherwise random numbers will be repeated.
mean : Tensor[(), out_dtype]
The mean of the normal distribution.
scale : Tensor[(), out_dtype]
The standard deviation of the normal distribution.
out_shape : Sequence[int]
Output shape of the random numbers.
out_dtype : str
The output dtype.
Returns
-------
new_gen : ThreefryKey
New generator state that is distinct from `gen`.
out : Tensor[out_shape, out_dtype]
Tensor of random numbers with shape `out_shape` and type `out_dtype`.
"""
out_shape = list(out_shape)
    # The Box-Muller transform needs two independent uniform samples per output value
out_shape.insert(0, 2)
new_gen, uniform_values = uniform(
gen,
tvm.tir.const(0.0, out_dtype),
tvm.tir.const(1.0, out_dtype),
out_shape,
out_dtype,
)
two_pi = tvm.tir.const(2.0 * math.pi, out_dtype)
uniform_values_1 = tvm.topi.strided_slice(uniform_values, [0], [1], strides=[1], axes=[0])
uniform_values_1 = tvm.topi.squeeze(uniform_values_1, axis=0)
uniform_values_2 = tvm.topi.strided_slice(uniform_values, [1], [2], strides=[1], axes=[0])
uniform_values_2 = tvm.topi.squeeze(uniform_values_2, axis=0)
uniform_values_1 = tvm.topi.subtract(tvm.tir.const(1.0, out_dtype), uniform_values_1)
sqrt_values = tvm.topi.sqrt(
tvm.topi.multiply(tvm.tir.const(-2.0, out_dtype), tvm.topi.log(uniform_values_1))
)
sin_values = tvm.topi.sin(tvm.topi.multiply(two_pi, uniform_values_2))
random_values = tvm.topi.add(
tvm.topi.multiply(tvm.topi.multiply(sqrt_values, sin_values), scale), mean
)
return new_gen, random_values
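# --- Editor's reference sketch (not part of the original module) -------------
# The Box-Muller step used by `normal` above, in plain NumPy: two independent
# uniforms in [0, 1) are mapped to one batch of normal samples (only the sine
# branch is used, mirroring the TE code). The helper name is an illustrative
# assumption.
def _box_muller_reference(u1, u2, mean, scale):
    u1 = 1.0 - np.asarray(u1)              # avoid log(0) at u1 == 0
    r = np.sqrt(-2.0 * np.log(u1))
    theta = 2.0 * np.pi * np.asarray(u2)
    return r * np.sin(theta) * scale + mean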
|
|
#!/usr/bin/env python
"""
pyboard interface
This module provides the Pyboard class, used to communicate with and
control the pyboard over a serial USB connection.
Example usage:
import pyboard
pyb = pyboard.Pyboard('/dev/ttyACM0')
Or:
pyb = pyboard.Pyboard('192.168.1.1')
Then:
pyb.enter_raw_repl()
pyb.exec('pyb.LED(1).on()')
pyb.exit_raw_repl()
Note: if using Python2 then pyb.exec must be written as pyb.exec_.
To run a script from the local machine on the board and print out the results:
import pyboard
pyboard.execfile('test.py', device='/dev/ttyACM0')
This script can also be run directly. To execute a local script, use:
./pyboard.py test.py
Or:
python pyboard.py test.py
"""
import sys
import time
try:
stdout = sys.stdout.buffer
except AttributeError:
# Python2 doesn't have buffer attr
stdout = sys.stdout
def stdout_write_bytes(b):
b = b.replace(b"\x04", b"")
stdout.write(b)
stdout.flush()
class PyboardError(BaseException):
pass
class TelnetToSerial:
def __init__(self, ip, user, password, read_timeout=None):
import telnetlib
self.tn = telnetlib.Telnet(ip, timeout=15)
self.read_timeout = read_timeout
if b'Login as:' in self.tn.read_until(b'Login as:', timeout=read_timeout):
self.tn.write(bytes(user, 'ascii') + b"\r\n")
if b'Password:' in self.tn.read_until(b'Password:', timeout=read_timeout):
# needed because of internal implementation details of the telnet server
time.sleep(0.2)
self.tn.write(bytes(password, 'ascii') + b"\r\n")
if b'for more information.' in self.tn.read_until(b'Type "help()" for more information.', timeout=read_timeout):
                    # login successful
from collections import deque
self.fifo = deque()
return
raise PyboardError('Failed to establish a telnet connection with the board')
def __del__(self):
self.close()
def close(self):
try:
self.tn.close()
except:
# the telnet object might not exist yet, so ignore this one
pass
def read(self, size=1):
        # count consecutive empty reads across loop iterations so the
        # read_timeout check below can actually trigger
        timeout_count = 0
        while len(self.fifo) < size:
data = self.tn.read_eager()
if len(data):
self.fifo.extend(data)
timeout_count = 0
else:
time.sleep(0.25)
if self.read_timeout is not None and timeout_count > 4 * self.read_timeout:
break
timeout_count += 1
data = b''
while len(data) < size and len(self.fifo) > 0:
data += bytes([self.fifo.popleft()])
return data
def write(self, data):
self.tn.write(data)
return len(data)
def inWaiting(self):
n_waiting = len(self.fifo)
if not n_waiting:
data = self.tn.read_eager()
self.fifo.extend(data)
return len(data)
else:
return n_waiting
class Pyboard:
def __init__(self, device, baudrate=115200, user='micro', password='python', wait=0):
if device and device[0].isdigit() and device[-1].isdigit() and device.count('.') == 3:
# device looks like an IP address
self.serial = TelnetToSerial(device, user, password, read_timeout=10)
else:
import serial
delayed = False
for attempt in range(wait + 1):
try:
self.serial = serial.Serial(device, baudrate=baudrate, interCharTimeout=1)
break
except (OSError, IOError): # Py2 and Py3 have different errors
if wait == 0:
continue
if attempt == 0:
sys.stdout.write('Waiting {} seconds for pyboard '.format(wait))
delayed = True
time.sleep(1)
sys.stdout.write('.')
sys.stdout.flush()
else:
if delayed:
print('')
raise PyboardError('failed to access ' + device)
if delayed:
print('')
def close(self):
self.serial.close()
def read_until(self, min_num_bytes, ending, timeout=10, data_consumer=None):
data = self.serial.read(min_num_bytes)
if data_consumer:
data_consumer(data)
timeout_count = 0
while True:
if data.endswith(ending):
break
elif self.serial.inWaiting() > 0:
new_data = self.serial.read(1)
data = data + new_data
if data_consumer:
data_consumer(new_data)
timeout_count = 0
else:
timeout_count += 1
if timeout is not None and timeout_count >= 100 * timeout:
break
time.sleep(0.01)
return data
def enter_raw_repl(self):
self.serial.write(b'\r\x03\x03') # ctrl-C twice: interrupt any running program
# flush input (without relying on serial.flushInput())
n = self.serial.inWaiting()
while n > 0:
self.serial.read(n)
n = self.serial.inWaiting()
self.serial.write(b'\r\x01') # ctrl-A: enter raw REPL
data = self.read_until(1, b'raw REPL; CTRL-B to exit\r\n>')
if not data.endswith(b'raw REPL; CTRL-B to exit\r\n>'):
print(data)
raise PyboardError('could not enter raw repl')
self.serial.write(b'\x04') # ctrl-D: soft reset
data = self.read_until(1, b'soft reboot\r\n')
if not data.endswith(b'soft reboot\r\n'):
print(data)
raise PyboardError('could not enter raw repl')
# By splitting this into 2 reads, it allows boot.py to print stuff,
# which will show up after the soft reboot and before the raw REPL.
data = self.read_until(1, b'raw REPL; CTRL-B to exit\r\n')
if not data.endswith(b'raw REPL; CTRL-B to exit\r\n'):
print(data)
raise PyboardError('could not enter raw repl')
def exit_raw_repl(self):
self.serial.write(b'\r\x02') # ctrl-B: enter friendly REPL
def follow(self, timeout, data_consumer=None):
# wait for normal output
data = self.read_until(1, b'\x04', timeout=timeout, data_consumer=data_consumer)
if not data.endswith(b'\x04'):
raise PyboardError('timeout waiting for first EOF reception')
data = data[:-1]
# wait for error output
data_err = self.read_until(1, b'\x04', timeout=timeout)
if not data_err.endswith(b'\x04'):
raise PyboardError('timeout waiting for second EOF reception')
data_err = data_err[:-1]
# return normal and error output
return data, data_err
def exec_raw_no_follow(self, command):
if isinstance(command, bytes):
command_bytes = command
else:
command_bytes = bytes(command, encoding='utf8')
# check we have a prompt
data = self.read_until(1, b'>')
if not data.endswith(b'>'):
raise PyboardError('could not enter raw repl')
# write command
for i in range(0, len(command_bytes), 256):
self.serial.write(command_bytes[i:min(i + 256, len(command_bytes))])
time.sleep(0.01)
self.serial.write(b'\x04')
# check if we could exec command
data = self.serial.read(2)
if data != b'OK':
raise PyboardError('could not exec command')
def exec_raw(self, command, timeout=10, data_consumer=None):
        self.exec_raw_no_follow(command)
return self.follow(timeout, data_consumer)
def eval(self, expression):
ret = self.exec_('print({})'.format(expression))
ret = ret.strip()
return ret
def exec_(self, command):
ret, ret_err = self.exec_raw(command)
if ret_err:
raise PyboardError('exception', ret, ret_err)
return ret
def execfile(self, filename):
with open(filename, 'rb') as f:
pyfile = f.read()
return self.exec_(pyfile)
def get_time(self):
t = str(self.eval('pyb.RTC().datetime()'), encoding='utf8')[1:-1].split(', ')
return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])
# in Python2 exec is a keyword so one must use "exec_"
# but for Python3 we want to provide the nicer version "exec"
setattr(Pyboard, "exec", Pyboard.exec_)
def execfile(filename, device='/dev/ttyACM0', baudrate=115200, user='micro', password='python'):
pyb = Pyboard(device, baudrate, user, password)
pyb.enter_raw_repl()
output = pyb.execfile(filename)
stdout_write_bytes(output)
pyb.exit_raw_repl()
pyb.close()
def main():
import argparse
cmd_parser = argparse.ArgumentParser(description='Run scripts on the pyboard.')
cmd_parser.add_argument('--device', default='/dev/ttyACM0', help='the serial device or the IP address of the pyboard')
    cmd_parser.add_argument('-b', '--baudrate', default=115200, type=int, help='the baud rate of the serial device')
cmd_parser.add_argument('-u', '--user', default='micro', help='the telnet login username')
cmd_parser.add_argument('-p', '--password', default='python', help='the telnet login password')
cmd_parser.add_argument('-c', '--command', help='program passed in as string')
cmd_parser.add_argument('-w', '--wait', default=0, type=int, help='seconds to wait for USB connected board to become available')
cmd_parser.add_argument('--follow', action='store_true', help='follow the output after running the scripts [default if no scripts given]')
cmd_parser.add_argument('files', nargs='*', help='input files')
args = cmd_parser.parse_args()
# open the connection to the pyboard
try:
pyb = Pyboard(args.device, args.baudrate, args.user, args.password, args.wait)
except PyboardError as er:
print(er)
sys.exit(1)
# run any command or file(s)
if args.command is not None or len(args.files):
# we must enter raw-REPL mode to execute commands
# this will do a soft-reset of the board
try:
pyb.enter_raw_repl()
except PyboardError as er:
print(er)
sys.exit(1)
def execbuffer(buf):
try:
ret, ret_err = pyb.exec_raw(buf, timeout=None, data_consumer=stdout_write_bytes)
except PyboardError as er:
print(er)
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
if ret_err:
pyb.exit_raw_repl()
pyb.close()
stdout_write_bytes(ret_err)
sys.exit(1)
# run the command, if given
if args.command is not None:
execbuffer(args.command.encode('utf-8'))
# run any files
for filename in args.files:
with open(filename, 'rb') as f:
pyfile = f.read()
execbuffer(pyfile)
# exiting raw-REPL just drops to friendly-REPL mode
pyb.exit_raw_repl()
# if asked explicitly, or no files given, then follow the output
if args.follow or (args.command is None and len(args.files) == 0):
try:
ret, ret_err = pyb.follow(timeout=None, data_consumer=stdout_write_bytes)
except PyboardError as er:
print(er)
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
if ret_err:
pyb.close()
stdout_write_bytes(ret_err)
sys.exit(1)
# close the connection to the pyboard
pyb.close()
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
orm['cms.CMSPlugin'].objects.filter(plugin_type='AuthorEntriesPlugin').update(plugin_type='AuthorsPlugin')
def backwards(self, orm):
orm['cms.CMSPlugin'].objects.filter(plugin_type='AuthorsPlugin').update(plugin_type='AuthorEntriesPlugin')
models = {
u'aldryn_blog.authorsplugin': {
'Meta': {'object_name': 'AuthorsPlugin', 'db_table': "u'cmsplugin_authorsplugin'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
u'aldryn_blog.latestentriesplugin': {
'Meta': {'object_name': 'LatestEntriesPlugin', 'db_table': "u'cmsplugin_latestentriesplugin'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'latest_entries': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['taggit.Tag']", 'symmetrical': 'False', 'blank': 'True'})
},
u'aldryn_blog.post': {
'Meta': {'ordering': "['-publication_start']", 'object_name': 'Post'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aldryn_blog_posts'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key_visual': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'lead_in': ('djangocms_text_ckeditor.fields.HTMLField', [], {}),
'publication_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publication_start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'),)", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', 'db_table': "u'cmsplugin_placeholderreference'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['cms', 'aldryn_blog']
symmetrical = True
|
|
"""
Gaussian Mixture Models
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
#
import numpy as np
from ..base import BaseEstimator
from ..utils import check_random_state
from ..utils.extmath import logsum
from .. import cluster
# FIXME this lacks a proper docstring
def normalize(A, axis=None):
""" Normalize the input array so that it sums to 1.
WARNING: Modifies the input array in place.
"""
A += np.finfo(float).eps
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
return A / Asum
def lmvnpdf(obs, means, covars, cvtype='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
obs : array_like, shape (O, D)
List of D-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (C, D)
List of D-dimensional mean vectors for C Gaussians. Each row
corresponds to a single mean vector.
covars : array_like
List of C covariance parameters for each Gaussian. The shape
depends on `cvtype`:
(C,) if 'spherical',
(D, D) if 'tied',
(C, D) if 'diag',
(C, D, D) if 'full'
cvtype : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (O, C)
Array containing the log probabilities of each data point in
`obs` under each of the C multivariate Gaussian distributions.
"""
lmvnpdf_dict = {'spherical': _lmvnpdfspherical,
'tied': _lmvnpdftied,
'diag': _lmvnpdfdiag,
'full': _lmvnpdffull}
return lmvnpdf_dict[cvtype](obs, means, covars)
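# Illustrative sketch (not part of the original module): calling lmvnpdf with
# diagonal covariances. The array values are arbitrary example inputs.
def _example_lmvnpdf_diag():
    obs = np.array([[0.0, 0.0], [1.0, 2.0]])     # 2 observations, 2 dims
    means = np.array([[0.0, 0.0], [5.0, 5.0]])   # 2 components
    covars = np.array([[1.0, 1.0], [2.0, 2.0]])  # per-component variances
    # Result has shape (n_obs, n_components) and holds log densities.
    return lmvnpdf(obs, means, covars, cvtype='diag')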
def sample_gaussian(mean, covar, cvtype='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `cvtype`:
scalar if 'spherical',
(D) if 'diag',
(D, D) if 'tied', or 'full'
cvtype : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
obs : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if cvtype == 'spherical':
rand *= np.sqrt(covar)
elif cvtype == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
from scipy import linalg
U, s, V = linalg.svd(covar)
sqrtS = np.diag(np.sqrt(s))
sqrt_covar = np.dot(U, np.dot(sqrtS, V))
rand = np.dot(sqrt_covar, rand)
return (rand.T + mean).T
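# Illustrative sketch (not part of the original module): drawing a few points
# from a 2-D diagonal-covariance Gaussian with sample_gaussian.
def _example_sample_gaussian():
    mean = np.array([0.0, 5.0])
    covar = np.array([1.0, 0.25])  # diagonal variances
    # Result has shape (n_features, n_samples) = (2, 4).
    return sample_gaussian(mean, covar, cvtype='diag', n_samples=4,
                           random_state=0)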
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
cvtype : string (read-only), optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state : numpy.random.RandomState or int, optional
Seed or random number generator used for random sampling.
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
thresh : float, optional
Convergence threshold.
Attributes
----------
cvtype : string (read-only)
String describing the type of covariance parameters used by
the GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_states : int (read-only)
Number of mixture components.
weights : array, shape (`n_states`,)
Mixing weights for each mixture component.
means : array, shape (`n_states`, `n_features`)
Mean parameters for each mixture component.
covars : array
Covariance parameters for each mixture component. The shape
depends on `cvtype`:
(`n_states`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_states`, `n_features`) if 'diag',
(`n_states`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
Methods
-------
decode(X)
Find most likely mixture components for each point in `X`.
eval(X)
Compute the log likelihood of `X` under the model and the
posterior distribution over mixture components.
fit(X)
Estimate model parameters from `X` using the EM algorithm.
predict(X)
Like decode, find the most likely mixture component for each
observation in `X`.
rvs(n_samples=1, random_state=None)
Generate `n` samples from the model.
score(X)
Compute the log likelihood of `X` under the model.
See Also
--------
DPGMM : Infinite Gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs)
GMM(cvtype='diag', n_components=2)
>>> np.round(g.weights, 2)
array([ 0.75, 0.25])
>>> np.round(g.means, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]])
array([1, 1, 0, 0])
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]])
GMM(cvtype='diag', n_components=2)
>>> np.round(g.weights, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, cvtype='diag', random_state=None,
thresh=1e-2, min_covar=1e-3):
self.n_components = n_components
self._cvtype = cvtype
self.thresh = thresh
self.min_covar = min_covar
self.random_state = random_state
if cvtype not in ['spherical', 'tied', 'diag', 'full']:
    raise ValueError('bad cvtype: %s' % cvtype)
self.weights = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
# Read-only properties.
@property
def cvtype(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._cvtype
def _get_covars(self):
"""Return covars as a full matrix."""
if self.cvtype == 'full':
return self._covars
elif self.cvtype == 'diag':
return [np.diag(cov) for cov in self._covars]
elif self.cvtype == 'tied':
return [self._covars] * self.n_components
elif self.cvtype == 'spherical':
return [np.eye(self.n_features) * f for f in self._covars]
def _set_covars(self, covars):
covars = np.asanyarray(covars)
_validate_covars(covars, self._cvtype, self.n_components, self.n_features)
self._covars = covars
covars = property(_get_covars, _set_covars)
def _get_means(self):
"""Mean parameters for each mixture component."""
return self._means
def _set_means(self, means):
means = np.asarray(means)
if hasattr(self, 'n_features') and \
means.shape != (self.n_components, self.n_features):
raise ValueError('means must have shape (n_components, n_features)')
self._means = means.copy()
self.n_features = self._means.shape[1]
means = property(_get_means, _set_means)
def __repr__(self):
return "GMM(cvtype='%s', n_components=%s)"%(self._cvtype, self.n_components)
def _get_weights(self):
"""Mixing weights for each mixture component."""
return np.exp(self._log_weights)
def _set_weights(self, weights):
if len(weights) != self.n_components:
raise ValueError('weights must have length n_components')
if not np.allclose(np.sum(weights), 1.0):
raise ValueError('weights must sum to 1.0')
self._log_weights = np.log(np.asarray(weights).copy())
weights = property(_get_weights, _set_weights)
def eval(self, obs, return_log=False):
"""Evaluate the model on data
Compute the log probability of `obs` under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of `obs`.
Parameters
----------
obs: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
return_log: boolean, optional
If True, the posteriors returned are log-probabilities
Returns
-------
logprob: array_like, shape (n_samples,)
Log probabilities of each data point in `obs`
posteriors: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
obs = np.asanyarray(obs)
lpr = (lmvnpdf(obs, self._means, self._covars, self._cvtype)
+ self._log_weights)
logprob = logsum(lpr, axis=1)
posteriors = lpr - logprob[:, np.newaxis]
if not return_log:
    posteriors = np.exp(posteriors)
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in `obs`
"""
# We use return_log=True to avoid a useless exponentiation
logprob, _ = self.eval(obs, return_log=True)
return logprob
def decode(self, obs):
"""Find most likely mixture components for each point in `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprobs : array_like, shape (n_samples,)
Log probability of each point in `obs` under the model.
components : array_like, shape (n_samples,)
Index of the most likely mixture component for each observation
"""
logprob, posteriors = self.eval(obs)
return logprob, posteriors.argmax(axis=1)
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
"""
logprob, components = self.decode(X)
return components
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, posteriors = self.eval(X)
return posteriors
def rvs(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
obs : array_like, shape (n_samples, n_features)
List of samples
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_pdf = self.weights
weight_cdf = np.cumsum(weight_pdf)
obs = np.empty((n_samples, self.n_features))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in xrange(self.n_components):
# occurrences of current component in obs
comp_in_obs = (comp == comps)
# number of those occurrences
num_comp_in_obs = comp_in_obs.sum()
if num_comp_in_obs > 0:
if self._cvtype == 'tied':
cv = self._covars
else:
cv = self._covars[comp]
obs[comp_in_obs] = sample_gaussian(
self._means[comp], cv, self._cvtype, num_comp_in_obs,
random_state=random_state
).T
return obs
def fit(self, X, n_iter=10, thresh=1e-2, params='wmc',
init_params='wmc'):
"""Estimate model parameters with the expectation-maximization
algorithm.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string ''. Likewise, if you
would like just to do an initialization, call this method with
n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
n_iter : int, optional
Number of EM iterations to perform.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
"""
## initialization step
X = np.asanyarray(X)
if hasattr(self, 'n_features') and self.n_features != X.shape[1]:
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (X.shape[1], self.n_features))
self.n_features = X.shape[1]
if 'm' in init_params:
self._means = cluster.KMeans(
k=self.n_components).fit(X).cluster_centers_
elif not hasattr(self, 'means'):
self._means = np.zeros((self.n_components, self.n_features))
if 'w' in init_params or not hasattr(self, 'weights'):
self.weights = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in init_params:
cv = np.cov(X.T)
if not cv.shape:
cv.shape = (1, 1)
self._covars = _distribute_covar_matrix_to_match_cvtype(
cv, self._cvtype, self.n_components)
elif not hasattr(self, 'covars'):
self.covars = _distribute_covar_matrix_to_match_cvtype(
np.eye(self.n_features), self.cvtype, self.n_components)
# EM algorithm
logprob = []
# reset self.converged_ to False
self.converged_ = False
for i in xrange(n_iter):
# Expectation step
curr_logprob, posteriors = self.eval(X)
logprob.append(curr_logprob.sum())
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, posteriors, params, self.min_covar)
return self
def _do_mstep(self, X, posteriors, params, min_covar=0):
w = posteriors.sum(axis=0)
avg_obs = np.dot(posteriors.T, X)
norm = 1.0 / (w[:, np.newaxis] + 10*np.finfo(np.float).eps)
if 'w' in params:
self._log_weights = np.log(w / (w.sum() + 10*np.finfo(np.float).eps)
+ np.finfo(np.float).eps)
if 'm' in params:
self._means = avg_obs * norm
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self._cvtype]
self._covars = covar_mstep_func(self, X, posteriors,
avg_obs, norm, min_covar)
return w
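# Illustrative sketch (not part of the original module) of the two-step usage
# described in GMM.fit: run only the initialization with n_iter=0, then resume
# EM without re-initializing by passing init_params=''. The data is arbitrary.
def _example_gmm_two_step_fit():
    X = np.concatenate((np.random.randn(50, 1), 5 + np.random.randn(50, 1)))
    g = GMM(n_components=2, cvtype='diag')
    g.fit(X, n_iter=0)                   # initialization only
    g.fit(X, n_iter=20, init_params='')  # EM from the initialized parameters
    return g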
##
## some helper routines
##
def _lmvnpdfdiag(obs, means=0.0, covars=1.0):
n_obs, n_dim = obs.shape
# (x-y).T A (x-y) = x.T A x - 2x.T A y + y.T A y
#lpr = -0.5 * (np.tile((np.sum((means**2) / covars, 1)
# + np.sum(np.log(covars), 1))[np.newaxis,:], (n_obs,1))
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(obs, (means / covars).T)
+ np.dot(obs ** 2, (1.0 / covars).T))
return lpr
def _lmvnpdfspherical(obs, means=0.0, covars=1.0):
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
return _lmvnpdfdiag(obs, means, np.tile(cv, (1, obs.shape[-1])))
def _lmvnpdftied(obs, means, covars):
from scipy import linalg
n_obs, n_dim = obs.shape
# (x-y).T A (x-y) = x.T A x - 2x.T A y + y.T A y
icv = linalg.pinv(covars)
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars)+0.1)
+ np.sum(obs * np.dot(obs, icv), 1)[:, np.newaxis]
- 2 * np.dot(np.dot(obs, icv), means.T)
+ np.sum(means * np.dot(means, icv), 1))
return lpr
def _lmvnpdffull(obs, means, covars):
"""
Log probability for full covariance matrices.
WARNING: In certain cases, this function will modify in-place
some of the covariance matrices
"""
from scipy import linalg
import itertools
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
solve_triangular = linalg.solve
n_obs, n_dim = obs.shape
nmix = len(means)
log_prob = np.empty((n_obs, nmix))
for c, (mu, cv) in enumerate(itertools.izip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations; we need to reinitialize this component
cv[:] = 10 * np.eye(cv.shape[0])
cv_chol = cv
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = solve_triangular(cv_chol, (obs - mu).T, lower=True).T
log_prob[:, c] = -.5 * (np.sum(cv_sol ** 2, axis=1) + \
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, cvtype, nmix, n_dim):
from scipy import linalg
if cvtype == 'spherical':
if len(covars) != nmix:
raise ValueError("'spherical' covars must have length nmix")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif cvtype == 'tied':
if covars.shape != (n_dim, n_dim):
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif cvtype == 'diag':
if covars.shape != (nmix, n_dim):
raise ValueError("'diag' covars must have shape (nmix, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif cvtype == 'full':
if covars.shape != (nmix, n_dim, n_dim):
raise ValueError("'full' covars must have shape "
"(nmix, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
def _distribute_covar_matrix_to_match_cvtype(tiedcv, cvtype, n_components):
if cvtype == 'spherical':
cv = np.tile(np.diag(tiedcv).mean(), n_components)
elif cvtype == 'tied':
cv = tiedcv
elif cvtype == 'diag':
cv = np.tile(np.diag(tiedcv), (n_components, 1))
elif cvtype == 'full':
cv = np.tile(tiedcv, (n_components, 1, 1))
else:
raise ValueError(
    "cvtype must be one of 'spherical', 'tied', 'diag', 'full'")
return cv
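# Illustrative sketch (not part of the original module): how a single (tied)
# covariance matrix is expanded to the shape each cvtype expects.
def _example_distribute_covar():
    tied = np.array([[2.0, 0.0], [0.0, 0.5]])
    shapes = dict((ct, np.shape(_distribute_covar_matrix_to_match_cvtype(tied, ct, 3)))
                  for ct in ('spherical', 'tied', 'diag', 'full'))
    # {'spherical': (3,), 'tied': (2, 2), 'diag': (3, 2), 'full': (3, 2, 2)}
    return shapes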
def _covar_mstep_diag(gmm, obs, posteriors, avg_obs, norm, min_covar):
# For column vectors:
# covars_c = average((obs(t) - means_c) (obs(t) - means_c).T,
# weights_c)
# (obs(t) - means_c) (obs(t) - means_c).T
# = obs(t) obs(t).T - 2 obs(t) means_c.T + means_c means_c.T
#
# But everything here is a row vector, so all of the
# above needs to be transposed.
avg_obs2 = np.dot(posteriors.T, obs * obs) * norm
avg_means2 = gmm._means ** 2
avg_obs_means = gmm._means * avg_obs * norm
return avg_obs2 - 2 * avg_obs_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
return _covar_mstep_diag(*args).mean(axis=1)
def _covar_mstep_full(gmm, obs, posteriors, avg_obs, norm, min_covar):
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
cv = np.empty((gmm.n_components, gmm.n_features, gmm.n_features))
for c in xrange(gmm.n_components):
post = posteriors[:, c]
avg_cv = np.dot(post * obs.T, obs) / (post.sum() +
10*np.finfo(np.float).eps)
mu = gmm._means[c][np.newaxis]
cv[c] = (avg_cv - np.dot(mu.T, mu)
+ min_covar * np.eye(gmm.n_features))
return cv
def _covar_mstep_tied2(*args):
return _covar_mstep_full(*args).mean(axis=0)
def _covar_mstep_tied(gmm, obs, posteriors, avg_obs, norm, min_covar):
print "THIS IS BROKEN"
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
avg_obs2 = np.dot(obs.T, obs)
avg_means2 = np.dot(gmm._means.T, gmm._means)
return (avg_obs2 - avg_means2 + min_covar * np.eye(gmm.n_features))
def _covar_mstep_slow(gmm, obs, posteriors, avg_obs, norm, min_covar):
w = posteriors.sum(axis=0)
covars = np.zeros(gmm._covars.shape)
for c in xrange(gmm.n_components):
mu = gmm._means[c]
#cv = np.dot(mu.T, mu)
avg_obs2 = np.zeros((gmm.n_features, gmm.n_features))
for t, o in enumerate(obs):
avg_obs2 += posteriors[t, c] * np.outer(o, o)
cv = (avg_obs2 / w[c]
- 2 * np.outer(avg_obs[c] / w[c], mu)
+ np.outer(mu, mu)
+ min_covar * np.eye(gmm.n_features))
if gmm.cvtype == 'spherical':
covars[c] = np.diag(cv).mean()
elif gmm.cvtype == 'diag':
covars[c] = np.diag(cv)
elif gmm.cvtype == 'full':
covars[c] = cv
elif gmm.cvtype == 'tied':
covars += cv / gmm.n_components
return covars
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
#'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
'tied': _covar_mstep_slow,
}
|
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from lifetimes.utils import calculate_alive_path, expected_cumulative_transactions
from scipy import stats
__all__ = [
"plot_period_transactions",
"plot_calibration_purchases_vs_holdout_purchases",
"plot_frequency_recency_matrix",
"plot_probability_alive_matrix",
"plot_expected_repeat_purchases",
"plot_history_alive",
"plot_cumulative_transactions",
"plot_incremental_transactions",
"plot_transaction_rate_heterogeneity",
"plot_dropout_rate_heterogeneity",
]
def coalesce(*args):
return next(s for s in args if s is not None)
def plot_period_transactions(
model,
max_frequency=7,
title="Frequency of Repeat Transactions",
xlabel="Number of Calibration Period Transactions",
ylabel="Customers",
**kwargs
):
"""
Plot a figure with period actual and predicted transactions.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
max_frequency: int, optional
The maximum frequency to plot.
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
labels = kwargs.pop("label", ["Actual", "Model"])
n = model.data.shape[0]
simulated_data = model.generate_new_data(size=n)
model_counts = pd.DataFrame(model.data["frequency"].value_counts().sort_index().iloc[:max_frequency])
simulated_counts = pd.DataFrame(simulated_data["frequency"].value_counts().sort_index().iloc[:max_frequency])
combined_counts = model_counts.merge(simulated_counts, how="outer", left_index=True, right_index=True).fillna(0)
combined_counts.columns = labels
ax = combined_counts.plot(kind="bar", **kwargs)
plt.legend()
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
return ax
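# Illustrative sketch (not part of the original module), assuming the lifetimes
# BetaGeoFitter API and the bundled CDNOW summary dataset are available.
def _example_plot_period_transactions():
    from lifetimes import BetaGeoFitter
    from lifetimes.datasets import load_cdnow_summary
    summary = load_cdnow_summary()
    bgf = BetaGeoFitter(penalizer_coef=0.0)
    bgf.fit(summary["frequency"], summary["recency"], summary["T"])
    return plot_period_transactions(bgf, max_frequency=7)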
def plot_calibration_purchases_vs_holdout_purchases(
model, calibration_holdout_matrix, kind="frequency_cal", n=7, **kwargs
):
"""
Plot calibration purchases vs holdout.
This currently relies too much on the lifetimes.util calibration_and_holdout_data function.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
calibration_holdout_matrix: pandas DataFrame
DataFrame from calibration_and_holdout_data function.
kind: str, optional
x-axis :"frequency_cal". Purchases in calibration period,
"recency_cal". Age of customer at last purchase,
"T_cal". Age of customer at the end of calibration period,
"time_since_last_purchase". Time since user made last purchase
n: int, optional
Number of ticks on the x axis
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
x_labels = {
"frequency_cal": "Purchases in calibration period",
"recency_cal": "Age of customer at last purchase",
"T_cal": "Age of customer at the end of calibration period",
"time_since_last_purchase": "Time since user made last purchase",
}
summary = calibration_holdout_matrix.copy()
duration_holdout = summary.iloc[0]["duration_holdout"]
summary["model_predictions"] = model.conditional_expected_number_of_purchases_up_to_time(
duration_holdout, summary["frequency_cal"], summary["recency_cal"], summary["T_cal"])
if kind == "time_since_last_purchase":
summary["time_since_last_purchase"] = summary["T_cal"] - summary["recency_cal"]
ax = (
summary.groupby(["time_since_last_purchase"])[["frequency_holdout", "model_predictions"]]
.mean()
.iloc[:n]
.plot(**kwargs)
)
else:
ax = summary.groupby(kind)[["frequency_holdout", "model_predictions"]].mean().iloc[:n].plot(**kwargs)
plt.title("Actual Purchases in Holdout Period vs Predicted Purchases")
plt.xlabel(x_labels[kind])
plt.ylabel("Average of Purchases in Holdout Period")
plt.legend()
return ax
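# Illustrative sketch (not part of the original module), assuming the lifetimes
# calibration_and_holdout_data helper and the bundled example transaction log;
# the split dates below are arbitrary example values.
def _example_plot_calibration_vs_holdout():
    from lifetimes import BetaGeoFitter
    from lifetimes.datasets import load_transaction_data
    from lifetimes.utils import calibration_and_holdout_data
    transactions = load_transaction_data()
    summary_cal_holdout = calibration_and_holdout_data(
        transactions, "id", "date",
        calibration_period_end="2014-09-01",
        observation_period_end="2014-12-31",
    )
    bgf = BetaGeoFitter().fit(
        summary_cal_holdout["frequency_cal"],
        summary_cal_holdout["recency_cal"],
        summary_cal_holdout["T_cal"],
    )
    return plot_calibration_purchases_vs_holdout_purchases(bgf, summary_cal_holdout)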
def plot_frequency_recency_matrix(
model,
T=1,
max_frequency=None,
max_recency=None,
title=None,
xlabel="Customer's Historical Frequency",
ylabel="Customer's Recency",
**kwargs
):
"""
Plot the recency-frequency matrix as a heatmap.
Plot a figure of expected transactions in T next units of time by a customer's frequency and recency.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
T: float, optional
Next units of time to make predictions for
max_frequency: int, optional
The maximum frequency to plot. Default is max observed frequency.
max_recency: int, optional
The maximum recency to plot. This also determines the age of the customer.
Default to max observed age.
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.imshow command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if max_frequency is None:
max_frequency = int(model.data["frequency"].max())
if max_recency is None:
max_recency = int(model.data["T"].max())
Z = np.zeros((max_recency + 1, max_frequency + 1))
for i, recency in enumerate(np.arange(max_recency + 1)):
for j, frequency in enumerate(np.arange(max_frequency + 1)):
Z[i, j] = model.conditional_expected_number_of_purchases_up_to_time(T, frequency, recency, max_recency)
interpolation = kwargs.pop("interpolation", "none")
ax = plt.subplot(111)
pcm = ax.imshow(Z, interpolation=interpolation, **kwargs)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if title is None:
title = (
"Expected Number of Future Purchases for {} Unit{} of Time,".format(T, "s"[T == 1 :])
+ "\nby Frequency and Recency of a Customer"
)
plt.title(title)
# turn matrix into square
forceAspect(ax)
# plot colorbar beside matrix
plt.colorbar(pcm, ax=ax)
return ax
def plot_probability_alive_matrix(
model,
max_frequency=None,
max_recency=None,
title="Probability Customer is Alive,\nby Frequency and Recency of a Customer",
xlabel="Customer's Historical Frequency",
ylabel="Customer's Recency",
**kwargs
):
"""
Plot probability alive matrix as heatmap.
Plot a figure of the probability a customer is alive based on their
frequency and recency.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
max_frequency: int, optional
The maximum frequency to plot. Default is max observed frequency.
max_recency: int, optional
The maximum recency to plot. This also determines the age of the customer.
Default to max observed age.
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.imshow command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
z = model.conditional_probability_alive_matrix(max_frequency, max_recency)
interpolation = kwargs.pop("interpolation", "none")
ax = plt.subplot(111)
pcm = ax.imshow(z, interpolation=interpolation, **kwargs)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
# turn matrix into square
forceAspect(ax)
# plot colorbar beside matrix
plt.colorbar(pcm, ax=ax)
return ax
def plot_expected_repeat_purchases(
model,
title="Expected Number of Repeat Purchases per Customer",
xlabel="Time Since First Purchase",
ax=None,
label=None,
**kwargs
):
"""
Plot expected repeat purchases over the calibration period.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ax: matplotlib.AxesSubplot, optional
Using user axes
label: str, optional
Label for plot.
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if ax is None:
ax = plt.subplot(111)
if plt.matplotlib.__version__ >= "1.5":
color_cycle = ax._get_lines.prop_cycler
color = coalesce(kwargs.pop("c", None), kwargs.pop("color", None), next(color_cycle)["color"])
else:
color_cycle = ax._get_lines.color_cycle
color = coalesce(kwargs.pop("c", None), kwargs.pop("color", None), next(color_cycle))
max_T = model.data["T"].max()
times = np.linspace(0, max_T, 100)
ax.plot(times, model.expected_number_of_purchases_up_to_time(times), color=color, label=label, **kwargs)
times = np.linspace(max_T, 1.5 * max_T, 100)
ax.plot(times, model.expected_number_of_purchases_up_to_time(times), color=color, ls="--", **kwargs)
plt.title(title)
plt.xlabel(xlabel)
plt.legend(loc="lower right")
return ax
def plot_history_alive(model, t, transactions, datetime_col, freq="D", start_date=None, ax=None, **kwargs):
"""
Draw a graph showing the probability of being alive for a customer in time.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
t: int
The number of time units since the customer's first purchase over which to draw p_alive
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in the transactions that denotes the datetime the purchase was made
freq: str, optional
Default 'D' for days. Other examples: 'W' for weeks.
start_date: datetime, optional
Limit xaxis to start date
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if start_date is None:
start_date = min(transactions[datetime_col])
if ax is None:
ax = plt.subplot(111)
# Get purchasing history of user
customer_history = transactions[[datetime_col]].copy()
customer_history.index = pd.DatetimeIndex(customer_history[datetime_col])
# Add transactions column
customer_history["transactions"] = 1
customer_history = customer_history.resample(freq).sum()
# plot alive_path
path = calculate_alive_path(model, transactions, datetime_col, t, freq)
path_dates = pd.date_range(start=min(transactions[datetime_col]), periods=len(path), freq=freq)
plt.plot(path_dates, path, "-", label="P_alive")
# plot buying dates
payment_dates = customer_history[customer_history["transactions"] >= 1].index
plt.vlines(payment_dates.values, ymin=0, ymax=1, colors="r", linestyles="dashed", label="purchases")
plt.ylim(0, 1.0)
plt.yticks(np.arange(0, 1.1, 0.1))
plt.xlim(start_date, path_dates[-1])
plt.legend(loc=3)
plt.ylabel("P_alive")
plt.title("History of P_alive")
return ax
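# Illustrative sketch (not part of the original module), assuming the lifetimes
# BetaGeoFitter API and the bundled transaction log with 'id' and 'date' columns.
def _example_plot_history_alive():
    from lifetimes import BetaGeoFitter
    from lifetimes.datasets import load_transaction_data
    from lifetimes.utils import summary_data_from_transaction_data
    transactions = load_transaction_data()
    summary = summary_data_from_transaction_data(transactions, "id", "date")
    bgf = BetaGeoFitter().fit(summary["frequency"], summary["recency"], summary["T"])
    # Probability-alive path over 365 days for one customer's purchase history.
    one_customer = transactions[transactions["id"] == transactions["id"].iloc[0]]
    return plot_history_alive(bgf, 365, one_customer, "date")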
def plot_cumulative_transactions(
model,
transactions,
datetime_col,
customer_id_col,
t,
t_cal,
datetime_format=None,
freq="D",
set_index_date=False,
title="Tracking Cumulative Transactions",
xlabel="day",
ylabel="Cumulative Transactions",
ax=None,
**kwargs
):
"""
Plot a figure of the predicted and actual cumulative transactions of users.
Parameters
----------
model: lifetimes model
A fitted lifetimes model
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in transactions that denotes the datetime the purchase was made.
customer_id_col: str
The column in transactions that denotes the customer_id
t: float
The number of time units since the beginning of the
data for which we want to calculate cumulative transactions
t_cal: float
A marker used to indicate where the vertical line for plotting should be.
datetime_format: str, optional
A string that represents the timestamp format. Useful if Pandas
can't understand the provided format.
freq: str, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc.
Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
set_index_date: bool, optional
When True set date as Pandas DataFrame index, default False - number of time units
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the pandas.DataFrame.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if ax is None:
ax = plt.subplot(111)
df_cum_transactions = expected_cumulative_transactions(
model,
transactions,
datetime_col,
customer_id_col,
t,
datetime_format=datetime_format,
freq=freq,
set_index_date=set_index_date,
)
ax = df_cum_transactions.plot(ax=ax, title=title, **kwargs)
if set_index_date:
x_vline = df_cum_transactions.index[int(t_cal)]
xlabel = "date"
else:
x_vline = t_cal
ax.axvline(x=x_vline, color="r", linestyle="--")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def plot_incremental_transactions(
model,
transactions,
datetime_col,
customer_id_col,
t,
t_cal,
datetime_format=None,
freq="D",
set_index_date=False,
title="Tracking Daily Transactions",
xlabel="day",
ylabel="Transactions",
ax=None,
**kwargs
):
"""
Plot a figure of the predicted and actual incremental transactions of users.
Parameters
----------
model: lifetimes model
A fitted lifetimes model
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in transactions that denotes the datetime the purchase was made.
customer_id_col: str
The column in transactions that denotes the customer_id
t: float
The number of time units since the beginning of the
data for which we want to calculate cumulative transactions
t_cal: float
A marker used to indicate where the vertical line for plotting should be.
datetime_format: str, optional
A string that represents the timestamp format. Useful if Pandas
can't understand the provided format.
freq: str, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc.
Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
set_index_date: bool, optional
When True set date as Pandas DataFrame index, default False - number of time units
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the pandas.DataFrame.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if ax is None:
ax = plt.subplot(111)
df_cum_transactions = expected_cumulative_transactions(
model,
transactions,
datetime_col,
customer_id_col,
t,
datetime_format=datetime_format,
freq=freq,
set_index_date=set_index_date,
)
# get incremental from cumulative transactions
df_cum_transactions = df_cum_transactions.apply(lambda x: x - x.shift(1))
ax = df_cum_transactions.plot(ax=ax, title=title, **kwargs)
if set_index_date:
x_vline = df_cum_transactions.index[int(t_cal)]
xlabel = "date"
else:
x_vline = t_cal
ax.axvline(x=x_vline, color="r", linestyle="--")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def plot_transaction_rate_heterogeneity(
model,
suptitle="Heterogeneity in Transaction Rate",
xlabel="Transaction Rate",
ylabel="Density",
suptitle_fontsize=14,
**kwargs
):
"""
Plot the estimated gamma distribution of lambda (customers' propensities to purchase).
Parameters
----------
model: lifetimes model
A fitted lifetimes model, for now only for BG/NBD
suptitle: str, optional
Figure suptitle
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
r, alpha = model._unload_params("r", "alpha")
rate_mean = r / alpha
rate_var = r / alpha ** 2
rv = stats.gamma(r, scale=1 / alpha)
lim = rv.ppf(0.99)
x = np.linspace(0, lim, 100)
fig, ax = plt.subplots(1)
fig.suptitle("Heterogeneity in Transaction Rate", fontsize=suptitle_fontsize, fontweight="bold")
ax.set_title("mean: {:.3f}, var: {:.3f}".format(rate_mean, rate_var))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.plot(x, rv.pdf(x), **kwargs)
return ax
def plot_dropout_rate_heterogeneity(
model,
suptitle="Heterogeneity in Dropout Probability",
xlabel="Dropout Probability p",
ylabel="Density",
suptitle_fontsize=14,
**kwargs
):
"""
Plot the estimated beta distribution of p.
p - (customers' probability of dropping out immediately after a transaction).
Parameters
----------
model: lifetimes model
A fitted lifetimes model, for now only for BG/NBD
suptitle: str, optional
Figure suptitle
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
a, b = model._unload_params("a", "b")
beta_mean = a / (a + b)
beta_var = a * b / ((a + b) ** 2) / (a + b + 1)
rv = stats.beta(a, b)
lim = rv.ppf(0.99)
x = np.linspace(0, lim, 100)
fig, ax = plt.subplots(1)
fig.suptitle(suptitle, fontsize=suptitle_fontsize, fontweight="bold")
ax.set_title("mean: {:.3f}, var: {:.3f}".format(beta_mean, beta_var))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.plot(x, rv.pdf(x), **kwargs)
return ax
def forceAspect(ax, aspect=1):
im = ax.get_images()
extent = im[0].get_extent()
ax.set_aspect(abs((extent[1] - extent[0]) / (extent[3] - extent[2])) / aspect)
|
|
from typing import List, Optional
from urllib.parse import urlparse
from zeus.exceptions import CommandError, HostError, InvalidPublicKey, UnknownRevision
from zeus.utils.functional import memoize
from zeus.utils import sentry, timezone
from .base import Vcs, RevisionResult, BufferParser
LOG_FORMAT = "%H\x01%an <%ae>\x01%at\x01%cn <%ce>\x01%ct\x01%P\x01%B\x02"
ORIGIN_PREFIX = "remotes/origin/"
class GitVcs(Vcs):
binary_path = "git"
def get_default_env(self) -> dict:
return {"GIT_SSH": self.ssh_connect_path}
def get_default_branch(self) -> str:
return "master"
def get_default_revision(self) -> str:
return "master"
@memoize
def remote_url(self) -> str:
username: Optional[str] = self.username
if self.url.startswith("ssh:") and not self.username:
username = "git"
if username and self.url.startswith(("ssh:", "http:", "https:")):
parsed = urlparse(self.url)
url = "%s://%s@%s/%s" % (
parsed.scheme,
parsed.username or username,
"{}{}".format(
parsed.hostname, ":%s" % (parsed.port,) if parsed.port else ""
),
parsed.path.lstrip("/"),
)
else:
url = self.url
return url
async def get_known_branches(self, commit_id=None) -> list:
""" List all branches or those related to the commit for this repo.
Either gets all the branches (if commit_id is not specified) or only
the branches related to the given commit reference.
:param commit_id: A commit ID for fetching all related branches. If not
specified, returns all branch names for this repository.
:return: List of branches for the commit, or all branches for the repo.
"""
results = []
command_parameters = ["branch", "-a"]
if commit_id:
command_parameters.extend(["--contains", commit_id])
output = await self.run(command_parameters)
for result in output.splitlines():
# HACK(dcramer): is there a better way around removing the prefix?
result = result[2:].strip()
if result.startswith(ORIGIN_PREFIX):
result = result[len(ORIGIN_PREFIX) :]
if result == "HEAD":
continue
results.append(result)
return list(set(results))
async def run(self, cmd, **kwargs) -> str:
with sentry.Span("vcs.run-command", description=self.remote_url) as span:
span.set_data("command", " ".join(cmd))
span.set_data("backend", "git")
cmd = [self.binary_path] + cmd
try:
output = await super().run(cmd, **kwargs)
span.set_data("output_bytes", len(output))
return output
except CommandError as e:
stderr = e.stderr.decode("utf-8") if e.stderr else ""
if "unknown revision or path" in stderr:
# fatal: ambiguous argument '82f750e7a3b692e049b95ed66bf9149f56218733^..82f750e7a3b692e049b95ed66bf9149f56218733': unknown revision or path not in the working tree.\nUse '--' to separate paths from revisions, like this:\n'git <command> [<revision>...] -- [<file>...]'\n
raise UnknownRevision(
ref=stderr.split("fatal: ambiguous argument ")[-1].split(
"^", 1
)[0],
cmd=e.cmd,
retcode=e.retcode,
stdout=e.stdout,
stderr=e.stderr,
) from e
if "fatal: bad object" in stderr:
# bad object 5d953e751835a52472ca2e1906023435a71cb5e4\n
raise UnknownRevision(
ref=stderr.split("\n")[0].split("bad object ", 1)[-1],
cmd=e.cmd,
retcode=e.retcode,
stdout=e.stdout,
stderr=e.stderr,
) from e
if "Permission denied (publickey)" in stderr:
raise InvalidPublicKey(
cmd=e.cmd, retcode=e.retcode, stdout=e.stdout, stderr=e.stderr
) from e
if "fatal: protocol error: bad line length character" in stderr:
raise HostError(
cmd=e.cmd, retcode=e.retcode, stdout=e.stdout, stderr=e.stderr
) from e
raise
async def clone(self):
with sentry.Span("vcs.clone", description=self.remote_url) as par_span:
par_span.set_tag("backend", "git")
await self.run(["clone", "--mirror", self.remote_url, self.path])
async def update(self, allow_cleanup=False):
with sentry.Span("vcs.update", description=self.remote_url) as par_span:
par_span.set_tag("backend", "git")
par_span.set_tag("allow_cleanup", allow_cleanup)
if allow_cleanup:
await self.run(["fetch", "--all", "--force", "-p"])
else:
await self.run(["fetch", "--all", "--force"])
async def cleanup(self):
if self.exists():
await self.run(["remote", "prune", "origin"])
await self.run(["gc"])
async def log(
self,
parent=None,
branch=None,
author=None,
offset=0,
limit=100,
timeout=None,
update_if_exists=False,
) -> List[RevisionResult]:
""" Gets the commit log for the repository.
Each revision returned includes all the branches with which this commit
is associated. There will always be at least one associated branch.
See documentation for the base for general information on this function.
"""
# TODO(dcramer): we should make this streaming
cmd = ["log", "--date-order", "--pretty=format:%s" % (LOG_FORMAT,)]
if author:
cmd.append("--author=%s" % (author,))
if offset:
cmd.append("--skip=%d" % (offset,))
if limit:
cmd.append("--max-count=%d" % (limit,))
if parent and branch:
raise ValueError("Both parent and branch cannot be set")
if branch:
if branch == "!default":
branch = self.get_default_branch()
cmd.append(branch)
# TODO(dcramer): determine correct way to paginate results in git as
# combining --all with --parent causes issues
elif not parent:
cmd.append("--all")
if parent:
cmd.append(parent)
with sentry.Span("vcs.log", description=self.remote_url) as par_span:
par_span.set_tag("branch", branch)
par_span.set_tag("parent", parent)
par_span.set_tag("backend", "git")
for n in range(2):
try:
await self.ensure(update_if_exists=update_if_exists)
result = await self.run(cmd, timeout=timeout)
break
except CommandError as cmd_error:
err_msg = cmd_error.stderr
if err_msg and branch and branch in err_msg.decode("utf-8"):
# TODO: https://stackoverflow.com/questions/45096755/fatal-ambiguous-argument-origin-unknown-revision-or-path-not-in-the-working
default_error = ValueError(
'Unable to fetch commit log for branch "{0}".'.format(
branch
)
)
if not await self.run(["remote"]):
# assume we're in a broken state, and try to repair
# XXX: theory is this might happen when OOMKiller axes clone?
result = await self.run(
[
"symbolic-ref",
"refs/remotes/origin/HEAD",
"refs/remotes/origin/{}".format(
self.get_default_branch()
),
]
)
continue
import traceback
import logging
msg = traceback.format_exception(CommandError, cmd_error, None)
logging.warning(msg)
raise default_error from cmd_error
raise
# we use a list instead of a generator as we were always
# needing to coerce to a list anyways
results = []
for chunk in BufferParser(result, "\x02"):
(
sha,
author,
author_date,
committer,
committer_date,
parents,
message,
) = chunk.split("\x01")
# sha may have a leading newline because git log inserts one between records
sha = sha.lstrip("\n")
parents: List[str] = [p for p in parents.split(" ") if p]
author_date = timezone.fromtimestamp(float(author_date))
committer_date = timezone.fromtimestamp(float(committer_date))
results.append(
RevisionResult(
sha=sha,
author=author,
committer=committer,
author_date=author_date,
committer_date=committer_date,
parents=parents,
message=message,
)
)
return results
async def export(self, sha, update_if_exists=False) -> str:
cmd = ["diff", "%s^..%s" % (sha, sha)]
with sentry.Span("vcs.export", self.remote_url) as par_span:
par_span.set_tag("sha", sha)
par_span.set_tag("backend", "git")
await self.ensure(update_if_exists=update_if_exists)
result = await self.run(cmd)
return result
async def show(self, sha, filename, update_if_exists=False) -> str:
cmd = ["show", "{}:{}".format(sha, filename)]
with sentry.Span("vcs.show", self.remote_url) as par_span:
par_span.set_tag("sha", sha)
par_span.set_tag("filename", filename)
par_span.set_tag("backend", "git")
await self.ensure(update_if_exists=update_if_exists)
result = await self.run(cmd)
return result
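# Illustrative sketch (not part of the original module): the URL normalization
# performed by the remote_url property, restated as a standalone function so it
# can be exercised without constructing a Vcs instance.
def _example_normalize_remote_url(url: str, username: Optional[str] = None) -> str:
    if url.startswith("ssh:") and not username:
        username = "git"
    if username and url.startswith(("ssh:", "http:", "https:")):
        parsed = urlparse(url)
        host = "{}{}".format(parsed.hostname, ":%s" % (parsed.port,) if parsed.port else "")
        return "%s://%s@%s/%s" % (parsed.scheme, parsed.username or username,
                                  host, parsed.path.lstrip("/"))
    return url
# e.g. _example_normalize_remote_url("ssh://example.com/repo.git")
#      == "ssh://[email protected]/repo.git"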
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from textwrap import dedent
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.core.targets.resources import Resources
from pants.backend.core.tasks.what_changed import WhatChanged, Workspace
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.source_root import SourceRoot
from pants_test.tasks.test_base import ConsoleTaskTest
class BaseWhatChangedTest(ConsoleTaskTest):
@property
def alias_groups(self):
return BuildFileAliases.create(
targets={
'java_library': JavaLibrary,
'python_library': PythonLibrary,
'jar_library': JarLibrary,
'resources': Resources,
'java_thrift_library': JavaThriftLibrary,
'python_thrift_library': PythonThriftLibrary,
},
context_aware_object_factories={
'source_root': SourceRoot.factory,
},
objects={
'jar': JarDependency,
}
)
@classmethod
def task_type(cls):
return WhatChanged
def workspace(self, files=None, parent=None):
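    # MockWorkspace asserts that the task queried the expected parent revision
    # and then returns the canned list of touched files for the test.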
class MockWorkspace(Workspace):
def touched_files(_, p):
self.assertEqual(parent or 'HEAD', p)
return files or []
return MockWorkspace()
class WhatChangedTestBasic(BaseWhatChangedTest):
def test_nochanges(self):
self.assert_console_output(workspace=self.workspace())
def test_parent(self):
self.assert_console_output(args=['--test-parent=42'], workspace=self.workspace(parent='42'))
def test_files(self):
self.assert_console_output(
'a/b/c',
'd',
'e/f',
args=['--test-files'],
workspace=self.workspace(files=['a/b/c', 'd', 'e/f'])
)
class WhatChangedTest(BaseWhatChangedTest):
def setUp(self):
super(WhatChangedTest, self).setUp()
self.add_to_build_file('root', dedent("""
source_root('src/py', python_library, resources)
source_root('resources/a1', resources)
"""))
self.add_to_build_file('root/src/py/a', dedent("""
python_library(
name='alpha',
sources=['b/c', 'd'],
resources=['test.resources']
)
jar_library(
name='beta',
jars=[
jar(org='gamma', name='ray', rev='1.137.bruce_banner')
]
)
"""))
self.add_to_build_file('root/src/py/1', dedent("""
python_library(
name='numeric',
sources=['2']
)
"""))
self.add_to_build_file('root/src/thrift', dedent("""
java_thrift_library(
name='thrift',
sources=['a.thrift']
)
python_thrift_library(
name='py-thrift',
sources=['a.thrift']
)
"""))
self.add_to_build_file('root/resources/a', dedent("""
resources(
name='a_resources',
sources=['a.resources']
)
"""))
self.add_to_build_file('root/src/java/a', dedent("""
java_library(
name='a_java',
sources=['a.java'],
resources_targets=['root/resources/a:a_resources'],
)
"""))
self.add_to_build_file('root/3rdparty/BUILD.twitter', dedent("""
jar_library(
name='dummy',
jars=[
jar(org='foo', name='ray', rev='1.45')
])
"""))
self.add_to_build_file('root/3rdparty/BUILD', dedent("""
jar_library(
name='dummy1',
jars=[
jar(org='foo1', name='ray', rev='1.45')
])
"""))
    # This is a directory that might confuse case-insensitive file systems (on Macs, for example).
# It should not be treated as a BUILD file.
self.create_dir('root/scripts/a/build')
self.add_to_build_file('root/scripts/BUILD', dedent("""
java_library(
name='scripts',
sources=['a/build/scripts.java'],
)
"""))
def test_owned(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/1:numeric',
workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'])
)
def test_multiply_owned(self):
self.assert_console_output(
'root/src/thrift:thrift',
'root/src/thrift:py-thrift',
workspace=self.workspace(files=['root/src/thrift/a.thrift'])
)
def test_build(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/a:beta',
workspace=self.workspace(files=['root/src/py/a/BUILD'])
)
def test_resource_changed(self):
self.assert_console_output(
'root/src/py/a:alpha',
workspace=self.workspace(files=['root/src/py/a/test.resources'])
)
def test_resource_changed_for_java_lib(self):
self.assert_console_output(
'root/resources/a:a_resources',
workspace=self.workspace(files=['root/resources/a/a.resources'])
)
def test_build_sibling(self):
self.assert_console_output(
'root/3rdparty:dummy',
workspace=self.workspace(files=['root/3rdparty/BUILD.twitter'])
)
def test_resource_type_error(self):
self.add_to_build_file('root/resources/a1', dedent("""
java_library(
name='a1',
sources=['a1.test'],
resources=[1]
)
"""))
self.assert_console_raises(
Exception,
workspace=self.workspace(files=['root/resources/a1/a1.test'])
)
def test_build_directory(self):
    # This should ensure that a directory whose name matches a BUILD file does not cause an exception.
self.assert_console_output(
'root/scripts:scripts',
workspace=self.workspace(files=['root/scripts/a/build', 'root/scripts/a/build/scripts.java'])
)
|
|
"""Test shopping list component."""
import asyncio
from unittest.mock import patch
import pytest
from homeassistant.bootstrap import async_setup_component
from homeassistant.helpers import intent
from homeassistant.components.websocket_api.const import TYPE_RESULT
@pytest.fixture(autouse=True)
def mock_shopping_list_io():
"""Stub out the persistence."""
with patch('homeassistant.components.shopping_list.ShoppingData.save'), \
patch('homeassistant.components.shopping_list.'
'ShoppingData.async_load'):
yield
@asyncio.coroutine
def test_add_item(hass):
"""Test adding an item intent."""
yield from async_setup_component(hass, 'shopping_list', {})
response = yield from intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}
)
assert response.speech['plain']['speech'] == \
"I've added beer to your shopping list"
@asyncio.coroutine
def test_recent_items_intent(hass):
"""Test recent items."""
yield from async_setup_component(hass, 'shopping_list', {})
yield from intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}
)
yield from intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'wine'}}
)
yield from intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'soda'}}
)
response = yield from intent.async_handle(
hass, 'test', 'HassShoppingListLastItems'
)
assert response.speech['plain']['speech'] == \
"These are the top 3 items on your shopping list: soda, wine, beer"
@asyncio.coroutine
def test_deprecated_api_get_all(hass, hass_client):
"""Test the API."""
yield from async_setup_component(hass, 'shopping_list', {})
yield from intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}
)
yield from intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'wine'}}
)
client = yield from hass_client()
resp = yield from client.get('/api/shopping_list')
assert resp.status == 200
data = yield from resp.json()
assert len(data) == 2
assert data[0]['name'] == 'beer'
assert not data[0]['complete']
assert data[1]['name'] == 'wine'
assert not data[1]['complete']
async def test_ws_get_items(hass, hass_ws_client):
"""Test get shopping_list items websocket command."""
await async_setup_component(hass, 'shopping_list', {})
await intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}
)
await intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'wine'}}
)
client = await hass_ws_client(hass)
await client.send_json({
'id': 5,
'type': 'shopping_list/items',
})
msg = await client.receive_json()
assert msg['success'] is True
assert msg['id'] == 5
assert msg['type'] == TYPE_RESULT
assert msg['success']
data = msg['result']
assert len(data) == 2
assert data[0]['name'] == 'beer'
assert not data[0]['complete']
assert data[1]['name'] == 'wine'
assert not data[1]['complete']
@asyncio.coroutine
def test_deprecated_api_update(hass, hass_client):
"""Test the API."""
yield from async_setup_component(hass, 'shopping_list', {})
yield from intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}
)
yield from intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'wine'}}
)
beer_id = hass.data['shopping_list'].items[0]['id']
wine_id = hass.data['shopping_list'].items[1]['id']
client = yield from hass_client()
resp = yield from client.post(
'/api/shopping_list/item/{}'.format(beer_id), json={
'name': 'soda'
})
assert resp.status == 200
data = yield from resp.json()
assert data == {
'id': beer_id,
'name': 'soda',
'complete': False
}
resp = yield from client.post(
'/api/shopping_list/item/{}'.format(wine_id), json={
'complete': True
})
assert resp.status == 200
data = yield from resp.json()
assert data == {
'id': wine_id,
'name': 'wine',
'complete': True
}
beer, wine = hass.data['shopping_list'].items
assert beer == {
'id': beer_id,
'name': 'soda',
'complete': False
}
assert wine == {
'id': wine_id,
'name': 'wine',
'complete': True
}
async def test_ws_update_item(hass, hass_ws_client):
"""Test update shopping_list item websocket command."""
await async_setup_component(hass, 'shopping_list', {})
await intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}
)
await intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'wine'}}
)
beer_id = hass.data['shopping_list'].items[0]['id']
wine_id = hass.data['shopping_list'].items[1]['id']
client = await hass_ws_client(hass)
await client.send_json({
'id': 5,
'type': 'shopping_list/items/update',
'item_id': beer_id,
'name': 'soda'
})
msg = await client.receive_json()
assert msg['success'] is True
data = msg['result']
assert data == {
'id': beer_id,
'name': 'soda',
'complete': False
}
await client.send_json({
'id': 6,
'type': 'shopping_list/items/update',
'item_id': wine_id,
'complete': True
})
msg = await client.receive_json()
assert msg['success'] is True
data = msg['result']
assert data == {
'id': wine_id,
'name': 'wine',
'complete': True
}
beer, wine = hass.data['shopping_list'].items
assert beer == {
'id': beer_id,
'name': 'soda',
'complete': False
}
assert wine == {
'id': wine_id,
'name': 'wine',
'complete': True
}
@asyncio.coroutine
def test_api_update_fails(hass, hass_client):
"""Test the API."""
yield from async_setup_component(hass, 'shopping_list', {})
yield from intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}
)
client = yield from hass_client()
resp = yield from client.post(
'/api/shopping_list/non_existing', json={
'name': 'soda'
})
assert resp.status == 404
beer_id = hass.data['shopping_list'].items[0]['id']
resp = yield from client.post(
'/api/shopping_list/item/{}'.format(beer_id), json={
'name': 123,
})
assert resp.status == 400
async def test_ws_update_item_fail(hass, hass_ws_client):
"""Test failure of update shopping_list item websocket command."""
await async_setup_component(hass, 'shopping_list', {})
await intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}
)
client = await hass_ws_client(hass)
await client.send_json({
'id': 5,
'type': 'shopping_list/items/update',
'item_id': 'non_existing',
'name': 'soda'
})
msg = await client.receive_json()
assert msg['success'] is False
data = msg['error']
assert data == {
'code': 'item_not_found',
'message': 'Item not found'
}
await client.send_json({
'id': 6,
'type': 'shopping_list/items/update',
'name': 123,
})
msg = await client.receive_json()
assert msg['success'] is False
@asyncio.coroutine
def test_deprecated_api_clear_completed(hass, hass_client):
"""Test the API."""
yield from async_setup_component(hass, 'shopping_list', {})
yield from intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}
)
yield from intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'wine'}}
)
beer_id = hass.data['shopping_list'].items[0]['id']
wine_id = hass.data['shopping_list'].items[1]['id']
client = yield from hass_client()
# Mark beer as completed
resp = yield from client.post(
'/api/shopping_list/item/{}'.format(beer_id), json={
'complete': True
})
assert resp.status == 200
resp = yield from client.post('/api/shopping_list/clear_completed')
assert resp.status == 200
items = hass.data['shopping_list'].items
assert len(items) == 1
assert items[0] == {
'id': wine_id,
'name': 'wine',
'complete': False
}
async def test_ws_clear_items(hass, hass_ws_client):
"""Test clearing shopping_list items websocket command."""
await async_setup_component(hass, 'shopping_list', {})
await intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}
)
await intent.async_handle(
hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'wine'}}
)
beer_id = hass.data['shopping_list'].items[0]['id']
wine_id = hass.data['shopping_list'].items[1]['id']
client = await hass_ws_client(hass)
await client.send_json({
'id': 5,
'type': 'shopping_list/items/update',
'item_id': beer_id,
'complete': True
})
msg = await client.receive_json()
assert msg['success'] is True
await client.send_json({
'id': 6,
'type': 'shopping_list/items/clear'
})
msg = await client.receive_json()
assert msg['success'] is True
items = hass.data['shopping_list'].items
assert len(items) == 1
assert items[0] == {
'id': wine_id,
'name': 'wine',
'complete': False
}
@asyncio.coroutine
def test_deprecated_api_create(hass, hass_client):
"""Test the API."""
yield from async_setup_component(hass, 'shopping_list', {})
client = yield from hass_client()
resp = yield from client.post('/api/shopping_list/item', json={
'name': 'soda'
})
assert resp.status == 200
data = yield from resp.json()
assert data['name'] == 'soda'
assert data['complete'] is False
items = hass.data['shopping_list'].items
assert len(items) == 1
assert items[0]['name'] == 'soda'
assert items[0]['complete'] is False
@asyncio.coroutine
def test_deprecated_api_create_fail(hass, hass_client):
"""Test the API."""
yield from async_setup_component(hass, 'shopping_list', {})
client = yield from hass_client()
resp = yield from client.post('/api/shopping_list/item', json={
'name': 1234
})
assert resp.status == 400
assert len(hass.data['shopping_list'].items) == 0
async def test_ws_add_item(hass, hass_ws_client):
"""Test adding shopping_list item websocket command."""
await async_setup_component(hass, 'shopping_list', {})
client = await hass_ws_client(hass)
await client.send_json({
'id': 5,
'type': 'shopping_list/items/add',
'name': 'soda',
})
msg = await client.receive_json()
assert msg['success'] is True
data = msg['result']
assert data['name'] == 'soda'
assert data['complete'] is False
items = hass.data['shopping_list'].items
assert len(items) == 1
assert items[0]['name'] == 'soda'
assert items[0]['complete'] is False
async def test_ws_add_item_fail(hass, hass_ws_client):
"""Test adding shopping_list item failure websocket command."""
await async_setup_component(hass, 'shopping_list', {})
client = await hass_ws_client(hass)
await client.send_json({
'id': 5,
'type': 'shopping_list/items/add',
'name': 123,
})
msg = await client.receive_json()
assert msg['success'] is False
assert len(hass.data['shopping_list'].items) == 0
|
|
import sys
import json
import unicodedata
from PyQt5 import QtGui, QtCore, QtWidgets
class TextSearch:
    #Removes accents and lowercases the text.
def normalizeText(self, text):
return ''.join((c for c in unicodedata.normalize('NFD', text) if unicodedata.category(c) != 'Mn')).lower()
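    # For example (illustrative): normalizeText("Écoute, Chanté!") -> "ecoute, chante!"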
def normalStringSearch(self, searchString, text):
return self.normalizeText(searchString) in self.normalizeText(text)
def normalPartSearch(self, searchString, parts):
for i in parts:
if self.normalStringSearch(searchString, " ".join(i)):
return True
return False
def normalSearch(self, searchString, song):
return self.normalStringSearch(searchString, song["title"]) or self.normalPartSearch(searchString, song["parts"])
def normalSearchAll(self, searchString, songList):
resultList = []
for i, s in enumerate(songList):
if self.normalSearch(searchString, s):
resultList.append(i)
return resultList
def filterSearch(self, searchString, songList, songListIndex):
resultList = []
for i, s in enumerate(songListIndex):
if self.normalSearch(searchString, songList[s]):
resultList.append(s)
return resultList
class TextFitter:
def __init__(self):
pass
def getWidth(self, text, size):
font = QtGui.QFont('Source Code Pro Light', size)
fm = QtGui.QFontMetrics(font)
textWidth = fm.width(text)
textHeight = fm.height()
return textWidth, textHeight
def marginWidth(self, widthMargin, width):
return width - widthMargin * 2
def marginHeight(self, heightMargin, height):
return height - heightMargin * 2
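    # getMaxTextSize starts from a large point size and shrinks it one point at a
    # time until the longest line fits within maxWidth and lineCount lines fit
    # within maxHeight (or the size reaches 1).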
def getMaxTextSize(self, text, maxWidth, maxHeight, lineCount):
size = 200
width, height = self.getWidth(text, size)
while (width > maxWidth or (height * lineCount) > maxHeight) and size > 1:
size -= 1
width, height = self.getWidth(text, size)
return size
#TODO: Split too long rows into columns.
def fitText(self, qp, title, textLines, widthMargin, heightMargin, width, height, showTitle, splitColumns, toggleCenter, columnSplit):
centerX = widthMargin + self.marginWidth(widthMargin, width) / 2
centerY = heightMargin + self.marginHeight(heightMargin, height) / 2
lineCount = len(textLines)
longestText = ""
longestTextLen = 0
if showTitle:
lineCount += 2
longestText = title
longestTextLen = len(title)
for i in textLines:
if len(i) > longestTextLen:
longestText = i
longestTextLen = len(i)
textSize = self.getMaxTextSize(longestText, self.marginWidth(widthMargin, width), self.marginHeight(heightMargin, height), lineCount)
#textSizeFactor = 1
#while (True):
# tempTextSize = self.getMaxTextSize(longestText, self.marginWidth(widthMargin, width) / textSizeFactor, self.marginHeight(heightMargin, height), lineCount / textSizeFactor)
# if (textSize <= tempTextSize and columnSplit):
# textSize = tempTextSize
# textSizeFactor += 1
# else:
# break
font = QtGui.QFont('Source Code Pro Light', textSize)
fm = QtGui.QFontMetrics(font)
textWidth = fm.width(longestText)
textHeight = fm.height()
#print ("Text + column", textSize, textSizeFactor)
pen = QtGui.QPen()
pen.setWidth(3)
pen.setColor(QtCore.Qt.gray)
qp.setPen(pen)
textX = centerX - (textWidth / 2)
textY = heightMargin + textHeight
if (toggleCenter):
textY = centerY - (textHeight * len(textLines)) / 2
if showTitle:
path = QtGui.QPainterPath()
path.addText(textX, textY, font, title)
qp.drawPath(path)
textY += textHeight * 2
#textX -= textWidth
#topY = textY
#currentColumn = 0
qp.setBrush(QtGui.QBrush(QtGui.QColor(255, 255, 255, 200)))
for i, tl in enumerate(textLines):
# print (i, tl)
# if (i // (lineCount // textSizeFactor) != currentColumn):
# print ("Current", i // (lineCount // textSizeFactor), currentColumn, (lineCount // textSizeFactor), lineCount)
# currentColumn += 1
# textY = topY
# textX += textWidth
path = QtGui.QPainterPath()
path.addText(textX, textY, font, tl)
qp.drawPath(path)
textY += textHeight
def drawBackground(self, qp, width, height):
qp.drawRect(0, 0, width, height)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 25))
brush.setStyle(QtCore.Qt.BDiagPattern)
qp.setBrush(brush)
qp.drawRect(0, 0, width, height)
class ControlRoom(QtWidgets.QWidget):
def __init__(self, parent):
super(ControlRoom, self).__init__()
self.parent = parent
self.setWindowTitle("ControlRoom")
self.widthMargin = 50
self.heightMargin = 50
        self.toggleFullscreen = False
print ("Len: {}".format(len(self.parent.songList)))
print ("Len: {}".format(len(self.parent.songListIndex)))
print (self.parent.songList[0]["title"])
def keyPressEvent(self, e):
if not self.parent.searchMode and e.key() == QtCore.Qt.Key_F:
self.toggleFullscreen = not self.toggleFullscreen
self.fullscreen(self.toggleFullscreen)
else:
self.parent.keyPressEvent(e)
def fullscreen(self, state):
if state:
self.showFullScreen()
else:
self.showNormal()
def paintEvent(self, e):
qp = QtGui.QPainter(self)
qp.setRenderHint(QtGui.QPainter.Antialiasing)
qp.setRenderHint(QtGui.QPainter.HighQualityAntialiasing)
brush = QtGui.QBrush(QtCore.Qt.SolidPattern)
qp.setBrush(brush)
qp.drawRect(0, 0, self.width(), self.height())
self.parent.textFitter.drawBackground(qp, self.width(), self.height())
#Draw the red corners of the control room.
qp.setPen(QtGui.QPen(QtGui.QColor(255, 0, 0, 50), 10, QtCore.Qt.SolidLine))
qp.drawLine(0, 100, 100, 0)
qp.drawLine(self.width() - 100, 0, self.width(), 100)
qp.drawLine(0, self.height() - 100, 100, self.height())
qp.drawLine(self.width() - 100, self.height(), self.width(), self.height() - 100)
titleList = []
for i in self.parent.songListIndex:
if i == self.parent.getSelectedSongIndex():
titleList.append("-" + self.parent.songList[i]["title"])
else:
titleList.append(self.parent.songList[i]["title"])
if len(self.parent.songListIndex) > 0:
self.parent.textFitter.fitText(qp, self.parent.getSelectedSong()["title"], titleList, self.widthMargin, self.heightMargin, self.width(), self.height(), True, True, False, True)
else:
pen = QtGui.QPen()
pen.setWidth(3)
pen.setColor(QtCore.Qt.gray)
qp.setPen(pen)
#Write search string:
font = QtGui.QFont('Source Code Pro Light', 24)
path = QtGui.QPainterPath()
path.addText(50, 50, font, self.parent.searchString)
qp.drawPath(path)
class AposSong(QtWidgets.QWidget):
def __init__(self):
super(AposSong, self).__init__()
self.textFitter = TextFitter()
self.textSearch = TextSearch()
self.widthMargin = 50
self.heightMargin = 50
self.selectedSongActive = 0
self.selectedSong = 0
self.selectedPart = 0
self.toggleFullscreen = False
self.showTitle = False
self.showSong = True
self.searchMode = False
self.searchModeFilter = False
self.showPlayList = False
self.toggleCenter = False
self.searchString = ""
self.w = None
        #List of song dicts loaded from chansons.json; each entry has "title" and "parts"
self.songList = []
with open('chansons.json', 'r', encoding='utf8') as fp:
self.songList = json.load(fp)
self.songListIndex = []
self.songListIndex.extend(range(0, len(self.songList)))
self.songPlaylist = []
#self.songList.append({"title": "Hello World!", "parts": [['Line1', 'Line2'], ['Second part line1', 'Second part line2.', 'YAY! Line3']]})
#self.songList.append({"title": "Hello World!", "parts": [['Line1', 'Line2'], ['Second part line1', 'Second part line2.']]})
#self.songList.append({"title": "Praise!", "parts": [['Line1', 'Line2'], ['Second part line1', 'Second part line2.']]})
#with open('chansons.json', 'w', encoding='utf8') as fp:
# json.dump(self.songList, fp)
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 1280, 720)
self.setWindowTitle("AposSong")
self.show()
#f_db = QtGui.QFontDatabase()
#for family in f_db.families():
# print(family)
def keyPressEvent(self, e):
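        # Keyboard handling summary: while a search is active, Escape cancels,
        # Enter runs the search/filter, Backspace deletes the last character, and
        # any other key is appended to the search string. Otherwise: F fullscreen,
        # D control room, Left/Right change part, Up/Down change song, S search,
        # X filter search, H hide/show song, C toggle centering, A set active song,
        # Q add/remove from playlist, W toggle playlist view, T toggle title,
        # 0-9 jump directly to a part.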
if self.searchMode or self.searchModeFilter:
if e.key() == QtCore.Qt.Key_Escape:
print ("Escaped to default mode.")
self.searchMode = False
self.searchModeFilter = False
self.searchString = ""
self.updateDialod()
elif e.key() == QtCore.Qt.Key_Enter or e.key() == QtCore.Qt.Key_Return:
print ("Searching now! {}".format(self.searchString))
resultList = []
if self.searchMode:
resultList = self.textSearch.normalSearchAll(self.searchString, self.songList)
else:
resultList = self.textSearch.filterSearch(self.searchString, self.songList, self.songListIndex)
print (resultList)
for i in resultList:
print (self.songList[i]["title"])
self.songListIndex = resultList
self.selectedSong = 0
self.updateDialod()
self.searchMode = False
self.searchModeFilter = False
elif e.key() == QtCore.Qt.Key_Backspace:
self.searchString = self.searchString[:-1]
print ("Remove last")
self.updateDialod()
else:
try:
char = "%c" % (e.key())
print ("Key pressed: {}, was: {}".format(char, e.key()))
self.searchString += char.lower()
self.updateDialod()
except OverflowError:
print ("Key pressed: error, was: {}".format(e.key()))
pass
else:
if e.key() == QtCore.Qt.Key_F:
self.toggleFullscreen = not self.toggleFullscreen
self.fullscreen(self.toggleFullscreen)
elif e.key() == QtCore.Qt.Key_D:
if self.w is None:
self.w = ControlRoom(self)
self.w.show()
else:
self.w.show()
elif e.key() == QtCore.Qt.Key_Right:
print ("Next part")
self.selectedPart = (self.selectedPart + 1) % len(self.getSelectedSong()["parts"])
self.update()
elif e.key() == QtCore.Qt.Key_Left:
print ("Previous part")
self.selectedPart = (self.selectedPart - 1) % len(self.getSelectedSong()["parts"])
self.update()
elif e.key() == QtCore.Qt.Key_Up:
self.selectedSong = (self.selectedSong - 1) % len(self.songListIndex)
print ("Previous Song {}".format(self.selectedSong))
self.update()
self.updateDialod()
elif e.key() == QtCore.Qt.Key_Down:
self.selectedSong = (self.selectedSong + 1) % len(self.songListIndex)
print ("Next Song {}".format(self.selectedSong))
self.update()
self.updateDialod()
elif e.key() == QtCore.Qt.Key_S:
self.searchMode = True
self.searchString = ""
print ("SearchMode: {}".format(self.searchMode))
self.updateDialod()
elif e.key() == QtCore.Qt.Key_X:
self.searchModeFilter = True
self.searchString = ""
print ("SearchModeFilter: {}".format(self.searchModeFilter))
self.updateDialod()
elif e.key() == QtCore.Qt.Key_H:
self.showSong = not self.showSong
self.update()
print ("Toggle Show song")
elif e.key() == QtCore.Qt.Key_C:
self.toggleCenter = not self.toggleCenter
self.update()
print ("Toggle Centering")
elif e.key() == QtCore.Qt.Key_A:
print ("New active song")
self.setActiveSong()
self.selectedPart = 0
self.update()
elif e.key() == QtCore.Qt.Key_Q:
if self.getSelectedSongIndex() in self.songPlaylist:
print (self.selectedSong, self.getSelectedSongIndex(), len(self.songPlaylist))
self.songPlaylist.remove(self.getSelectedSongIndex())
if self.selectedSong == len(self.songPlaylist):
self.selectedSong -= 1
print ("Removed song from playlist.")
if self.showPlayList:
self.updateDialod()
else:
self.songPlaylist.append(self.getSelectedSongIndex())
print ("Added song to playlist.")
if self.showPlayList:
self.updateDialod()
elif e.key() == QtCore.Qt.Key_W:
print ("Toggle playlist")
self.showPlayList = not self.showPlayList
self.selectedSong = 0
if self.showPlayList:
self.songListIndex = self.songPlaylist
self.updateDialod()
else:
self.songListIndex = []
self.songListIndex.extend(range(0, len(self.songList)))
self.updateDialod()
elif e.key() == QtCore.Qt.Key_T:
self.showTitle = not self.showTitle
self.update()
print ("Toggle Title!")
else:
for key in range(QtCore.Qt.Key_0, QtCore.Qt.Key_9 + 1):
if e.key() == key:
self.selectedPart = (key - QtCore.Qt.Key_0) % len(self.getSelectedSong()["parts"])
self.update()
def getSelectedSong(self):
return self.songList[self.getSelectedSongIndex()]
def getSelectedSongIndex(self):
return self.songListIndex[self.selectedSong]
def getSelectedSongActive(self):
return self.songList[self.getSelectedSongIndexActive()]
def getSelectedSongIndexActive(self):
return self.selectedSongActive
def setActiveSong(self):
self.selectedSongActive = self.songListIndex[self.selectedSong]
def fullscreen(self, state):
if state:
self.showFullScreen()
else:
self.showNormal()
def updateDialod(self):
if self.w is not None:
self.w.update()
def paintEvent(self, e):
#TODO: Center the text as a whole.
# Also, resize the text to fit the screen.
qp = QtGui.QPainter()
qp.begin(self)
qp.setRenderHint(QtGui.QPainter.Antialiasing)
qp.setRenderHint(QtGui.QPainter.HighQualityAntialiasing)
brush = QtGui.QBrush(QtCore.Qt.SolidPattern)
qp.setBrush(brush)
self.textFitter.drawBackground(qp, self.width(), self.height())
if self.showSong:
self.textFitter.fitText(qp, self.getSelectedSongActive()["title"], self.getSelectedSongActive()["parts"][self.selectedPart], self.widthMargin, self.heightMargin, self.width(), self.height(), self.showTitle, False, self.toggleCenter, False)
qp.end()
def main():
app = QtWidgets.QApplication(sys.argv)
ex = AposSong()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
from oslo_log import log as logging
import six
from cinder import context
from cinder.i18n import _LW
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vmax_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class EMCVMAXFCDriver(driver.FibreChannelDriver):
"""EMC FC Drivers for VMAX using SMI-S.
Version history:
1.0.0 - Initial driver
1.1.0 - Multiple pools and thick/thin provisioning,
performance enhancement.
2.0.0 - Add driver requirement functions
2.1.0 - Add consistency group functions
2.1.1 - Fixed issue with mismatched config (bug #1442376)
2.1.2 - Clean up failed clones (bug #1440154)
2.1.3 - Fixed a problem with FAST support (bug #1435069)
2.2.0 - Add manage/unmanage
2.2.1 - Support for SE 8.0.3
2.2.2 - Update Consistency Group
2.2.3 - Pool aware scheduler(multi-pool) support
2.2.4 - Create CG from CG snapshot
2.3.0 - Name change for MV and SG for FAST (bug #1515181)
- Fix for randomly choosing port group. (bug #1501919)
- get_short_host_name needs to be called in find_device_number
(bug #1520635)
- Proper error handling for invalid SLOs (bug #1512795)
- Extend Volume for VMAX3, SE8.1.0.3
https://blueprints.launchpad.net/cinder/+spec/vmax3-extend-volume
- Incorrect SG selected on an attach (#1515176)
- Cleanup Zoning (bug #1501938) NOTE: FC only
- Last volume in SG fix
- _remove_last_vol_and_delete_sg is not being called
for VMAX3 (bug #1520549)
- necessary updates for CG changes (#1534616)
- Changing PercentSynced to CopyState (bug #1517103)
- Getting iscsi ip from port in existing masking view
- Replacement of EMCGetTargetEndpoints api (bug #1512791)
- VMAX3 snapvx improvements (bug #1522821)
2.3.1 - VMAX2/VMAX3 iscsi multipath support (iscsi only)
2.3.2 - VMAX oversubscription Support (blueprint vmax-oversubscription)
2.3.3 - VMAX Driver - Live Migration for VMAX3 (bug #1587967)
2.3.4 - additional locking (bug #1630535)(bug #1660374)
2.3.5 - remove_and_reset_members fix
2.3.6 - copy state fix (bug #1660378)
"""
VERSION = "2.3.6"
def __init__(self, *args, **kwargs):
super(EMCVMAXFCDriver, self).__init__(*args, **kwargs)
self.common = emc_vmax_common.EMCVMAXCommon(
'FC',
self.VERSION,
configuration=self.configuration)
self.zonemanager_lookup_service = fczm_utils.create_lookup_service()
def check_for_setup_error(self):
pass
def create_volume(self, volume):
"""Creates a EMC(VMAX/VNX) volume."""
volpath = self.common.create_volume(volume)
model_update = {}
volume['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
volpath = self.common.create_volume_from_snapshot(volume, snapshot)
model_update = {}
volume['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
volpath = self.common.create_cloned_volume(volume, src_vref)
model_update = {}
volume['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = volume['provider_location']
return model_update
def delete_volume(self, volume):
"""Deletes an EMC volume."""
self.common.delete_volume(volume)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
ctxt = context.get_admin_context()
volumename = snapshot['volume_name']
index = volumename.index('-')
volumeid = volumename[index + 1:]
volume = self.db.volume_get(ctxt, volumeid)
volpath = self.common.create_snapshot(snapshot, volume)
model_update = {}
snapshot['provider_location'] = six.text_type(volpath)
model_update['provider_location'] = snapshot['provider_location']
return model_update
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
ctxt = context.get_admin_context()
volumename = snapshot['volume_name']
index = volumename.index('-')
volumeid = volumename[index + 1:]
volume = self.db.volume_get(ctxt, volumeid)
self.common.delete_snapshot(snapshot, volume)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
pass
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
pass
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
pass
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
}
}
"""
device_info = self.common.initialize_connection(
volume, connector)
device_number = device_info['hostlunid']
storage_system = device_info['storagesystem']
target_wwns, init_targ_map = self._build_initiator_target_map(
storage_system, volume, connector)
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': device_number,
'target_discovered': True,
'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
LOG.debug("Return FC data for zone addition: %(data)s.",
{'data': data})
return data
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector.
Return empty data if other volumes are in the same zone.
The FibreChannel ZoneManager doesn't remove zones
if there isn't an initiator_target_map in the
return of terminate_connection.
:param volume: the volume object
:param connector: the connector object
:returns: dict -- the target_wwns and initiator_target_map if the
zone is to be removed, otherwise empty
"""
data = {'driver_volume_type': 'fibre_channel',
'data': {}}
loc = volume['provider_location']
name = ast.literal_eval(loc)
storage_system = name['keybindings']['SystemName']
LOG.debug("Start FC detach process for volume: %(volume)s.",
{'volume': volume['name']})
mvInstanceName = self.common.get_masking_view_by_volume(
volume, connector)
if mvInstanceName is not None:
portGroupInstanceName = (
self.common.get_port_group_from_masking_view(
mvInstanceName))
initiatorGroupInstanceName = (
self.common.get_initiator_group_from_masking_view(
mvInstanceName))
LOG.debug("Found port group: %(portGroup)s "
"in masking view %(maskingView)s.",
{'portGroup': portGroupInstanceName,
'maskingView': mvInstanceName})
# Map must be populated before the terminate_connection
target_wwns, init_targ_map = self._build_initiator_target_map(
storage_system, volume, connector)
self.common.terminate_connection(volume, connector)
LOG.debug("Looking for masking views still associated with "
"Port Group %s.", portGroupInstanceName)
# check if the initiator group has been deleted
checkIgInstanceName = (
self.common.check_ig_instance_name(initiatorGroupInstanceName))
# if it has not been deleted, check for remaining masking views
if checkIgInstanceName is not None:
mvInstances = self._get_common_masking_views(
portGroupInstanceName, initiatorGroupInstanceName)
if len(mvInstances) > 0:
LOG.debug("Found %(numViews)lu MaskingViews.",
{'numViews': len(mvInstances)})
data = {'driver_volume_type': 'fibre_channel',
'data': {}}
else: # no masking views found
LOG.debug("No MaskingViews were found. Deleting zone.")
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
LOG.debug("Return FC data for zone removal: %(data)s.",
{'data': data})
else: # The initiator group has been deleted
LOG.debug("Initiator Group has been deleted. Deleting zone.")
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
LOG.debug("Return FC data for zone removal: %(data)s.",
{'data': data})
else:
LOG.warning(_LW("Volume %(volume)s is not in any masking view."),
{'volume': volume['name']})
return data
def _get_common_masking_views(
self, portGroupInstanceName, initiatorGroupInstanceName):
"""Check to see the existence of mv in list"""
mvInstances = []
mvInstancesByPG = self.common.get_masking_views_by_port_group(
portGroupInstanceName)
mvInstancesByIG = self.common.get_masking_views_by_initiator_group(
initiatorGroupInstanceName)
for mvInstanceByPG in mvInstancesByPG:
if mvInstanceByPG in mvInstancesByIG:
mvInstances.append(mvInstanceByPG)
return mvInstances
def _build_initiator_target_map(self, storage_system, volume, connector):
"""Build the target_wwns and the initiator target map."""
target_wwns = []
init_targ_map = {}
initiator_wwns = connector['wwpns']
if self.zonemanager_lookup_service:
fc_targets = self.common.get_target_wwns_from_masking_view(
storage_system, volume, connector)
mapping = (
self.zonemanager_lookup_service.
get_device_mapping_from_network(initiator_wwns, fc_targets))
for entry in mapping:
map_d = mapping[entry]
target_wwns.extend(map_d['target_port_wwn_list'])
for initiator in map_d['initiator_port_wwn_list']:
init_targ_map[initiator] = map_d['target_port_wwn_list']
else: # No lookup service, pre-zoned case.
target_wwns = self.common.get_target_wwns(storage_system,
connector)
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwns
return list(set(target_wwns)), init_targ_map
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
self.common.extend_volume(volume, new_size)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
:param refresh: boolean -- If True, run update the stats first.
:returns: dict -- the stats dict
"""
if refresh:
self.update_volume_stats()
return self._stats
def update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats")
data = self.common.update_volume_stats()
data['storage_protocol'] = 'FC'
data['driver_version'] = self.VERSION
self._stats = data
def migrate_volume(self, ctxt, volume, host):
"""Migrate a volume from one Volume Backend to another.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param host: the host dict holding the relevant target(destination)
information
:returns: boolean -- Always returns True
:returns: dict -- Empty dict {}
"""
return self.common.migrate_volume(ctxt, volume, host)
def retype(self, ctxt, volume, new_type, diff, host):
"""Migrate volume to another host using retype.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param new_type: the new volume type.
:param diff: Unused parameter.
:param host: the host dict holding the relevant
target(destination) information
:returns: boolean -- True if retype succeeded, False if error
"""
return self.common.retype(ctxt, volume, new_type, diff, host)
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
self.common.create_consistencygroup(context, group)
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
return self.common.delete_consistencygroup(
context, group, volumes)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
return self.common.create_cgsnapshot(context, cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
return self.common.delete_cgsnapshot(context, cgsnapshot, snapshots)
def manage_existing(self, volume, external_ref):
"""Manages an existing VMAX Volume (import to Cinder).
Renames the Volume to match the expected name for the volume.
Also need to consider things like QoS, Emulation, account/tenant.
"""
return self.common.manage_existing(volume, external_ref)
def manage_existing_get_size(self, volume, external_ref):
"""Return size of an existing VMAX volume to manage_existing.
:param self: reference to class
:param volume: the volume object including the volume_type_id
:param external_ref: reference to the existing volume
:returns: size of the volume in GB
"""
return self.common.manage_existing_get_size(volume, external_ref)
def unmanage(self, volume):
"""Export VMAX volume from Cinder.
Leave the volume intact on the backend array.
"""
return self.common.unmanage(volume)
def update_consistencygroup(self, context, group,
add_volumes, remove_volumes):
"""Updates LUNs in consistency group."""
return self.common.update_consistencygroup(group, add_volumes,
remove_volumes)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
"""Creates the consistency group from source.
Currently the source can only be a cgsnapshot.
:param context: the context
:param group: the consistency group object to be created
:param volumes: volumes in the consistency group
:param cgsnapshot: the source consistency group snapshot
:param snapshots: snapshots of the source volumes
:param source_cg: the dictionary of a consistency group as source.
:param source_vols: a list of volume dictionaries in the source_cg.
"""
return self.common.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot, snapshots, source_cg,
source_vols)
|
|
from functools import partial
from ...external.qt.QtGui import (QWidget, QAction,
QToolButton, QIcon, QMessageBox)
from ...external.qt.QtCore import Qt
import matplotlib.cm as cm
from .data_viewer import DataViewer
from ... import core
from ...clients.image_client import ImageClient
from ..mouse_mode import (RectangleMode, CircleMode, PolyMode,
ContrastMode, ContourMode)
from ..glue_toolbar import GlueToolbar
from .mpl_widget import MplWidget
from ..ui.imagewidget import Ui_ImageWidget
from .. import glue_qt_resources # pylint: disable=W0611
from ..decorators import set_cursor
from ..qtutil import cmap2pixmap, select_rgb
WARN_THRESH = 10000000 # warn when contouring large images
class ImageWidget(DataViewer):
LABEL = "Image Viewer"
def __init__(self, data, parent=None):
super(ImageWidget, self).__init__(data, parent)
self.central_widget = MplWidget()
self.option_widget = QWidget()
self.setCentralWidget(self.central_widget)
self.ui = Ui_ImageWidget()
self.ui.setupUi(self.option_widget)
self.client = ImageClient(data,
self.central_widget.canvas.fig,
artist_container=self._container)
self._tweak_geometry()
self._create_actions()
self.make_toolbar()
self._connect()
self._init_widgets()
self.set_data(0)
self.set_orientation(0)
self.statusBar().setSizeGripEnabled(False)
self.setFocusPolicy(Qt.StrongFocus)
def _tweak_geometry(self):
self.central_widget.resize(600, 400)
self.resize(self.central_widget.size())
def _create_actions(self):
#pylint: disable=E1101
def act(name, cmap):
a = QAction(name, self)
a.triggered.connect(lambda *args: self.client.set_cmap(cmap))
pm = cmap2pixmap(cmap)
a.setIcon(QIcon(pm))
return a
self._cmaps = []
self._cmaps.append(act('Gray', cm.gray))
self._cmaps.append(act('Purple-Blue', cm.PuBu))
self._cmaps.append(act('Yellow-Green-Blue', cm.YlGnBu))
self._cmaps.append(act('Yellow-Orange-Red', cm.YlOrRd))
self._cmaps.append(act('Red-Purple', cm.RdPu))
self._cmaps.append(act('Blue-Green', cm.BuGn))
self._cmaps.append(act('Hot', cm.hot))
self._cmaps.append(act('Red-Blue', cm.RdBu))
self._cmaps.append(act('Red-Yellow-Blue', cm.RdYlBu))
self._cmaps.append(act('Purple-Orange', cm.PuOr))
self._cmaps.append(act('Purple-Green', cm.PRGn))
self._rgb_add = QAction('RGB', self)
self._rgb_add.triggered.connect(self._add_rgb)
def _add_rgb(self):
drgb = select_rgb(self._data, default=self.current_data)
if drgb is not None:
self.client.add_rgb_layer(*drgb)
def make_toolbar(self):
result = GlueToolbar(self.central_widget.canvas, self, name='Image')
for mode in self._mouse_modes():
result.add_mode(mode)
tb = QToolButton()
tb.setWhatsThis("Set color scale")
tb.setToolTip("Set color scale")
icon = QIcon(":icons/glue_rainbow.png")
tb.setIcon(icon)
tb.setPopupMode(QToolButton.InstantPopup)
tb.addActions(self._cmaps)
result.addWidget(tb)
result.addAction(self._rgb_add)
#connect viewport update buttons to client commands to
#allow resampling
cl = self.client
result.buttons['HOME'].triggered.connect(cl.check_update)
result.buttons['FORWARD'].triggered.connect(cl.check_update)
result.buttons['BACK'].triggered.connect(cl.check_update)
self.addToolBar(result)
return result
@set_cursor(Qt.WaitCursor)
def apply_roi(self, mode):
roi = mode.roi()
self.client.apply_roi(roi)
def _mouse_modes(self):
axes = self.client.axes
rect = RectangleMode(axes, roi_callback=self.apply_roi)
circ = CircleMode(axes, roi_callback=self.apply_roi)
poly = PolyMode(axes, roi_callback=self.apply_roi)
contrast = ContrastMode(axes, move_callback=self._set_norm)
contour = ContourMode(axes, release_callback=self._contour_roi)
return [rect, circ, poly, contour, contrast]
def _init_widgets(self):
self.ui.imageSlider.hide()
self.ui.sliceComboBox.hide()
self.ui.sliceComboBox.addItems(["xy", "xz", "yz"])
def add_data(self, data):
"""Private method to ingest new data into widget"""
self.client.add_layer(data)
self.add_data_to_combo(data)
self.set_data(self._data_index(data))
return True
def add_subset(self, subset):
self.client.add_scatter_layer(subset)
assert subset in self.client.artists
def _data_index(self, data):
combo = self.ui.displayDataCombo
for i in range(combo.count()):
if combo.itemData(i) is data:
return i
return None
def add_data_to_combo(self, data):
""" Add a data object to the combo box, if not already present
"""
if not self.client.can_image_data(data):
return
combo = self.ui.displayDataCombo
label = data.label
pos = combo.findText(label)
if pos == -1:
combo.addItem(label, userData=data)
assert combo.findText(label) >= 0
@property
def current_data(self):
if self.ui.displayDataCombo.count() == 0:
return
index = self.ui.displayDataCombo.currentIndex()
return self.ui.displayDataCombo.itemData(index)
def set_data(self, index):
if index is None:
return
if self.ui.displayDataCombo.count() == 0:
return
data = self.ui.displayDataCombo.itemData(index)
self.client.set_data(data)
self.ui.displayDataCombo.setCurrentIndex(index)
self.set_attribute_combo(data)
if not self.client.is_3D:
self.ui.imageSlider.hide()
self.ui.sliceComboBox.hide()
self.ui.orientationLabel.hide()
else:
self.ui.imageSlider.show()
self.ui.sliceComboBox.show()
self.ui.orientationLabel.show()
self.set_slider_range()
self._update_window_title()
def set_attribute(self, index):
combo = self.ui.attributeComboBox
component_id = combo.itemData(index)
self.client.set_attribute(component_id)
self.ui.attributeComboBox.setCurrentIndex(index)
self._update_window_title()
def set_attribute_combo(self, data):
""" Update attribute combo box to reflect components in data"""
combo = self.ui.attributeComboBox
combo.blockSignals(True)
combo.clear()
fields = data.visible_components
index = 0
for i, f in enumerate(fields):
combo.addItem(f.label, userData=f)
if f == self.client.display_attribute:
index = i
combo.blockSignals(False)
combo.setCurrentIndex(index)
self.set_attribute(index)
def set_slider(self, index):
self.client.slice_ind = index
self.ui.imageSlider.setValue(index)
def set_orientation(self, ori):
# ignore for 2D data (sometimes gets triggered when widgets
# switch state)
if not self.client.is_3D:
return
self.client.set_slice_ori(ori)
self.ui.sliceComboBox.setCurrentIndex(ori)
self.set_slider_range()
def set_slider_range(self):
self.ui.imageSlider.setRange(*self.client.slice_bounds())
def _connect(self):
ui = self.ui
ui.displayDataCombo.currentIndexChanged.connect(self.set_data)
ui.attributeComboBox.currentIndexChanged.connect(self.set_attribute)
ui.sliceComboBox.currentIndexChanged.connect(self.set_orientation)
ui.imageSlider.sliderMoved.connect(self.set_slider)
def register_to_hub(self, hub):
super(ImageWidget, self).register_to_hub(hub)
self.client.register_to_hub(hub)
dc_filt = lambda x: x.sender is self.client._data
layer_present_filter = lambda x: x.data in self.client.artists
hub.subscribe(self,
core.message.DataCollectionAddMessage,
handler=lambda x: self.add_data_to_combo(x.data),
filter=dc_filt)
hub.subscribe(self,
core.message.DataCollectionDeleteMessage,
handler=lambda x: self.remove_data_from_combo(x.data),
filter=dc_filt)
hub.subscribe(self,
core.message.DataUpdateMessage,
handler=lambda x: self._sync_data_labels()
)
hub.subscribe(self,
core.message.ComponentsChangedMessage,
handler=lambda x: self.set_attribute_combo(x.data),
filter=layer_present_filter)
def unregister(self, hub):
for obj in [self, self.client]:
hub.unsubscribe_all(obj)
def remove_data_from_combo(self, data):
""" Remvoe a data object from the combo box, if present """
combo = self.ui.displayDataCombo
pos = combo.findText(data.label)
if pos >= 0:
combo.removeItem(pos)
def _set_norm(self, mode):
""" Use the `ContrastMouseMode` to adjust the transfer function """
clip_lo, clip_hi = mode.get_clip_percentile()
stretch = mode.stretch
return self.client.set_norm(clip_lo=clip_lo, clip_hi=clip_hi,
stretch=stretch,
bias=mode.bias, contrast=mode.contrast)
@set_cursor(Qt.WaitCursor)
def _contour_roi(self, mode):
""" Callback for ContourMode. Set edit_subset as new ROI """
im = self.client.display_data
att = self.client.display_attribute
if im is None or att is None:
return
if im.size > WARN_THRESH and not self._confirm_large_image(im):
return
roi = mode.roi(im[att])
if roi:
self.client.apply_roi(roi)
def _update_window_title(self):
if self.client.display_data is None:
title = ''
else:
title = "%s - %s" % (self.client.display_data.label,
self.client.display_attribute.label)
self.setWindowTitle(title)
def _update_data_combo(self):
combo = self.ui.displayDataCombo
for i in range(combo.count()):
combo.setItemText(i, combo.itemData(i).label)
def _sync_data_labels(self):
self._update_window_title()
self._update_data_combo()
def __str__(self):
return "Image Widget"
def _confirm_large_image(self, data):
"""Ask user to confirm expensive contour operations
:rtype: bool. Whether the user wishes to continue
"""
warn_msg = ("WARNING: Image has %i pixels, and may render slowly."
" Continue?" % data.size)
title = "Contour large image?"
ok = QMessageBox.Ok
cancel = QMessageBox.Cancel
buttons = ok | cancel
result = QMessageBox.question(self, title, warn_msg,
buttons=buttons,
defaultButton=cancel)
return result == ok
def options_widget(self):
return self.option_widget
|
|
from __future__ import absolute_import
# TODO(dcramer): this is heavily inspired by pytest-selenium, and it's possible
# we could simply inherit from the plugin at this point
import os
import pytest
import signal
from datetime import datetime
from django.conf import settings
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from six.moves.urllib.parse import quote, urlparse
# if we're not running in a PR, we disable Percy because it's a push
# to a branch, and we don't want Percy comparing things
# we do need to ensure it still runs on master so that changes get updated
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') == 'false' and os.environ.get('TRAVIS_BRANCH', 'master') != 'master':
os.environ.setdefault('PERCY_ENABLE', '0')
class Browser(object):
def __init__(self, driver, live_server, percy):
self.driver = driver
self.live_server_url = live_server.url
self.percy = percy
self.domain = urlparse(self.live_server_url).hostname
self._has_initialized_cookie_store = False
def __getattr__(self, attr):
return getattr(self.driver, attr)
def route(self, path, *args, **kwargs):
"""
Return the absolute URI for a given route in Sentry.
"""
return '{}/{}'.format(self.live_server_url, path.lstrip('/').format(
*args, **kwargs
))
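        # e.g. (illustrative) route('/organizations/{}/issues/', 'acme')
        #   -> 'http://<live-server-host>:<port>/organizations/acme/issues/'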
def get(self, path, *args, **kwargs):
self.driver.get(self.route(path), *args, **kwargs)
return self
def post(self, path, *args, **kwargs):
self.driver.post(self.route(path), *args, **kwargs)
return self
def put(self, path, *args, **kwargs):
self.driver.put(self.route(path), *args, **kwargs)
return self
def delete(self, path, *args, **kwargs):
self.driver.delete(self.route(path), *args, **kwargs)
return self
def element(self, selector):
return self.driver.find_element_by_css_selector(selector)
def element_exists(self, selector):
try:
self.element(selector)
except NoSuchElementException:
return False
return True
def click(self, selector):
self.element(selector).click()
def wait_until(self, selector, timeout=3):
"""
Waits until ``selector`` is found in the browser, or until ``timeout``
is hit, whichever happens first.
"""
from selenium.webdriver.common.by import By
WebDriverWait(self.driver, timeout).until(
expected_conditions.presence_of_element_located(
(By.CSS_SELECTOR, selector)
)
)
return self
def wait_until_not(self, selector, timeout=3):
"""
Waits until ``selector`` is NOT found in the browser, or until
``timeout`` is hit, whichever happens first.
"""
from selenium.webdriver.common.by import By
WebDriverWait(self.driver, timeout).until_not(
expected_conditions.presence_of_element_located(
(By.CSS_SELECTOR, selector)
)
)
return self
def snapshot(self, name):
"""
Capture a screenshot of the current state of the page. Screenshots
are captured both locally (in ``cls.screenshots_path``) as well as
with Percy (when enabled).
"""
# TODO(dcramer): ideally this would take the executing test package
# into account for duplicate names
self.percy.snapshot(name=name)
return self
def save_cookie(self, name, value, path='/',
expires='Tue, 20 Jun 2025 19:07:44 GMT'):
# XXX(dcramer): "hit a url before trying to set cookies"
if not self._has_initialized_cookie_store:
self.get('/')
self._has_initialized_cookie_store = True
# XXX(dcramer): PhantomJS does not let us add cookies with the native
# selenium API because....
# http://stackoverflow.com/questions/37103621/adding-cookies-working-with-firefox-webdriver-but-not-in-phantomjs
# TODO(dcramer): this should be escaped, but idgaf
self.driver.execute_script("document.cookie = '{name}={value}; path={path}; domain={domain}; expires={expires}';\n".format(
name=name,
value=value,
expires=expires,
path=path,
domain=self.domain,
))
def pytest_addoption(parser):
parser.addini('selenium_driver',
help='selenium driver (phantomjs or firefox)')
group = parser.getgroup('selenium', 'selenium')
group._addoption('--selenium-driver',
dest='selenium_driver',
help='selenium driver (phantomjs or firefox)')
group._addoption('--phantomjs-path',
dest='phantomjs_path',
help='path to phantomjs driver')
def pytest_configure(config):
if hasattr(config, 'slaveinput'):
return # xdist slave
config.option.selenium_driver = config.getoption('selenium_driver') or \
config.getini('selenium_driver') or \
os.getenv('SELENIUM_DRIVER')
@pytest.fixture(scope='session')
def percy(request):
import percy
# Initialize Percy.
loader = percy.ResourceLoader(
root_dir=settings.STATIC_ROOT,
base_url=quote(settings.STATIC_URL),
)
percy_config = percy.Config(default_widths=settings.PERCY_DEFAULT_TESTING_WIDTHS)
percy = percy.Runner(loader=loader, config=percy_config)
percy.initialize_build()
request.addfinalizer(percy.finalize_build)
return percy
@pytest.fixture(scope='function')
def browser(request, percy, live_server):
driver_type = request.config.getoption('selenium_driver')
if driver_type == 'firefox':
driver = webdriver.Firefox()
elif driver_type == 'phantomjs':
phantomjs_path = request.config.getoption('phantomjs_path')
if not phantomjs_path:
phantomjs_path = os.path.join(
'node_modules',
'phantomjs-prebuilt',
'bin',
'phantomjs',
)
driver = webdriver.PhantomJS(executable_path=phantomjs_path)
driver.set_window_size(1280, 800)
else:
        raise pytest.UsageError('--selenium-driver must be specified')
def fin():
# Teardown Selenium.
try:
driver.close()
except Exception:
pass
# TODO: remove this when fixed in: https://github.com/seleniumhq/selenium/issues/767
if hasattr(driver, 'service'):
driver.service.process.send_signal(signal.SIGTERM)
driver.quit()
request.node._driver = driver
request.addfinalizer(fin)
browser = Browser(driver, live_server, percy)
if hasattr(request, 'cls'):
request.cls.browser = browser
request.node.browser = browser
# bind webdriver to percy for snapshots
percy.loader.webdriver = driver
return driver
@pytest.fixture(scope='session', autouse=True)
def _environment(request):
config = request.config
# add environment details to the pytest-html plugin
config._environment.append(('Driver', config.option.selenium_driver))
@pytest.mark.tryfirst
def pytest_runtest_makereport(item, call, __multicall__):
report = __multicall__.execute()
summary = []
extra = getattr(report, 'extra', [])
driver = getattr(item, '_driver', None)
if driver is not None:
_gather_url(item, report, driver, summary, extra)
_gather_screenshot(item, report, driver, summary, extra)
_gather_html(item, report, driver, summary, extra)
_gather_logs(item, report, driver, summary, extra)
if summary:
report.sections.append(('selenium', '\n'.join(summary)))
report.extra = extra
return report
def _gather_url(item, report, driver, summary, extra):
try:
url = driver.current_url
except Exception as e:
summary.append('WARNING: Failed to gather URL: {0}'.format(e))
return
pytest_html = item.config.pluginmanager.getplugin('html')
if pytest_html is not None:
# add url to the html report
extra.append(pytest_html.extras.url(url))
summary.append('URL: {0}'.format(url))
def _gather_screenshot(item, report, driver, summary, extra):
try:
screenshot = driver.get_screenshot_as_base64()
except Exception as e:
summary.append('WARNING: Failed to gather screenshot: {0}'.format(e))
return
pytest_html = item.config.pluginmanager.getplugin('html')
if pytest_html is not None:
# add screenshot to the html report
extra.append(pytest_html.extras.image(screenshot, 'Screenshot'))
def _gather_html(item, report, driver, summary, extra):
try:
html = driver.page_source.encode('utf-8')
except Exception as e:
summary.append('WARNING: Failed to gather HTML: {0}'.format(e))
return
pytest_html = item.config.pluginmanager.getplugin('html')
if pytest_html is not None:
# add page source to the html report
extra.append(pytest_html.extras.text(html, 'HTML'))
def _gather_logs(item, report, driver, summary, extra):
try:
types = driver.log_types
except Exception as e:
# note that some drivers may not implement log types
summary.append('WARNING: Failed to gather log types: {0}'.format(e))
return
for name in types:
try:
log = driver.get_log(name)
except Exception as e:
summary.append('WARNING: Failed to gather {0} log: {1}'.format(
name, e))
return
pytest_html = item.config.pluginmanager.getplugin('html')
if pytest_html is not None:
extra.append(pytest_html.extras.text(
format_log(log), '%s Log' % name.title()))
def format_log(log):
timestamp_format = '%Y-%m-%d %H:%M:%S.%f'
entries = [u'{0} {1[level]} - {1[message]}'.format(
datetime.utcfromtimestamp(entry['timestamp'] / 1000.0).strftime(
timestamp_format), entry).rstrip() for entry in log]
log = '\n'.join(entries)
log = log.encode('utf-8')
return log
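# Illustrative sketch (not part of the original helpers): given a hypothetical
# browser log entry, format_log emits one "<utc time> <level> - <message>" line
# per entry and returns the joined, utf-8 encoded text.
#
#   sample = [{'timestamp': 1500000000000, 'level': 'SEVERE',
#              'message': 'Uncaught ReferenceError: foo is not defined'}]
#   format_log(sample)
#   # -> b'2017-07-14 02:40:00.000000 SEVERE - Uncaught ReferenceError: foo is not defined'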
|
|
import configparser
import os
import os.path
import queue
import sys
import threading
import time
import traceback
import wx
from . import core
from . import file_io
from . import gui
from . import main
from . import stringconv
from . import version
class Frame(gui.MainFrame):
def __init__(self, parent):
gui.MainFrame.__init__(self, parent)
self.SetTitle(version.title)
self.SetIcon(wx.Icon(version.icon, wx.BITMAP_TYPE_ICO))
self.set_win7_taskbar_icon()
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.set_input_file_list_ctrl_columns(self.InputFileListCtrl)
dt = FileDropTarget(self)
self.InputFileListCtrl.SetDropTarget(dt)
self.opt = core.OptionData()
self.configfn = os.path.normpath(os.path.expanduser('~/.%s.cfg' % version.title.lower()))
self.log = None
self.exitcode = None
self.get_input_dir_from_config()
self.load_options_from_config()
self.set_options_in_ui()
self.Fit()
@staticmethod
def set_win7_taskbar_icon():
""" A hack to make the icon visible in the taskbar in Windows 7.
From http://stackoverflow.com/a/1552105/674475.
"""
if sys.platform == "win32":
import ctypes
appid = 'PointDensitySyn' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(appid)
def OnAddFile(self, event):
dlg = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "*%s"
% self.opt.input_filename_ext,
wx.FD_MULTIPLE | wx.FD_CHANGE_DIR)
try:
if dlg.ShowModal() == wx.ID_OK:
self.add_files(dlg.GetPaths())
finally:
dlg.Destroy()
def OnRemoveFile(self, event):
while 1:
i = self.InputFileListCtrl.GetNextItem(-1, state=wx.LIST_STATE_SELECTED)
if i == -1:
break
else:
self.InputFileListCtrl.DeleteItem(i)
def OnViewFile(self, event):
if self.InputFileListCtrl.GetSelectedItemCount() == 0:
self.show_warning("No file selected.")
return
elif self.InputFileListCtrl.GetSelectedItemCount() > 1:
self.show_warning("You can only view one file at a time.")
return
i = self.InputFileListCtrl.GetNextItem(-1, state=wx.LIST_STATE_SELECTED)
try:
fn = os.path.join(self.InputFileListCtrl.GetItemText(i, 1),
self.InputFileListCtrl.GetItemText(i, 0))
except IOError:
self.show_error("Could not open file.")
return
dlg = ViewFileDialog(self, fn)
dlg.SetIcon(wx.Icon(version.icon, wx.BITMAP_TYPE_ICO))
try:
dlg.ShowModal()
finally:
dlg.Destroy()
def OnInterpointCheckbox(self, event):
self.InterpointModeChoice.Enable(self.InterpointCheckBox.GetValue())
self.InterpointModeLabel.Enable(self.InterpointCheckBox.GetValue())
self.InterpointRelationsCheckListBox.Enable(self.InterpointCheckBox.GetValue())
self.InterpointRelationsLabel.Enable(self.InterpointCheckBox.GetValue())
self.ShortestDistCheckBox.Enable(self.InterpointCheckBox.GetValue())
self.LateralDistCheckBox.Enable(self.InterpointCheckBox.GetValue())
self.exclude_particles_checkbox_enable_or_disable()
def OnInterpointRelationsCheckListBoxToggled(self, event):
self.exclude_particles_checkbox_enable_or_disable()
def OnClusterCheckBox(self, event):
self.ClusterDistSpinCtrl.Enable(self.ClusterCheckBox.GetValue())
self.ClusterDistLabel.Enable(self.ClusterCheckBox.GetValue())
self.ClusterDistUnitLabel.Enable(self.ClusterCheckBox.GetValue())
def OnMonteCarloCheckBox(self, event):
self.MonteCarloRunsLabel.Enable(self.MonteCarloCheckBox.GetValue())
self.MonteCarloRunsSpinCtrl.Enable(self.MonteCarloCheckBox.GetValue())
self.SimulationWindowChoice.Enable(self.MonteCarloCheckBox.GetValue())
self.SimulationWindowLabel.Enable(self.MonteCarloCheckBox.GetValue())
if (self.MonteCarloCheckBox.GetValue() and
self.SimulationWindowChoice.GetStringSelection() in ('Profile', 'Postsynaptic density')):
self.StrictLocCheckBox.Enable(True)
else:
self.StrictLocCheckBox.Enable(False)
self.exclude_particles_checkbox_enable_or_disable()
def OnSimulationWindowChoice(self, event):
if self.SimulationWindowChoice.GetStringSelection() in ('Profile', 'Postsynaptic density'):
self.StrictLocCheckBox.Enable(True)
else:
self.StrictLocCheckBox.Enable(False)
def OnOtherSuffixCheckBox(self, event):
self.OtherSuffixTextCtrl.Enable(self.OtherSuffixCheckBox.GetValue())
def OnSaveLogCheckBox(self, event):
self.LogFilePickerCtrl.Enable(self.SaveLogCheckBox.GetValue())
self.IfLogExistsRadioBox.Enable(self.SaveLogCheckBox.GetValue())
def OnSetOptionsAsDefault(self, event):
if self.save_options_to_config():
self.StatusBar.SetStatusText("Current options saved to '%s'." % self.configfn)
def OnStart(self, event):
if self.InputFileListCtrl.GetItemCount() == 0:
self.show_warning("No files to process.")
return
self.set_options_from_ui()
if not self.set_log():
return
if (self.opt.determine_interpoint_dists and
self.opt.determine_interpoint_simulated_points() and
self.opt.monte_carlo_simulation_window != 'profile + shell' and not
self.opt.interpoint_exclude_particles_outside_window):
            if not self.yes_no_warn_dialog(
                    "Determining interpoint distances between particles and simulated points\n"
                    "generally does not make sense when using a simulation window other than\n"
                    "'profile + shell' and when 'Exclude particles outside simulation window'\n"
                    "is unchecked, because particles outside the simulation window will also\n"
                    "be considered when determining these distances, whereas simulated points\n"
                    "are only generated in the window.\n\nContinue anyway?\n"):
                return
self.StatusBar.SetStatusText("Processing...")
self.exitcode = 1
event_type = ""
msg = "Processing %s \n(File %d of %d)" % (os.path.basename(self.opt.input_file_list[0]), 1,
len(self.opt.input_file_list))
i = 0
dlg = wx.ProgressDialog(version.title, msg,
len(self.opt.input_file_list) + 2,
parent=self,
style=wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME | wx.PD_CAN_ABORT)
pthread = ProcessThread(self.opt)
pthread.start()
        while pthread.is_alive() or not pthread.process_queue.empty():
if not pthread.process_queue.empty():
(event_type, data) = pthread.process_queue.get()
if event_type == "new_file":
i += 1
msg = "Processing %s \n(File %d of %d)" \
% (os.path.basename(data), i,
len(self.opt.input_file_list))
if event_type == "saving_summaries":
i += 1
msg = "Saving summaries..."
if event_type == "done":
i = len(self.opt.input_file_list) + 2
msg = "Done."
self.log.update()
if event_type == "done" and self.log.fn != "":
self.StatusBar.SetStatusText("Logged to '" + self.log.fn + "'.")
if not dlg.Update(i, msg)[0] and not self.opt.stop_requested:
if self.yes_no_dialog("Abort process?"):
pthread.stop()
dlg.Hide()
else:
dlg.Resume()
if dlg.GetSize().GetWidth() < dlg.GetBestSize().GetWidth():
dlg.SetSize((dlg.GetBestSize().GetWidth() + 20, dlg.GetBestSize().GetHeight()))
if not pthread.error_queue.empty():
exc_str = pthread.error_queue.get()
if self.log.fn != "":
self.StatusBar.SetStatusText("Logged to '" + self.log.fn + "'.")
sys.stdout.write("\n*** %s session was unexpectedly aborted"
" at %s (local time). \n\nDetails:\n%s"
% (version.title, time.ctime(), exc_str))
self.log.update()
self.show_error("An unexpected error occurred while executing "
"%s - session aborted.\n\nDetails (also "
"sent to log):\n\n %s" % (version.title, exc_str))
dlg.Destroy()
return
# Processing finished.
self.log.update()
if self.log.fn != "":
self.StatusBar.SetStatusText("Logged to '" + self.log.fn + "'.")
dlg.Destroy()
if pthread.exitcode == 0:
self.show_error("One or more errors occurred during processing. "
"See log for details.")
elif pthread.exitcode == 2:
self.show_warning("One or more warnings occurred during "
"processing. See log for details.")
elif pthread.exitcode == 3:
self.show_warning("Session aborted by user.")
def OnAbout(self, event):
dlg = AboutDialog(self)
try:
dlg.ShowModal()
finally:
dlg.Destroy()
def OnClose(self, event):
self.save_input_dir_to_config()
sys.stdout = sys.__stdout__
self.Destroy()
#
# utilities
#
def save_input_dir_to_config(self):
config = configparser.ConfigParser()
try:
config.read(self.configfn)
except (configparser.ParsingError, configparser.MissingSectionHeaderError):
pass # Silently suppress parsing errors at this stage
if 'Previous session' not in config.sections():
config['Previous session'] = {}
config['Previous session']['input_dir'] = os.getcwd()
try:
with open(self.configfn, 'w') as f:
config.write(f)
except IOError:
self.show_warning("Configuration file\n(%s)\ncould not be saved."
% self.configfn)
def get_input_dir_from_config(self):
config = configparser.ConfigParser()
if not os.path.exists(self.configfn):
return
try:
config.read(self.configfn)
except (configparser.ParsingError, configparser.MissingSectionHeaderError):
pass # Silently suppress parsing errors at this stage
try:
inputdir = config['Previous session']['input_dir']
except (configparser.NoSectionError, configparser.NoOptionError, KeyError):
self.show_warning("Configuration file '%s' is invalid.\n Using "
"current working directory." % self.configfn)
return
try:
if not os.path.isdir(inputdir):
raise IOError
os.chdir(inputdir)
except (IOError, TypeError):
            self.show_warning("Invalid input directory '%s' in configuration "
"file '%s'.\n Using current working directory."
% (inputdir, self.configfn))
def save_options_to_config(self):
def set_option(option):
config['Options'][option] = str(getattr(self.opt, option))
def set_dict_option(option):
optdict = getattr(self.opt, option)
for key, val in list(optdict.items()):
optstr = '.'.join([option, key.replace(' ', '_')])
config['Options'][optstr] = str(val)
self.set_options_from_ui()
config = configparser.ConfigParser()
try:
config.read(self.configfn)
except (configparser.ParsingError, configparser.MissingSectionHeaderError):
pass # Silently suppress parsing errors at this stage
if 'Options' not in config.sections():
config['Options'] = {}
set_option('output_file_format')
set_option('csv_delimiter')
set_option('action_if_output_file_exists')
set_option('output_filename_date_suffix')
set_option('save_coords')
set_option('spatial_resolution')
set_option('shell_width')
set_option('determine_clusters')
set_option('within_cluster_dist')
set_option('run_monte_carlo')
set_option('monte_carlo_runs')
set_option('determine_interpoint_dists')
set_option('monte_carlo_simulation_window')
set_option('monte_carlo_strict_location')
set_option('interpoint_dist_mode')
set_option('interpoint_shortest_dist')
set_option('interpoint_lateral_dist')
set_option('interpoint_exclude_particles_outside_window')
set_dict_option('interpoint_relations')
try:
with open(self.configfn, 'w') as f:
config.write(f)
except IOError:
self.show_warning("Configuration file\n(%s)\ncould not be saved." % self.configfn)
def load_options_from_config(self):
def show_invalid_option_warning(invalid_opt):
self.show_warning("Invalid value '%s' for option '%s' in "
"configuration file '%s'.\nUsing default value."
% (getattr(self.opt, invalid_opt), invalid_opt,
self.configfn))
def check_str_option(opt, valid_strings=()):
if getattr(self.opt, opt) not in valid_strings:
show_invalid_option_warning(opt)
setattr(self.opt, opt, getattr(defaults, opt))
def check_int_option(opt, lower=None, upper=None):
try:
setattr(self.opt, opt,
stringconv.str_to_int(getattr(self.opt, opt), lower, upper))
except ValueError:
show_invalid_option_warning(opt)
setattr(self.opt, opt, getattr(defaults, opt))
def check_bool_option(opt):
try:
setattr(self.opt, opt, stringconv.str_to_bool(getattr(self.opt, opt)))
except ValueError:
show_invalid_option_warning(opt)
setattr(self.opt, opt, getattr(defaults, opt))
def check_bool_dict_option(opt):
optdict = getattr(self.opt, opt)
defaultdict = getattr(defaults, opt)
for key, val in list(optdict.items()):
optstr = '.'.join([opt, key.replace(" ", "_")])
if key not in list(getattr(defaults, opt).keys()):
self.show_warning("Invalid option '%s' in configuration file '%s'."
% (optstr, self.configfn))
del optdict[key]
try:
optdict[key] = stringconv.str_to_bool(val)
except ValueError:
self.show_warning("Invalid value '%s' for option '%s' in "
"configuration file '%s'.\nUsing default "
"value." % (val, optstr, self.configfn))
optdict[key] = defaultdict[key]
config = configparser.ConfigParser()
if not os.path.exists(self.configfn):
return
try:
config.read(self.configfn)
except (configparser.ParsingError, configparser.MissingSectionHeaderError):
return # Silently suppress parsing errors at this stage
if 'Options' not in config.sections():
return # No options present in config file; silently use defaults
defaults = core.OptionData()
for option in config.options('Options'):
if '.' in option:
option_dict, option_key = option.split('.', 1)
option_key = option_key.replace("_", " ")
try:
getattr(self.opt,
option_dict)[option_key] = config.get('Options', option)
except AttributeError:
pass # So, attribute is invalid, but continue silently
else:
setattr(self.opt, option, config.get('Options', option))
check_str_option('output_file_format', ('excel', 'csv'))
check_str_option('csv_delimiter', ('comma', 'tab'))
check_str_option('action_if_output_file_exists', ('enumerate', 'overwrite'))
check_bool_option('output_filename_date_suffix')
check_bool_option('save_coords')
check_int_option('spatial_resolution', lower=0, upper=1000)
check_int_option('shell_width', lower=0, upper=1000)
check_bool_option('determine_clusters')
check_int_option('within_cluster_dist', lower=1, upper=1000)
check_bool_option('run_monte_carlo')
check_int_option('monte_carlo_runs', lower=1, upper=999)
check_bool_option('determine_interpoint_dists')
check_str_option('monte_carlo_simulation_window',
('profile',
'profile + shell',
'profile - postsynaptic density',
'profile + shell - postsynaptic density',
'postsynaptic density'))
check_bool_option('monte_carlo_strict_location')
check_str_option('interpoint_dist_mode', ('nearest neighbour', 'all'))
check_bool_option('interpoint_shortest_dist')
check_bool_option('interpoint_lateral_dist')
check_bool_option('interpoint_exclude_particles_outside_window')
check_bool_dict_option('interpoint_relations')
def set_options_in_ui(self):
self.SpatResSpinCtrl.SetValue(self.opt.spatial_resolution)
self.ShellWidthSpinCtrl.SetValue(self.opt.shell_width)
self.InterpointCheckBox.SetValue(self.opt.determine_interpoint_dists)
self.InterpointModeChoice.SetItems(['Nearest neighbour', 'All'])
self.InterpointModeChoice.SetStringSelection(
self.opt.interpoint_dist_mode)
self.InterpointRelationsCheckListBox.SetItems(sorted(
[key.capitalize() for key in self.opt.interpoint_relations]))
self.InterpointRelationsCheckListBox.SetCheckedStrings(
[key.capitalize() for key in self.opt.interpoint_relations
if self.opt.interpoint_relations[key] is True])
self.ShortestDistCheckBox.SetValue(self.opt.interpoint_shortest_dist)
self.LateralDistCheckBox.SetValue(self.opt.interpoint_lateral_dist)
self.InterpointModeChoice.Enable(self.InterpointCheckBox.GetValue())
self.InterpointModeLabel.Enable(self.InterpointCheckBox.GetValue())
self.InterpointRelationsCheckListBox.Enable(
self.InterpointCheckBox.GetValue())
self.InterpointRelationsLabel.Enable(self.InterpointCheckBox.GetValue())
self.ShortestDistCheckBox.Enable(self.InterpointCheckBox.GetValue())
self.LateralDistCheckBox.Enable(self.InterpointCheckBox.GetValue())
self.exclude_particles_checkbox_enable_or_disable()
self.ClusterCheckBox.SetValue(self.opt.determine_clusters)
self.ClusterDistSpinCtrl.SetValue(self.opt.within_cluster_dist)
self.ClusterDistSpinCtrl.Enable(self.ClusterCheckBox.GetValue())
self.ClusterDistLabel.Enable(self.ClusterCheckBox.GetValue())
self.ClusterDistUnitLabel.Enable(self.ClusterCheckBox.GetValue())
self.MonteCarloCheckBox.SetValue(self.opt.run_monte_carlo)
self.MonteCarloRunsSpinCtrl.SetValue(self.opt.monte_carlo_runs)
self.SimulationWindowChoice.SetItems(['Profile',
'Profile + shell',
'Profile - postsynaptic density',
'Profile + shell - postsynaptic '
'density',
'Postsynaptic density'])
self.SimulationWindowChoice.SetStringSelection(
self.opt.monte_carlo_simulation_window)
self.StrictLocCheckBox.SetValue(self.opt.monte_carlo_strict_location)
self.MonteCarloRunsLabel.Enable(self.MonteCarloCheckBox.GetValue())
self.MonteCarloRunsSpinCtrl.Enable(self.MonteCarloCheckBox.GetValue())
self.SimulationWindowChoice.Enable(self.MonteCarloCheckBox.GetValue())
self.SimulationWindowLabel.Enable(self.MonteCarloCheckBox.GetValue())
self.SaveCoordsCheckBox.SetValue(self.opt.save_coords)
if self.opt.output_file_format == 'excel':
self.OutputFormatRadioBox.SetStringSelection('Excel')
elif self.opt.csv_delimiter == 'comma':
self.OutputFormatRadioBox.SetStringSelection('Comma-delimited text')
else:
            self.OutputFormatRadioBox.SetStringSelection('Tab-delimited text')
self.IfOutputExistsRadioBox.SetStringSelection(
self.opt.action_if_output_file_exists.capitalize())
self.DateSuffixCheckBox.SetValue(self.opt.output_filename_date_suffix)
self.OtherSuffixCheckBox.SetValue(
self.opt.output_filename_other_suffix != '')
self.OtherSuffixTextCtrl.SetValue(self.opt.output_filename_other_suffix)
self.OtherSuffixTextCtrl.Enable(self.OtherSuffixCheckBox.GetValue())
self.LogFilePickerCtrl.SetPath(version.title + '.log')
def set_options_from_ui(self):
self.opt.input_file_list = []
for n in range(0, self.InputFileListCtrl.GetItemCount()):
self.opt.input_file_list.append(os.path.join(
self.InputFileListCtrl.GetItemText(n, 1),
self.InputFileListCtrl.GetItemText(n, 0)))
self.opt.save_coords = self.SaveCoordsCheckBox.GetValue()
if self.OutputFormatRadioBox.GetStringSelection() == 'Excel':
self.opt.output_file_format = 'excel'
self.opt.output_filename_ext = '.xlsx'
elif self.OutputFormatRadioBox.GetStringSelection() == 'Comma-delimited text':
self.opt.output_file_format = 'csv'
self.opt.output_filename_ext = '.csv'
self.opt.csv_delimiter = 'comma'
elif self.OutputFormatRadioBox.GetStringSelection() == 'Tab-delimited text':
self.opt.output_file_format = 'csv'
self.opt.output_filename_ext = '.csv'
self.opt.csv_delimiter = 'tab'
self.opt.action_if_output_file_exists = \
self.IfOutputExistsRadioBox.GetStringSelection().lower()
self.opt.output_filename_date_suffix = self.DateSuffixCheckBox.GetValue()
if self.OtherSuffixCheckBox.GetValue():
self.opt.output_filename_other_suffix = self.OtherSuffixTextCtrl.GetValue()
self.opt.spatial_resolution = int(self.SpatResSpinCtrl.GetValue())
self.opt.shell_width = int(self.ShellWidthSpinCtrl.GetValue())
self.opt.determine_interpoint_dists = self.InterpointCheckBox.GetValue()
for key in self.opt.interpoint_relations:
if (key.capitalize() in
self.InterpointRelationsCheckListBox.GetCheckedStrings()):
self.opt.interpoint_relations[key] = True
else:
self.opt.interpoint_relations[key] = False
if True not in self.opt.interpoint_relations.values():
self.opt.determine_interpoint_dists = False
self.opt.interpoint_dist_mode = \
self.InterpointModeChoice.GetStringSelection().lower()
self.opt.interpoint_shortest_dist = self.ShortestDistCheckBox.GetValue()
self.opt.interpoint_lateral_dist = self.LateralDistCheckBox.GetValue()
self.opt.interpoint_exclude_particles_outside_window = \
self.ExcludeParticlesOutsideWindowCheckBox.GetValue()
self.opt.run_monte_carlo = self.MonteCarloCheckBox.GetValue()
self.opt.monte_carlo_runs = self.MonteCarloRunsSpinCtrl.GetValue()
self.opt.monte_carlo_simulation_window = \
self.SimulationWindowChoice.GetStringSelection().lower()
self.opt.monte_carlo_strict_location = self.StrictLocCheckBox.GetValue()
self.opt.determine_clusters = self.ClusterCheckBox.GetValue()
self.opt.within_cluster_dist = self.ClusterDistSpinCtrl.GetValue()
self.get_output_dir()
def exclude_particles_checkbox_enable_or_disable(self):
simulated_ips = False
if self.InterpointCheckBox.GetValue() and self.MonteCarloCheckBox.GetValue():
for key in ('particle - simulated', 'simulated - particle', 'simulated - simulated'):
if key.capitalize() in self.InterpointRelationsCheckListBox.GetCheckedStrings():
simulated_ips = True
self.ExcludeParticlesOutsideWindowCheckBox.Enable(simulated_ips)
def get_input_dir(self):
for f in self.opt.input_file_list:
if os.path.dirname(f):
return os.path.dirname(f)
return ""
def get_output_dir(self):
self.opt.output_dir = os.path.join(self.get_input_dir() or os.getcwd(), "out")
if not os.path.isdir(self.opt.output_dir):
os.mkdir(self.opt.output_dir)
def add_files(self, fli):
if len(fli) == 0:
return
c = self.InputFileListCtrl.GetItemCount()
n = 0
fn = ""
for fn in fli:
if (os.path.isfile(fn) and
os.path.splitext(fn)[1] == self.opt.input_filename_ext):
self.InputFileListCtrl.InsertItem(c + n, os.path.basename(fn))
self.InputFileListCtrl.SetItem(c + n, 1, os.path.dirname(fn))
n += 1
elif os.path.isdir(fn):
for fn2 in os.listdir(fn):
if (os.path.isfile(os.path.join(fn, fn2)) and
os.path.splitext(fn2)[1] ==
self.opt.input_filename_ext):
self.InputFileListCtrl.InsertItem(c + n, fn2)
self.InputFileListCtrl.SetItem(c + n, 1, fn)
n += 1
if n > 0:
self.InputFileListCtrl.SetColumnWidth(0, -1)
self.InputFileListCtrl.SetColumnWidth(1, -1)
elif os.path.isdir(fn):
self.show_warning("No files with '%s' extension found in folder(s)."
% self.opt.input_filename_ext)
else:
self.show_warning("Input files must have a '%s' extension."
% self.opt.input_filename_ext)
@staticmethod
def set_input_file_list_ctrl_columns(parent):
parent.InsertColumn(col=0, format=wx.LIST_FORMAT_LEFT, heading='Name', width=-1)
parent.InsertColumn(col=1, format=wx.LIST_FORMAT_LEFT, heading='Path', width=-1)
def set_log(self): # hm can't I simplify this?
if self.SaveLogCheckBox.GetValue():
mode = self.IfLogExistsRadioBox.GetStringSelection()
logfn = self.LogFilePickerCtrl.GetPath()
if os.path.dirname(logfn) == "":
logfn = os.path.join(self.opt.output_dir, logfn)
try:
if os.path.exists(logfn):
if self.IfLogExistsRadioBox.GetStringSelection() == "Enumerate":
logfn = file_io.enum_filename(logfn, 2)
else:
f = open(logfn, 'a')
f.close()
# ok, so file doesn't exist but check if name is valid
else:
f = open(logfn, 'w')
f.close()
except IOError:
self.show_error("Could not write to log file. Please choose another filename.")
return 0
self.log = LogQueue(self, logfn, self.LogTextCtrl, mode)
else:
self.log = LogQueue(self, "", self.LogTextCtrl, "")
sys.stdout = self.log
return 1
def show_warning(self, s):
dlg = wx.MessageDialog(self, s, version.title, wx.OK | wx.ICON_EXCLAMATION)
try:
dlg.ShowModal()
finally:
dlg.Destroy()
def show_error(self, s):
dlg = wx.MessageDialog(self, s, version.title, wx.OK | wx.ICON_HAND)
try:
dlg.ShowModal()
finally:
dlg.Destroy()
def yes_no_dialog(self, s):
dlg = wx.MessageDialog(self, s, version.title, wx.YES_NO | wx.ICON_QUESTION | wx.NO_DEFAULT)
try:
pressed = dlg.ShowModal()
finally:
dlg.Destroy()
if pressed == wx.ID_YES:
return True
return False
def yes_no_warn_dialog(self, s):
dlg = wx.MessageDialog(self, s, version.title, wx.YES_NO | wx.ICON_WARNING | wx.NO_DEFAULT)
try:
pressed = dlg.ShowModal()
finally:
dlg.Destroy()
if pressed == wx.ID_YES:
return True
return False
class ProcessThread(threading.Thread):
def __init__(self, opt):
threading.Thread.__init__(self)
self.opt = opt
self.process_queue = queue.Queue()
self.error_queue = queue.Queue(1)
self.opt.stop_requested = False
self.exitcode = None
def stop(self):
self.opt.stop_requested = True
# noinspection PyBroadException
def run(self):
try:
self.exitcode = main.main_proc(self)
except: # yes, I do want to catch everything
exc_str = "".join(traceback.format_exception(*sys.exc_info()))
self.error_queue.put(exc_str)
class LogQueue:
def __init__(self, parent, fn, win, mode):
self.parent = parent
self.fn = fn
self.win = win
self.q = queue.Queue()
if self.fn != "":
self.errstr = "* Error: could not write to log file: %s\n" % self.fn
if mode == 'Append':
try:
f = open(self.fn, "a")
f.close()
except IOError:
try:
f = open(self.fn, "w")
f.close()
except IOError:
sys.stderr.write(self.errstr)
self.fn = ""
elif mode == 'Overwrite' or mode == 'Enumerate':
try:
f = open(self.fn, "w")
f.close()
except IOError:
sys.stderr.write(self.errstr)
self.fn = ""
def write(self, s):
self.q.put(s)
def update(self):
while not self.q.empty():
s = self.q.get()
self.win.write(s)
self.parent.Update()
if self.fn != "":
try:
f = open(self.fn, "a")
try:
f.write(s)
finally:
f.close()
except IOError:
sys.stderr.write(self.errstr)
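# Usage sketch (assumed from OnStart and set_log above): the worker thread only
# calls write(), which just enqueues the text and is therefore thread-safe; the
# GUI thread drains the queue with update(), writing to the text control and,
# when a filename was given, appending to the log file.
#
#   log = LogQueue(frame, "session.log", frame.LogTextCtrl, "Overwrite")
#   sys.stdout = log   # worker output is captured via write()
#   ...
#   log.update()       # called periodically from the GUI thread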
class FileDropTarget(wx.FileDropTarget):
def __init__(self, parent):
wx.FileDropTarget.__init__(self)
self.parent = parent
# noinspection PyMethodOverriding
def OnDropFiles(self, x, y, fli):
self.parent.add_files(fli)
return True
class AboutDialog(gui.AboutDialog):
def __init__(self, parent):
gui.AboutDialog.__init__(self, parent)
self.TitleLabel.SetLabel(version.title)
self.IconBitmap.SetBitmap(wx.Bitmap(version.icon, wx.BITMAP_TYPE_ANY))
self.VersionLabel.SetLabel("Version %s" % version.version)
self.LastModLabel.SetLabel("Last modified %s %s, %s." % version.date)
self.CopyrightLabel.SetLabel("Copyright 2001-%s %s." % (version.date[2], version.author))
self.LicenseLabel.SetLabel("Released under the terms of the MIT"
" license.")
self.EmailHyperlink.SetLabel("%s" % version.email)
self.EmailHyperlink.SetURL("mailto://%s" % version.email)
self.WebHyperlink.SetLabel("http://%s" % version.homepage)
self.WebHyperlink.SetURL("http://%s" % version.homepage)
self.SetIcon(wx.Icon(version.icon, wx.BITMAP_TYPE_ICO))
self.Fit()
def OnClose(self, event):
self.Destroy()
class ViewFileDialog(gui.ViewFileDialog):
def __init__(self, parent, fn):
gui.ViewFileDialog.__init__(self, parent)
self.SetIcon(wx.Icon(version.icon, wx.BITMAP_TYPE_ICO))
try:
self.SetTitle(os.path.basename(fn))
f = open(fn, "r")
try:
for s in f.readlines():
self.ViewFileTextCtrl.AppendText(s)
finally:
f.close()
except IOError:
parent.show_error("Could not open file.")
self.Close()
def OnClose(self, event):
self.Destroy()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 2 14:24:25 2017
@author: ajaver
"""
from .helper import get_n_worms_estimate, get_delta_in_frames, add_derivatives
from .events import get_event_stats, event_region_labels
from .path import get_path_extent_stats
from .features import timeseries_feats_columns, \
ventral_signed_columns, path_curvature_columns, curvature_columns
import pandas as pd
import numpy as np
index_colums = ['worm_index', 'timestamp']
blob_feats_columns = ['blob_area',
'blob_perimeter',
'blob_box_length',
'blob_box_width',
'blob_quirkiness',
'blob_compactness',
'blob_solidity',
'blob_hu0',
'blob_hu1',
'blob_hu2',
'blob_hu3',
'blob_hu4',
'blob_hu5',
'blob_hu6'
]
#get the ratios to be normalized
feats2normalize = {
'L' : [
'head_tail_distance',
'major_axis',
'minor_axis',
'dist_from_food_edge',
'length',
'width_head_base',
'width_midbody',
'width_tail_base'
],
'1/L' : path_curvature_columns + curvature_columns,
'L^2' : ['area']
}
feats2normalize['L'] += [x for x in timeseries_feats_columns if 'radial_velocity' in x]
feats2normalize['L'] += [x for x in timeseries_feats_columns if 'speed' in x]
#add derivatives and make sure there are no duplicates
for k,dat in feats2normalize.items():
dfeats = ['d_' + x for x in dat if not x.startswith('d_')]
feats2normalize[k] = list(set(dat) ^ set(dfeats))
def _normalize_by_w_length(timeseries_data, feats2norm):
'''
    Normalize features by body length. This is far from the most efficient solution, but it is the easiest to implement.
'''
def _get_conversion_vec(units_t, median_length_vec):
'''helper function to find how to make the conversion'''
if units_t == 'L':
conversion_vec = 1/median_length_vec
elif units_t == '1/L':
conversion_vec = median_length_vec
elif units_t == 'L^2':
conversion_vec = median_length_vec**2
return conversion_vec
timeseries_data = timeseries_data.copy()
median_length = timeseries_data.groupby('worm_index').agg({'length':'median'})
median_length_vec = timeseries_data['worm_index'].map(median_length['length'])
changed_feats_l = []
for units_t, feats in feats2norm.items():
feats_f = [x for x in timeseries_data if any(x.startswith(f) for f in feats)]
conversion_vec = _get_conversion_vec(units_t, median_length_vec)
for f in feats_f:
timeseries_data[f] *= conversion_vec
changed_feats_l += feats_f
changed_feats = {x: x + '_norm' for x in changed_feats_l}
timeseries_data = timeseries_data.rename(columns = changed_feats)
return timeseries_data, changed_feats
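# A minimal sketch (not used by the pipeline) of what _normalize_by_w_length
# does to a toy table; the feature name and values below are made up.
def _normalize_example():  # pragma: no cover - illustrative only
    toy = pd.DataFrame({
        'worm_index': [0, 0, 1, 1],
        'length': [1000., 1000., 500., 500.],
        'head_tail_distance': [800., 900., 400., 450.],
    })
    normed, changed = _normalize_by_w_length(toy, {'L': ['head_tail_distance']})
    # 'head_tail_distance' is divided by each worm's median length and renamed,
    # so changed == {'head_tail_distance': 'head_tail_distance_norm'}
    return normed, changed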
def get_df_quantiles(df,
feats2check = timeseries_feats_columns,
subdivision_dict = {'food_region':['orientation_food_edge']},
feats2norm = feats2normalize,
feats2abs = ventral_signed_columns,
is_remove_subdivided = True,
is_abs_ventral = True,
is_normalize = False
):
'''
Get quantile statistics for all the features given by `feats2check`.
In the features in `feats2abs` we are going to use only the absolute. This is to
deal with worms with unknown dorsal/ventral orientation.
'''
q_vals = (0.1, 0.5, 0.9) #percentiles to calculate
iqr_limits = (0.25, 0.75) # range of percentiles used for the interquantile distance
valid_q = q_vals + iqr_limits
    df = df.copy() #copy so the df can be modified directly without long-lasting consequences
#filter features to be abs
def _filter_ventral_features(feats2check):#%%
valid_f = [x for x in feats2check if any(x.startswith(f) for f in feats2abs)]
return valid_f
#filter default columns in case they are not present
feats2check = [x for x in feats2check if x in df]
#filter default columns in case they are not present. Same for the subdivision dictionary.
subdivision_dict_r = {}
for e_subdivide, feats2subdivide in subdivision_dict.items():
ff = [x for x in feats2check if x in feats2subdivide]
if e_subdivide in df and ff:
subdivision_dict_r[e_subdivide] = ff
subdivision_dict = subdivision_dict_r
#subdivide a feature using the event features
subdivided_df = _get_subdivided_features(df, subdivision_dict = subdivision_dict)
df = df.join(subdivided_df)
feats2check += subdivided_df.columns.tolist()
if is_remove_subdivided:
df = df[[x for x in df if not x in feats2subdivide]]
feats2check = [x for x in feats2check if x not in feats2subdivide]
#add normalized features
if is_normalize:
df, changed_feats = _normalize_by_w_length(df, feats2norm = feats2norm)
feats2check = [x if not x in changed_feats else changed_feats[x] for x in feats2check]
#abs features that are ventral/dorsal side
if is_abs_ventral:
feats2abs = _filter_ventral_features(feats2check)
#find features that match ventral_signed_columns
if feats2abs:
            #take the absolute value of the ventral-signed features
if df.size > 0:
df[feats2abs] = df[feats2abs].abs()
#change name
df.columns = [x + '_abs' if x in feats2abs else x for x in df.columns]
feats2check = [x + '_abs' if x in feats2abs else x for x in feats2check]
#calculate quantiles
feat_mean = None
Q = df[feats2check].quantile(valid_q)
feat_mean = pd.concat((feat_mean, Q), axis=1)
#name correctly
dat = []
for q in q_vals:
q_dat = feat_mean.loc[q]
q_str = '_{}th'.format(int(round(q*100)))
for feat, val in q_dat.iteritems():
dat.append((val, feat+q_str))
IQR = feat_mean.loc[0.75] - feat_mean.loc[0.25]
dat += [(val, feat + '_IQR') for feat, val in IQR.iteritems()]
feat_mean_s = pd.Series(*list(zip(*dat)))
return feat_mean_s
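# A minimal sketch (not used by the pipeline) of the naming convention produced
# by get_df_quantiles; the toy 'speed' column below is made up.
def _quantile_naming_example():  # pragma: no cover - illustrative only
    toy = pd.DataFrame({'speed': np.arange(1., 101.)})
    stats = get_df_quantiles(toy, feats2check=['speed'], is_abs_ventral=False)
    # stats is a Series indexed by 'speed_10th', 'speed_50th', 'speed_90th' and
    # 'speed_IQR'; '_abs', '_norm' and event-subdivision suffixes are appended
    # when the corresponding options are enabled.
    return stats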
def _get_subdivided_features(timeseries_data, subdivision_dict):
'''
subdivision_dict = {event_v1: [feature_v1, feature_v2, ...], event_v2: [feature_vn ...], ...}
event_vector = [-1, -1, 0, 0, 1, 1]
feature_vector = [1, 3, 4, 5, 6, 6]
new_vectors ->
[1, 3, nan, nan, nan, nan]
[nan, nan, 4, 5, nan, nan]
[nan, nan, nan, nan, 6, 6]
'''
#assert all the subdivision keys are known events
assert all(x in event_region_labels.keys() for x in subdivision_dict)
event_type_link = {#%%
'food_region' : '_in_',
'motion_mode' : '_w_'
}
subdivided_data = []
for e_col, timeseries_cols in subdivision_dict.items():
e_data = timeseries_data[e_col].values
if e_col in event_type_link:
str_l = event_type_link[e_col]
else:
str_l = '_'
for flag, label in event_region_labels[e_col].items():
_flag = e_data != flag
for f_col in timeseries_cols:
f_data = timeseries_data[f_col].values.copy()
try:
f_data[_flag] = np.nan
except:
import pdb
pdb.set_trace()
new_name = f_col + str_l + label
subdivided_data.append((new_name, f_data))
if not subdivided_data:
#return empty df if nothing was subdivided
return pd.DataFrame([])
columns, data = zip(*subdivided_data)
subdivided_df = pd.DataFrame(np.array(data).T, columns = columns)
subdivided_df.index = timeseries_data.index
return subdivided_df
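# A minimal sketch (not used by the pipeline) mirroring the docstring above;
# the motion_mode flags (-1, 0, 1) and the 'speed' values are made up, and the
# actual flag->label mapping comes from events.event_region_labels.
def _subdivision_example():  # pragma: no cover - illustrative only
    toy = pd.DataFrame({
        'motion_mode': [-1, -1, 0, 0, 1, 1],
        'speed': [1., 3., 4., 5., 6., 6.],
    })
    # returns one new column per motion_mode label, with 'speed' NaN-masked
    # wherever the event flag does not match that label
    return _get_subdivided_features(toy, {'motion_mode': ['speed']})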
def process_blob_data(blob_features, derivate_delta_time, fps):
'''
Filter only the selected features and add derivatives
'''
assert not ((blob_features is None) and (derivate_delta_time is None))
assert all(x in blob_features for x in index_colums)
#add the blob prefix to the blob features if it is not present
filt_func = lambda x : (not x.startswith('blob_') and not (x in index_colums))
blob_features.columns = ['blob_' + x if filt_func(x) else x for x in blob_features.columns ]
#add blob derivatives
derivate_delta_frames = get_delta_in_frames(derivate_delta_time, fps)
blob_l = []
for w_ind, blob_w in blob_features.groupby('worm_index'):
blob_w = add_derivatives(blob_w, blob_feats_columns, derivate_delta_frames, fps)
blob_l.append(blob_w)
if blob_l:
blob_features = pd.concat(blob_l, axis=0)
#select only the valid columns
blob_feats_columns_d = blob_feats_columns + ['d_' + x for x in blob_feats_columns]
blob_cols = [x for x in blob_feats_columns_d if x in blob_features]
blob_features = blob_features[blob_cols]
else:
blob_features, blob_cols = pd.DataFrame([]), []
return blob_features, blob_cols
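# Sketch of the column handling above: an input column such as 'area' is renamed
# 'blob_area', per-worm derivatives are added (assumed to appear as 'd_blob_area'
# via add_derivatives), and only blob_feats_columns plus their 'd_' counterparts
# are kept in the returned frame.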
def get_summary_stats(timeseries_data,
fps,
blob_features = None,
derivate_delta_time = None,
only_abs_ventral = False,
):
if timeseries_data.size == 0:
return pd.DataFrame([])
ts_cols_all, v_sign_cols, feats2norm = timeseries_feats_columns, ventral_signed_columns, feats2normalize
ts_cols_norm = sum(feats2norm.values(), [])
#summarize everything
exp_feats = []
## event features
n_worms_estimate = get_n_worms_estimate(timeseries_data['timestamp'])
event_stats_s = get_event_stats(timeseries_data, fps , n_worms_estimate)
## timeseries features
##### simple
timeseries_stats_s = get_df_quantiles(timeseries_data,
feats2check = ts_cols_all,
feats2abs = v_sign_cols,
feats2norm = feats2norm,
is_normalize = False)
path_grid_stats_s = get_path_extent_stats(timeseries_data, fps, is_normalized = False)
feat_stats = pd.concat((timeseries_stats_s, path_grid_stats_s, event_stats_s))
exp_feats.append(feat_stats)
##### normalized by worm length
timeseries_stats_n = get_df_quantiles(timeseries_data,
feats2check = ts_cols_norm,
feats2abs = v_sign_cols,
feats2norm = feats2norm,
is_normalize = True)
path_grid_stats_n = get_path_extent_stats(timeseries_data, fps, is_normalized = True)
feat_stats_n = pd.concat((timeseries_stats_n, path_grid_stats_n))
exp_feats.append(feat_stats_n)
#add subdivisions
feat_stats_m_subdiv = get_df_quantiles(timeseries_data,
feats2check = ts_cols_all,
feats2abs = v_sign_cols,
feats2norm = feats2norm,
subdivision_dict = {'motion_mode' : ts_cols_all},
is_abs_ventral = True) #i only calculate the subdivision abs or not abs
exp_feats.append(feat_stats_m_subdiv)
if not only_abs_ventral:
##### non-abs ventral signed features
feat_stats_v = get_df_quantiles(timeseries_data,
feats2check = v_sign_cols,
feats2abs = v_sign_cols,
feats2norm = feats2norm,
is_abs_ventral = False,
is_normalize = False)
exp_feats.append(feat_stats_v)
##### non-abs and normalized ventral signed features
v_sign_cols_norm = list(set(v_sign_cols) & set(ts_cols_norm))
feat_stats_v_n = get_df_quantiles(timeseries_data,
feats2check = v_sign_cols_norm,
feats2abs = v_sign_cols,
feats2norm = feats2norm,
is_abs_ventral = False,
is_normalize = True)
exp_feats.append(feat_stats_v_n)
#add subdivisions
feat_stats_m_subdiv_v = get_df_quantiles(timeseries_data,
feats2check = v_sign_cols,
feats2abs = v_sign_cols,
subdivision_dict = {'motion_mode' : ts_cols_all},
is_abs_ventral = False,
is_normalize = False) #i only calculate the subdivision abs or not abs
exp_feats.append(feat_stats_m_subdiv_v)
if blob_features is not None:
        #I need to add the worm index and timestamp before calculating the derivative
blob_features = pd.concat((timeseries_data[index_colums], blob_features), axis=1)
blob_features, blob_cols = process_blob_data(blob_features, derivate_delta_time, fps)
#get blobstats
blob_stats = get_df_quantiles(blob_features, feats2check = blob_cols)
blob_features['motion_mode'] = timeseries_data['motion_mode']
blob_stats_m_subdiv = get_df_quantiles(blob_features,
feats2check = blob_cols,
subdivision_dict = {'motion_mode':blob_cols},
is_abs_ventral = False)
exp_feats += [blob_stats, blob_stats_m_subdiv]
exp_feats_df = pd.concat(exp_feats)
assert not np.any(exp_feats_df.index.duplicated()) #If there are duplicated indexes there might be an error here
return exp_feats_df
#%%
if __name__ == '__main__':
from tierpsy.helper.params import read_fps
#fname = '/Users/ajaver/OneDrive - Imperial College London/aggregation/N2_1_Ch1_29062017_182108_comp3_featuresN.hdf5'
#%%
fname = '/Volumes/behavgenom_archive$/Avelino/screening/CeNDR/Results/CeNDR_Set1_020617/WN2002_worms10_food1-10_Set1_Pos4_Ch4_02062017_115723_featuresN.hdf5'
with pd.HDFStore(fname, 'r') as fid:
timeseries_data = fid['/timeseries_data']
blob_features = fid['/blob_features']
fps = read_fps(fname)
feat_stats = get_summary_stats(timeseries_data,
fps,
blob_features,
1/3,
only_abs_ventral = True
)
print(feat_stats)
|
|
import uuid
from datetime import date, datetime
import pytest
from freezegun import freeze_time
from app.models import (
EMAIL_TYPE,
KEY_TYPE_NORMAL,
KEY_TYPE_TEAM,
KEY_TYPE_TEST,
LETTER_TYPE,
PRECOMPILED_TEMPLATE_NAME,
SMS_TYPE,
)
from tests.app.db import (
create_ft_notification_status,
create_notification,
create_service,
create_template,
)
@freeze_time('2017-11-11 02:00')
def test_get_template_usage_by_month_returns_correct_data(
admin_request,
sample_template
):
create_ft_notification_status(bst_date=date(2017, 4, 2), template=sample_template, count=3)
create_notification(sample_template, created_at=datetime.utcnow())
resp_json = admin_request.get(
'service.get_monthly_template_usage',
service_id=sample_template.service_id,
year=2017
)
resp_json = resp_json['stats']
assert len(resp_json) == 2
assert resp_json[0]["template_id"] == str(sample_template.id)
assert resp_json[0]["name"] == sample_template.name
assert resp_json[0]["type"] == sample_template.template_type
assert resp_json[0]["month"] == 4
assert resp_json[0]["year"] == 2017
assert resp_json[0]["count"] == 3
assert resp_json[1]["template_id"] == str(sample_template.id)
assert resp_json[1]["name"] == sample_template.name
assert resp_json[1]["type"] == sample_template.template_type
assert resp_json[1]["month"] == 11
assert resp_json[1]["year"] == 2017
assert resp_json[1]["count"] == 1
@freeze_time('2017-11-11 02:00')
def test_get_template_usage_by_month_returns_two_templates(admin_request, sample_template, sample_service):
template_one = create_template(
sample_service,
template_type=LETTER_TYPE,
template_name=PRECOMPILED_TEMPLATE_NAME,
hidden=True
)
create_ft_notification_status(bst_date=datetime(2017, 4, 1), template=template_one, count=1)
create_ft_notification_status(bst_date=datetime(2017, 4, 1), template=sample_template, count=3)
create_notification(sample_template, created_at=datetime.utcnow())
resp_json = admin_request.get(
'service.get_monthly_template_usage',
service_id=sample_template.service_id,
year=2017
)
resp_json = sorted(resp_json['stats'], key=lambda k: (k['year'], k['month'], k['count']))
assert len(resp_json) == 3
assert resp_json[0]["template_id"] == str(template_one.id)
assert resp_json[0]["name"] == template_one.name
assert resp_json[0]["type"] == template_one.template_type
assert resp_json[0]["month"] == 4
assert resp_json[0]["year"] == 2017
assert resp_json[0]["count"] == 1
assert resp_json[0]["is_precompiled_letter"] is True
assert resp_json[1]["template_id"] == str(sample_template.id)
assert resp_json[1]["name"] == sample_template.name
assert resp_json[1]["type"] == sample_template.template_type
assert resp_json[1]["month"] == 4
assert resp_json[1]["year"] == 2017
assert resp_json[1]["count"] == 3
assert resp_json[1]["is_precompiled_letter"] is False
assert resp_json[2]["template_id"] == str(sample_template.id)
assert resp_json[2]["name"] == sample_template.name
assert resp_json[2]["type"] == sample_template.template_type
assert resp_json[2]["month"] == 11
assert resp_json[2]["year"] == 2017
assert resp_json[2]["count"] == 1
assert resp_json[2]["is_precompiled_letter"] is False
@pytest.mark.parametrize('today_only, stats', [
(False, {'requested': 2, 'delivered': 1, 'failed': 0}),
(True, {'requested': 1, 'delivered': 0, 'failed': 0})
], ids=['seven_days', 'today'])
def test_get_service_notification_statistics(admin_request, sample_service, sample_template, today_only, stats):
create_ft_notification_status(date(2000, 1, 1), 'sms', sample_service, count=1)
with freeze_time('2000-01-02T12:00:00'):
create_notification(sample_template, status='created')
resp = admin_request.get(
'service.get_service_notification_statistics',
service_id=sample_template.service_id,
today_only=today_only
)
assert set(resp['data'].keys()) == {SMS_TYPE, EMAIL_TYPE, LETTER_TYPE}
assert resp['data'][SMS_TYPE] == stats
def test_get_service_notification_statistics_with_unknown_service(admin_request):
resp = admin_request.get(
'service.get_service_notification_statistics',
service_id=uuid.uuid4()
)
assert resp['data'] == {
SMS_TYPE: {'requested': 0, 'delivered': 0, 'failed': 0},
EMAIL_TYPE: {'requested': 0, 'delivered': 0, 'failed': 0},
LETTER_TYPE: {'requested': 0, 'delivered': 0, 'failed': 0},
}
@pytest.mark.parametrize('kwargs, expected_json', [
({'year': 'baz'}, {'message': 'Year must be a number', 'result': 'error'}),
({}, {'message': 'Year must be a number', 'result': 'error'}),
])
def test_get_monthly_notification_stats_returns_errors(admin_request, sample_service, kwargs, expected_json):
response = admin_request.get(
'service.get_monthly_notification_stats',
service_id=sample_service.id,
_expected_status=400,
**kwargs
)
assert response == expected_json
def test_get_monthly_notification_stats_returns_404_if_no_service(admin_request):
response = admin_request.get(
'service.get_monthly_notification_stats',
service_id=uuid.uuid4(),
_expected_status=404,
)
assert response == {'message': 'No result found', 'result': 'error'}
def test_get_monthly_notification_stats_returns_empty_stats_with_correct_dates(admin_request, sample_service):
response = admin_request.get(
'service.get_monthly_notification_stats',
service_id=sample_service.id,
year=2016
)
assert len(response['data']) == 12
keys = [
'2016-04', '2016-05', '2016-06', '2016-07', '2016-08', '2016-09', '2016-10', '2016-11', '2016-12',
'2017-01', '2017-02', '2017-03'
]
assert sorted(response['data'].keys()) == keys
for val in response['data'].values():
assert val == {'sms': {}, 'email': {}, 'letter': {}}
def test_get_monthly_notification_stats_returns_stats(admin_request, sample_service):
sms_t1 = create_template(sample_service)
sms_t2 = create_template(sample_service)
email_template = create_template(sample_service, template_type=EMAIL_TYPE)
create_ft_notification_status(datetime(2016, 6, 1), template=sms_t1)
create_ft_notification_status(datetime(2016, 6, 2), template=sms_t1)
create_ft_notification_status(datetime(2016, 7, 1), template=sms_t1)
create_ft_notification_status(datetime(2016, 7, 1), template=sms_t2)
create_ft_notification_status(datetime(2016, 7, 1), template=sms_t1, notification_status='created')
create_ft_notification_status(datetime(2016, 7, 1), template=email_template)
response = admin_request.get(
'service.get_monthly_notification_stats',
service_id=sample_service.id,
year=2016
)
assert len(response['data']) == 12
assert response['data']['2016-06'] == {
'sms': {
# it combines the two days
'delivered': 2
},
'email': {},
'letter': {}
}
assert response['data']['2016-07'] == {
# it combines the two template types
'sms': {
'created': 1,
'delivered': 2,
},
'email': {
'delivered': 1
},
'letter': {}
}
@freeze_time('2016-06-05 12:00:00')
def test_get_monthly_notification_stats_combines_todays_data_and_historic_stats(admin_request, sample_template):
create_ft_notification_status(datetime(2016, 5, 1), template=sample_template, count=1)
create_ft_notification_status(datetime(2016, 6, 1), template=sample_template, notification_status='created', count=2) # noqa
create_notification(sample_template, created_at=datetime(2016, 6, 5), status='created')
create_notification(sample_template, created_at=datetime(2016, 6, 5), status='delivered')
# this doesn't get returned in the stats because it is old - it should be in ft_notification_status by now
create_notification(sample_template, created_at=datetime(2016, 6, 4), status='sending')
response = admin_request.get(
'service.get_monthly_notification_stats',
service_id=sample_template.service_id,
year=2016
)
assert len(response['data']) == 3 # apr, may, jun
assert response['data']['2016-05'] == {
'sms': {
'delivered': 1
},
'email': {},
'letter': {}
}
assert response['data']['2016-06'] == {
'sms': {
# combines the stats from the historic ft_notification_status and the current notifications
'created': 3,
'delivered': 1,
},
'email': {},
'letter': {}
}
def test_get_monthly_notification_stats_ignores_test_keys(admin_request, sample_service):
create_ft_notification_status(datetime(2016, 6, 1), service=sample_service, key_type=KEY_TYPE_NORMAL, count=1)
create_ft_notification_status(datetime(2016, 6, 1), service=sample_service, key_type=KEY_TYPE_TEAM, count=2)
create_ft_notification_status(datetime(2016, 6, 1), service=sample_service, key_type=KEY_TYPE_TEST, count=4)
response = admin_request.get('service.get_monthly_notification_stats', service_id=sample_service.id, year=2016)
assert response['data']['2016-06']['sms'] == {'delivered': 3}
def test_get_monthly_notification_stats_checks_dates(admin_request, sample_service):
t = create_template(sample_service)
create_ft_notification_status(datetime(2016, 3, 31), template=t, notification_status='created')
create_ft_notification_status(datetime(2016, 4, 1), template=t, notification_status='sending')
create_ft_notification_status(datetime(2017, 3, 31), template=t, notification_status='delivered')
create_ft_notification_status(datetime(2017, 4, 11), template=t, notification_status='permanent-failure')
response = admin_request.get('service.get_monthly_notification_stats', service_id=sample_service.id, year=2016)
assert '2016-03' not in response['data']
assert '2017-04' not in response['data']
assert response['data']['2016-04']['sms'] == {'sending': 1}
assert response['data']['2017-03']['sms'] == {'delivered': 1}
def test_get_monthly_notification_stats_only_gets_for_one_service(admin_request, notify_db_session):
services = [create_service(), create_service(service_name="2")]
templates = [create_template(services[0]), create_template(services[1])]
create_ft_notification_status(datetime(2016, 6, 1), template=templates[0], notification_status='created')
create_ft_notification_status(datetime(2016, 6, 1), template=templates[1], notification_status='delivered')
response = admin_request.get('service.get_monthly_notification_stats', service_id=services[0].id, year=2016)
assert response['data']['2016-06'] == {
'sms': {'created': 1},
'email': {},
'letter': {}
}
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 NTT
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.network import manager as network_manager
from nova.network import linux_net
import mox
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.network')
HOST = "testhost"
instances = [{'id': 0,
'host': 'fake_instance00',
'hostname': 'fake_instance00'},
{'id': 1,
'host': 'fake_instance01',
'hostname': 'fake_instance01'}]
addresses = [{"address": "10.0.0.1"},
{"address": "10.0.0.2"},
{"address": "10.0.0.3"},
{"address": "10.0.0.4"},
{"address": "10.0.0.5"},
{"address": "10.0.0.6"}]
networks = [{'id': 0,
'uuid': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'dhcp_server': '0.0.0.0',
'dhcp_start': '192.168.100.1',
'vlan': None,
'host': None,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2'},
{'id': 1,
'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'dhcp_server': '0.0.0.0',
'dhcp_start': '192.168.100.1',
'vlan': None,
'host': None,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2'}]
fixed_ips = [{'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance_id': 0,
'allocated': True,
'virtual_interface_id': 0,
'virtual_interface': addresses[0],
'instance': instances[0],
'floating_ips': []},
{'id': 1,
'network_id': 1,
'address': '192.168.1.100',
'instance_id': 0,
'allocated': True,
'virtual_interface_id': 1,
'virtual_interface': addresses[1],
'instance': instances[0],
'floating_ips': []},
{'id': 2,
'network_id': 1,
'address': '192.168.0.101',
'instance_id': 1,
'allocated': True,
'virtual_interface_id': 2,
'virtual_interface': addresses[2],
'instance': instances[1],
'floating_ips': []},
{'id': 3,
'network_id': 0,
'address': '192.168.1.101',
'instance_id': 1,
'allocated': True,
'virtual_interface_id': 3,
'virtual_interface': addresses[3],
'instance': instances[1],
'floating_ips': []},
{'id': 4,
'network_id': 0,
'address': '192.168.0.102',
'instance_id': 0,
'allocated': True,
'virtual_interface_id': 4,
'virtual_interface': addresses[4],
'instance': instances[0],
'floating_ips': []},
{'id': 5,
'network_id': 1,
'address': '192.168.1.102',
'instance_id': 1,
'allocated': True,
'virtual_interface_id': 5,
'virtual_interface': addresses[5],
'instance': instances[1],
'floating_ips': []}]
vifs = [{'id': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'network': networks[0],
'instance_id': 0},
{'id': 1,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'network': networks[1],
'instance_id': 0},
{'id': 2,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 1,
'network': networks[1],
'instance_id': 1},
{'id': 3,
'address': 'DE:AD:BE:EF:00:03',
'uuid': '00000000-0000-0000-0000-0000000000000003',
'network_id': 0,
'network': networks[0],
'instance_id': 1},
{'id': 4,
'address': 'DE:AD:BE:EF:00:04',
'uuid': '00000000-0000-0000-0000-0000000000000004',
'network_id': 0,
'network': networks[0],
'instance_id': 0},
{'id': 5,
'address': 'DE:AD:BE:EF:00:05',
'uuid': '00000000-0000-0000-0000-0000000000000005',
'network_id': 1,
'network': networks[1],
'instance_id': 1}]
class LinuxNetworkTestCase(test.TestCase):
def setUp(self):
super(LinuxNetworkTestCase, self).setUp()
network_driver = FLAGS.network_driver
self.driver = utils.import_object(network_driver)
self.driver.db = db
def test_update_dhcp_for_nw00(self):
self.flags(use_single_default_gateway=True)
self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips')
self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')
db.network_get_associated_fixed_ips(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([fixed_ips[0],
fixed_ips[3]])
db.network_get_associated_fixed_ips(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([fixed_ips[0],
fixed_ips[3]])
db.virtual_interface_get_by_instance(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([vifs[0], vifs[1]])
db.virtual_interface_get_by_instance(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([vifs[2], vifs[3]])
self.mox.ReplayAll()
self.driver.update_dhcp(None, "eth0", networks[0])
def test_update_dhcp_for_nw01(self):
self.flags(use_single_default_gateway=True)
self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips')
self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')
db.network_get_associated_fixed_ips(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([fixed_ips[1],
fixed_ips[2]])
db.network_get_associated_fixed_ips(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([fixed_ips[1],
fixed_ips[2]])
db.virtual_interface_get_by_instance(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([vifs[0], vifs[1]])
db.virtual_interface_get_by_instance(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([vifs[2], vifs[3]])
self.mox.ReplayAll()
self.driver.update_dhcp(None, "eth0", networks[0])
def test_get_dhcp_hosts_for_nw00(self):
self.flags(use_single_default_gateway=True)
self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips')
db.network_get_associated_fixed_ips(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([fixed_ips[0],
fixed_ips[3]])
self.mox.ReplayAll()
expected = \
"10.0.0.1,fake_instance00.novalocal,"\
"192.168.0.100,net:NW-i00000000-0\n"\
"10.0.0.4,fake_instance01.novalocal,"\
"192.168.1.101,net:NW-i00000001-0"
actual_hosts = self.driver.get_dhcp_hosts(None, networks[1])
self.assertEquals(actual_hosts, expected)
def test_get_dhcp_hosts_for_nw01(self):
self.flags(use_single_default_gateway=True)
self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips')
db.network_get_associated_fixed_ips(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([fixed_ips[1],
fixed_ips[2]])
self.mox.ReplayAll()
expected = \
"10.0.0.2,fake_instance00.novalocal,"\
"192.168.1.100,net:NW-i00000000-1\n"\
"10.0.0.3,fake_instance01.novalocal,"\
"192.168.0.101,net:NW-i00000001-1"
actual_hosts = self.driver.get_dhcp_hosts(None, networks[0])
self.assertEquals(actual_hosts, expected)
def test_get_dhcp_opts_for_nw00(self):
self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips')
self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')
db.network_get_associated_fixed_ips(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([fixed_ips[0],
fixed_ips[3],
fixed_ips[4]])
db.virtual_interface_get_by_instance(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([vifs[0],
vifs[1],
vifs[4]])
db.virtual_interface_get_by_instance(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([vifs[2],
vifs[3],
vifs[5]])
self.mox.ReplayAll()
expected_opts = 'NW-i00000001-0,3'
actual_opts = self.driver.get_dhcp_opts(None, networks[0])
self.assertEquals(actual_opts, expected_opts)
def test_get_dhcp_opts_for_nw01(self):
self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips')
self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')
db.network_get_associated_fixed_ips(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([fixed_ips[1],
fixed_ips[2],
fixed_ips[5]])
db.virtual_interface_get_by_instance(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([vifs[0],
vifs[1],
vifs[4]])
db.virtual_interface_get_by_instance(mox.IgnoreArg(),
mox.IgnoreArg())\
.AndReturn([vifs[2],
vifs[3],
vifs[5]])
self.mox.ReplayAll()
expected_opts = "NW-i00000000-1,3"
actual_opts = self.driver.get_dhcp_opts(None, networks[1])
self.assertEquals(actual_opts, expected_opts)
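    # Reading the expected opts above: each entry appears to be a
    # '<per-host tag>,<dhcp option number>' pair, and option 3 is the DHCP
    # router (default gateway) option -- i.e. these are the per-host
    # default-gateway overrides the driver hands to dnsmasq.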
def test_dhcp_opts_not_default_gateway_network(self):
expected = "NW-i00000000-0,3"
actual = self.driver._host_dhcp_opts(fixed_ips[0])
self.assertEquals(actual, expected)
def test_host_dhcp_without_default_gateway_network(self):
expected = ("10.0.0.1,fake_instance00.novalocal,192.168.0.100")
actual = self.driver._host_dhcp(fixed_ips[0])
self.assertEquals(actual, expected)
def _test_initialize_gateway(self, existing, expected):
self.flags(fake_network=False)
executes = []
def fake_execute(*args, **kwargs):
executes.append(args)
if args[0] == 'ip' and args[1] == 'addr' and args[2] == 'show':
return existing, ""
self.stubs.Set(utils, 'execute', fake_execute)
network = {'dhcp_server': '192.168.1.1',
'cidr': '192.168.1.0/24',
'broadcast': '192.168.1.255',
'cidr_v6': '2001:db8::/64'}
self.driver.initialize_gateway_device('eth0', network)
self.assertEqual(executes, expected)
def test_initialize_gateway_moves_wrong_ip(self):
existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
" mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
" link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
" inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
" inet6 dead::beef:dead:beef:dead/64 scope link\n"
" valid_lft forever preferred_lft forever\n")
expected = [
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
('ip', 'addr', 'del', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.1.1/24',
'brd', '192.168.1.255', 'dev', 'eth0'),
('ip', 'addr', 'add', '192.168.0.1/24',
'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
('ip', '-f', 'inet6', 'addr', 'change',
'2001:db8::/64', 'dev', 'eth0'),
('ip', 'link', 'set', 'dev', 'eth0', 'promisc', 'on'),
]
self._test_initialize_gateway(existing, expected)
def test_initialize_gateway_no_move_right_ip(self):
existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
" mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
" link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
" inet 192.168.1.1/24 brd 192.168.1.255 scope global eth0\n"
" inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
" inet6 dead::beef:dead:beef:dead/64 scope link\n"
" valid_lft forever preferred_lft forever\n")
expected = [
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
('ip', '-f', 'inet6', 'addr', 'change',
'2001:db8::/64', 'dev', 'eth0'),
('ip', 'link', 'set', 'dev', 'eth0', 'promisc', 'on'),
]
self._test_initialize_gateway(existing, expected)
def test_initialize_gateway_add_if_blank(self):
existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
" mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
" link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
" inet6 dead::beef:dead:beef:dead/64 scope link\n"
" valid_lft forever preferred_lft forever\n")
expected = [
('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
('ip', 'addr', 'add', '192.168.1.1/24',
'brd', '192.168.1.255', 'dev', 'eth0'),
('ip', '-f', 'inet6', 'addr', 'change',
'2001:db8::/64', 'dev', 'eth0'),
('ip', 'link', 'set', 'dev', 'eth0', 'promisc', 'on'),
]
self._test_initialize_gateway(existing, expected)
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Metadata request handler."""
import hashlib
import hmac
import os
from oslo_log import log as logging
from oslo_utils import secretutils as secutils
import six
import webob.dec
import webob.exc
from nova.api.metadata import base
from nova import cache_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.network.neutronv2 import api as neutronapi
from nova import wsgi
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class MetadataRequestHandler(wsgi.Application):
"""Serve metadata."""
def __init__(self):
self._cache = cache_utils.get_client(
expiration_time=CONF.metadata_cache_expiration)
def get_metadata_by_remote_address(self, address):
if not address:
raise exception.FixedIpNotFoundForAddress(address=address)
cache_key = 'metadata-%s' % address
data = self._cache.get(cache_key)
if data:
LOG.debug("Using cached metadata for %s", address)
return data
try:
data = base.get_metadata_by_address(address)
except exception.NotFound:
return None
if CONF.metadata_cache_expiration > 0:
self._cache.set(cache_key, data)
return data
def get_metadata_by_instance_id(self, instance_id, address):
cache_key = 'metadata-%s' % instance_id
data = self._cache.get(cache_key)
if data:
LOG.debug("Using cached metadata for instance %s", instance_id)
return data
try:
data = base.get_metadata_by_instance_id(instance_id, address)
except exception.NotFound:
return None
if CONF.metadata_cache_expiration > 0:
self._cache.set(cache_key, data)
return data
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if os.path.normpath(req.path_info) == "/":
resp = base.ec2_md_print(base.VERSIONS + ["latest"])
req.response.body = resp
req.response.content_type = base.MIME_TYPE_TEXT_PLAIN
return req.response
if CONF.neutron.service_metadata_proxy:
if req.headers.get('X-Metadata-Provider'):
meta_data = self._handle_instance_id_request_from_lb(req)
else:
meta_data = self._handle_instance_id_request(req)
else:
if req.headers.get('X-Instance-ID'):
LOG.warning(
_LW("X-Instance-ID present in request headers. The "
"'service_metadata_proxy' option must be "
"enabled to process this header."))
meta_data = self._handle_remote_ip_request(req)
if meta_data is None:
raise webob.exc.HTTPNotFound()
try:
data = meta_data.lookup(req.path_info)
except base.InvalidMetadataPath:
raise webob.exc.HTTPNotFound()
if callable(data):
return data(req, meta_data)
resp = base.ec2_md_print(data)
if isinstance(resp, six.text_type):
req.response.text = resp
else:
req.response.body = resp
req.response.content_type = meta_data.get_mimetype()
return req.response
def _handle_remote_ip_request(self, req):
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
try:
meta_data = self.get_metadata_by_remote_address(remote_address)
except Exception:
LOG.exception(_LE('Failed to get metadata for IP: %s'),
remote_address)
msg = _('An unknown error has occurred. '
'Please try your request again.')
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(msg))
if meta_data is None:
LOG.error(_LE('Failed to get metadata for IP: %s'),
remote_address)
return meta_data
def _handle_instance_id_request(self, req):
instance_id = req.headers.get('X-Instance-ID')
tenant_id = req.headers.get('X-Tenant-ID')
signature = req.headers.get('X-Instance-ID-Signature')
remote_address = req.headers.get('X-Forwarded-For')
        # Ensure the required headers are present and were not passed
        # more than once
if instance_id is None:
msg = _('X-Instance-ID header is missing from request.')
elif signature is None:
msg = _('X-Instance-ID-Signature header is missing from request.')
elif tenant_id is None:
msg = _('X-Tenant-ID header is missing from request.')
elif not isinstance(instance_id, six.string_types):
msg = _('Multiple X-Instance-ID headers found within request.')
elif not isinstance(tenant_id, six.string_types):
msg = _('Multiple X-Tenant-ID headers found within request.')
else:
msg = None
if msg:
raise webob.exc.HTTPBadRequest(explanation=msg)
self._validate_shared_secret(instance_id, signature,
remote_address)
return self._get_meta_by_instance_id(instance_id, tenant_id,
remote_address)
def _get_instance_id_from_lb(self, provider_id, instance_address):
# We use admin context, admin=True to lookup the
# inter-Edge network port
context = nova_context.get_admin_context()
neutron = neutronapi.get_client(context, admin=True)
        # Tenant and instance ids are determined as follows:
# X-Metadata-Provider contains id of the metadata provider, and since
# overlapping networks cannot be connected to the same metadata
# provider, the combo of tenant's instance IP and the metadata
# provider has to be unique.
#
# The networks which are connected to the metadata provider are
# retrieved in the 1st call to neutron.list_subnets()
# In the 2nd call we read the ports which belong to any of the
# networks retrieved above, and have the X-Forwarded-For IP address.
# This combination has to be unique as explained above, and we can
# read the instance_id, tenant_id from that port entry.
# Retrieve networks which are connected to metadata provider
md_subnets = neutron.list_subnets(
context,
advanced_service_providers=[provider_id],
fields=['network_id'])
md_networks = [subnet['network_id']
for subnet in md_subnets['subnets']]
try:
# Retrieve the instance data from the instance's port
instance_data = neutron.list_ports(
context,
fixed_ips='ip_address=' + instance_address,
network_id=md_networks,
fields=['device_id', 'tenant_id'])['ports'][0]
except Exception as e:
LOG.error(_LE('Failed to get instance id for metadata '
'request, provider %(provider)s '
'networks %(networks)s '
'requester %(requester)s. Error: %(error)s'),
{'provider': provider_id,
'networks': md_networks,
'requester': instance_address,
'error': e})
msg = _('An unknown error has occurred. '
'Please try your request again.')
raise webob.exc.HTTPBadRequest(explanation=msg)
instance_id = instance_data['device_id']
tenant_id = instance_data['tenant_id']
        # instance_data is unicode-encoded, but cache_utils does not handle
        # unicode well, so convert it to str
if isinstance(instance_id, six.text_type):
instance_id = instance_id.encode('utf-8')
return instance_id, tenant_id
def _handle_instance_id_request_from_lb(self, req):
remote_address = req.headers.get('X-Forwarded-For')
if remote_address is None:
msg = _('X-Forwarded-For is missing from request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
provider_id = req.headers.get('X-Metadata-Provider')
if provider_id is None:
msg = _('X-Metadata-Provider is missing from request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
instance_address = remote_address.split(',')[0]
# If authentication token is set, authenticate
if CONF.neutron.metadata_proxy_shared_secret:
signature = req.headers.get('X-Metadata-Provider-Signature')
self._validate_shared_secret(provider_id, signature,
instance_address)
instance_id, tenant_id = self._get_instance_id_from_lb(
provider_id, instance_address)
return self._get_meta_by_instance_id(instance_id, tenant_id,
instance_address)
def _validate_shared_secret(self, requestor_id, signature,
requestor_address):
expected_signature = hmac.new(
CONF.neutron.metadata_proxy_shared_secret,
requestor_id, hashlib.sha256).hexdigest()
if not secutils.constant_time_compare(expected_signature, signature):
if requestor_id:
LOG.warning(_LW('X-Instance-ID-Signature: %(signature)s does '
'not match the expected value: '
'%(expected_signature)s for id: '
'%(requestor_id)s. Request From: '
'%(requestor_address)s'),
{'signature': signature,
'expected_signature': expected_signature,
'requestor_id': requestor_id,
'requestor_address': requestor_address})
msg = _('Invalid proxy request signature.')
raise webob.exc.HTTPForbidden(explanation=msg)
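    # Illustrative sketch (not part of the handler): for the comparison above
    # to succeed, the proxy adding the signature header must compute the same
    # digest over the id it forwards, mirroring the hmac.new() call above.
    # Assuming a bytes secret and id:
    #
    #   signature = hmac.new(shared_secret, instance_id,
    #                        hashlib.sha256).hexdigest()
    #   # sent as X-Instance-ID-Signature (or X-Metadata-Provider-Signature)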
def _get_meta_by_instance_id(self, instance_id, tenant_id, remote_address):
try:
meta_data = self.get_metadata_by_instance_id(instance_id,
remote_address)
except Exception:
LOG.exception(_LE('Failed to get metadata for instance id: %s'),
instance_id)
msg = _('An unknown error has occurred. '
'Please try your request again.')
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(msg))
if meta_data is None:
LOG.error(_LE('Failed to get metadata for instance id: %s'),
instance_id)
elif meta_data.instance.project_id != tenant_id:
LOG.warning(_LW("Tenant_id %(tenant_id)s does not match tenant_id "
"of instance %(instance_id)s."),
{'tenant_id': tenant_id, 'instance_id': instance_id})
# causes a 404 to be raised
meta_data = None
return meta_data
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import os
import logging
from flask import Flask, request, make_response, abort, \
url_for, render_template, jsonify
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from werkzeug.utils import secure_filename
from curl2share import config
from curl2share import utils
if config.storage == 's3':
from curl2share.storages.s3 import S3
from curl2share.storages.redis import Redis
s3 = S3()
elif config.storage == 'file':
from curl2share.storages.filesystem import FileSystem
fs = FileSystem()
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = config.max_file_size * 1024 * 1024
app.config['RATELIMIT_HEADERS_ENABLED'] = True
logger = logging.getLogger(__name__)
limiter = Limiter(app, key_func=get_remote_address)
@app.errorhandler(400)
def bad_request(err):
''' HTTP 400 code '''
logger.error('Invalid request: {} {}.'.format(request.method,
request.path))
return make_response('Bad Request', 400)
@app.errorhandler(404)
def not_found(err):
''' HTTP 404 code '''
logger.error('File not found: {}'.format(request.path))
return make_response('Not Found', 404)
@app.errorhandler(411)
def no_contentlength(err):
''' HTTP 411 code '''
return make_response('File Is Empty', 411)
@app.errorhandler(405)
def not_allowed(err):
''' HTTP 405 code '''
logger.error('Method not allowed: {} {}'.format(request.method,
request.path))
return make_response('Method Not Allowed', 405)
@app.errorhandler(413)
def file_too_large(err):
''' HTTP 413 code '''
size = request.content_length // 1024 // 1024
logger.error('Request {} {} file too '
'large {}MB.'.format(request.method, request.path, size))
    return make_response('File too large. '
'Limit {}MB'.format(config.max_file_size), 413)
@app.errorhandler(429)
def limit_exceeded(err):
''' Rate limit message'''
remote_ip = get_remote_address()
    logger.error('IP {} exceeded rate limit '
'with request {} {}'.format(remote_ip,
request.method,
request.path))
return make_response('Rate limit exceeded!', 429)
@app.errorhandler(500)
def internal_error(err):
''' HTTP 500 code '''
    return make_response('Something went wrong. '
'Sorry for this inconvenience', 500)
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/', defaults={'file_name': ''}, methods=['POST', 'PUT'])
@app.route('/<string:file_name>', methods=['POST', 'PUT'])
@limiter.limit(config.rate_limit)
def upload(file_name):
''' Write data '''
prefix_path = utils.rand()
content_type = request.headers.get('Content-Type')
if content_type and 'multipart/form-data' in content_type:
req = request.files['file']
        # In multipart/form-data, request.content_length doesn't represent
# actual file size because of boundary strings.
# We have to check it manually.
req.seek(0, os.SEEK_END)
filesize = req.tell()
req.seek(0)
utils.validate_filesize(filesize)
        # Handle the case where no file_name was given in the request.
# Eg: curl -X POST -F file=@file server
if not file_name:
fname = secure_filename(req.filename)
else:
fname = secure_filename(file_name)
elif not content_type and file_name:
        # A request that streams the file body must include file_name
# Eg: curl -X POST|PUT --upload-file myfile server
req = request.stream
filesize = request.content_length
utils.validate_filesize(filesize)
fname = secure_filename(file_name)
else:
headers = request.headers
logger.error('Invalid request header: \n{}'.format(headers))
abort(400)
dest_file = '/'.join([prefix_path, fname])
if config.storage == 'file':
fs.upload(dest_file, req)
if config.storage == 's3':
partsize = 1024 * 1024 * 5
if filesize >= partsize:
s3.upload_multipart(dest_file, req)
else:
s3.upload(dest_file, req)
url = url_for("preview", path=dest_file, _external=True)
return url + '\n', 201
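# Illustrative round trip (a sketch; the exact shape of the random prefix
# returned by utils.rand() is an assumption here):
#
#   $ curl -X PUT --upload-file report.pdf http://localhost:5000/
#   http://localhost:5000/<random-prefix>/report.pdf
#
# The returned URL resolves to the preview() view below; the raw bytes are
# served separately from /d/<path> by download().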
@app.route('/d/<path:path>', methods=['GET'])
def download(path):
'''
Return file.
This method should be used in development only.
In production, consider using nginx for this.
'''
filename = secure_filename(os.path.basename(path))
if config.storage == 'file':
resp = fs.download(path)
resp.headers['Content-Disposition'] = \
'attachment; filename="{}"'.format(filename)
return resp
@app.route('/<path:path>', methods=['GET'])
def preview(path):
''' Render a preview page based on file information '''
logger.info('Rendering preview page for {}'.format(path))
if config.storage == 's3':
if config.enable_redis:
# try to get file info from redis first
# if no info available, then get info from S3 and
# insert back to redis for future use
redis = Redis()
info = redis.get_object(path)
if not info:
info = s3.info(path)
redis.set_object(path, info)
else:
info = s3.info(path) or abort(404)
dl_url = s3.download(path)
filesize = info['content_length']
filetype = info['content_type']
if config.storage == 'file':
dl_url = url_for('download', path=path, _external=True)
dst_file = os.path.join(config.path, path)
filesize, filetype = fs.info(dst_file)
return render_template('preview.html',
title=os.path.basename(path),
file_name=os.path.basename(path),
file_size=filesize,
file_type=filetype,
url=dl_url
)
@app.route('/healthcheck', methods=['GET'])
def healthcheck():
''' Check availability of app '''
redis_enabled = False
redis_conn = ''
redis_host = ''
if config.storage == 'file':
storage_writable = os.access(config.path, os.W_OK)
elif config.storage == 's3':
redis_enabled = config.enable_redis
if redis_enabled:
redis = Redis()
redis_conn = redis.healthcheck()
redis_host = config.redis_host
storage_writable = s3.healthcheck()
resp = jsonify(StorageType=config.storage,
StorageConnectionOK=storage_writable,
RedisEnabled=redis_enabled,
RedisHost=redis_host,
RedisConnectionOK=redis_conn
)
return resp
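# For reference, the healthcheck JSON has this shape (values illustrative,
# shown here for the 'file' backend):
#
#   {"StorageType": "file", "StorageConnectionOK": true,
#    "RedisEnabled": false, "RedisHost": "", "RedisConnectionOK": ""}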
|
|
"""
***********
Annotations
***********
Annotations are arbitrary metadata attached to Synapse entities. They can be
accessed like ordinary object properties or like dictionary keys::
entity.my_annotation = 'This is one way to do it'
entity['other_annotation'] = 'This is another'
Annotations can be given in the constructor for Synapse Entities::
entity = File('data.xyz', parent=my_project, rating=9.1234)
Annotate the entity with location data::
entity.lat_long = [47.627477, -122.332154]
Record when we collected the data::
from datetime import datetime as Datetime
entity.collection_date = Datetime.now()
See:
- :py:meth:`synapseclient.Synapse.getAnnotations`
- :py:meth:`synapseclient.Synapse.setAnnotations`
~~~~~~~~~~~~~~~~~~~~~~~
Annotating data sources
~~~~~~~~~~~~~~~~~~~~~~~
Data sources are best recorded using Synapse's `provenance <Activity.html>`_ tools.
~~~~~~~~~~~~~~~~~~~~~~
Implementation details
~~~~~~~~~~~~~~~~~~~~~~
In Synapse, entities have both properties and annotations. Properties are used by
the system, whereas annotations are completely user defined. In the Python client,
we try to present this situation as a normal object, with one set of properties.
For more on the implementation and a few gotchas, see the documentation on
:py:mod:`synapseclient.entity`.
See also:
- :py:class:`synapseclient.entity.Entity`
- :py:mod:`synapseclient.entity`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from builtins import int
import six
import collections
import warnings
from .utils import to_unix_epoch_time, from_unix_epoch_time, _is_date, _to_list
from .exceptions import SynapseError
def is_synapse_annotations(annotations):
"""Tests if the given object is a Synapse-style Annotations object."""
keys=['id', 'etag', 'creationDate', 'uri', 'stringAnnotations','longAnnotations','doubleAnnotations','dateAnnotations', 'blobAnnotations']
if not isinstance(annotations, collections.Mapping): return False
return all([key in keys for key in annotations.keys()])
def to_synapse_annotations(annotations):
"""Transforms a simple flat dictionary to a Synapse-style Annotation object."""
if is_synapse_annotations(annotations):
return annotations
synapseAnnos = {}
for key in Annotations.system_properties:
if hasattr(annotations, key):
synapseAnnos[key] = getattr(annotations, key)
for key, value in six.iteritems(annotations):
if key in ['id', 'etag', 'blobAnnotations', 'creationDate', 'uri']:
synapseAnnos[key] = value
elif key in ['stringAnnotations','longAnnotations','doubleAnnotations','dateAnnotations'] and isinstance(value, collections.Mapping):
synapseAnnos.setdefault(key, {}).update({k:_to_list(v) for k,v in six.iteritems(value)})
else:
elements = _to_list(value)
if all((isinstance(elem, six.string_types) for elem in elements)):
synapseAnnos.setdefault('stringAnnotations', {})[key] = elements
elif all((isinstance(elem, bool) for elem in elements)):
synapseAnnos.setdefault('stringAnnotations', {})[key] = [str(element).lower() for element in elements]
elif all((isinstance(elem, int) for elem in elements)):
synapseAnnos.setdefault('longAnnotations', {})[key] = elements
elif all((isinstance(elem, float) for elem in elements)):
synapseAnnos.setdefault('doubleAnnotations', {})[key] = elements
elif all((_is_date(elem) for elem in elements)):
synapseAnnos.setdefault('dateAnnotations', {})[key] = [to_unix_epoch_time(elem) for elem in elements]
## TODO: support blob annotations
# elif all((isinstance(elem, ???) for elem in elements)):
# synapseAnnos.setdefault('blobAnnotations', {})[key] = [???(elem) for elem in elements]
else:
synapseAnnos.setdefault('stringAnnotations', {})[key] = [str(elem) for elem in elements]
return synapseAnnos
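# Illustrative example of the bucketing above (a sketch, not a doctest):
#
#   to_synapse_annotations({'foo': 'bar', 'n': 3, 'x': 1.5})
#   # -> {'stringAnnotations': {'foo': ['bar']},
#   #     'longAnnotations':   {'n': [3]},
#   #     'doubleAnnotations': {'x': [1.5]}}
#
# Dates are converted with to_unix_epoch_time into 'dateAnnotations', and any
# other type falls back to its str() form under 'stringAnnotations'.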
def from_synapse_annotations(annotations):
"""Transforms a Synapse-style Annotation object to a simple flat dictionary."""
def process_user_defined_annotations(kvps, annos, func):
"""
for each annotation of a given class (date, string, double, ...), process the
annotation with the given function and add it to the dict 'annos'.
"""
for k,v in six.iteritems(kvps):
## don't overwrite system keys which won't be lists
if k in Annotations.system_properties:
warnings.warn('A user defined annotation, "%s", has the same name as a system defined annotation and will be dropped. Try syn._getRawAnnotations to get annotations in native Synapse format.' % k)
else:
annos.setdefault(k,[]).extend([func(elem) for elem in v])
# Flatten the raw annotations to consolidate doubleAnnotations, longAnnotations,
# stringAnnotations and dateAnnotations into one dictionary
annos = Annotations()
for key, value in annotations.items():
if key in Annotations.system_properties:
setattr(annos, key, value)
elif key=='dateAnnotations':
process_user_defined_annotations(value, annos, lambda x: from_unix_epoch_time(float(x)))
elif key in ['stringAnnotations','longAnnotations']:
process_user_defined_annotations(value, annos, lambda x: x)
elif key == 'doubleAnnotations':
process_user_defined_annotations(value, annos, lambda x: float(x))
elif key=='blobAnnotations':
process_user_defined_annotations(value, annos, lambda x: x)
else:
warnings.warn('Unknown key in annotations response: %s' % key)
return annos
def is_submission_status_annotations(annotations):
"""Tests if the given dictionary is in the form of annotations to submission status"""
keys = ['objectId', 'scopeId', 'stringAnnos','longAnnos','doubleAnnos']
if not isinstance(annotations, collections.Mapping): return False
return all([key in keys for key in annotations.keys()])
def to_submission_status_annotations(annotations, is_private=True):
"""
Converts a normal dictionary to the format used to annotate submission
statuses, which is different from the format used to annotate entities.
:param annotations: A normal Python dictionary whose values are strings, floats, ints or doubles
:param is_private: Set privacy on all annotations at once. These can be set individually using :py:func:`set_privacy`.
Example::
from synapseclient.annotations import to_submission_status_annotations, from_submission_status_annotations
from datetime import datetime as Datetime
## create a submission and get its status
submission = syn.submit(evaluation, 'syn11111111')
submission_status = syn.getSubmissionStatus(submission)
## add annotations
submission_status.annotations = {'foo':'bar', 'shoe_size':12, 'IQ':12, 'timestamp':Datetime.now()}
## convert annotations
submission_status.annotations = to_submission_status_annotations(submission_status.annotations)
submission_status = syn.store(submission_status)
Synapse categorizes these annotations by: stringAnnos, doubleAnnos,
    longAnnos. Date and blob annotations, if supported at all, are not
    `documented <http://rest.synapse.org/org/sagebionetworks/repo/model/annotation/Annotations.html>`_.
"""
if is_submission_status_annotations(annotations):
return annotations
synapseAnnos = {}
for key, value in six.iteritems(annotations):
if key in ['objectId', 'scopeId', 'stringAnnos','longAnnos','doubleAnnos']:
synapseAnnos[key] = value
elif isinstance(value, bool):
synapseAnnos.setdefault('stringAnnos', []).append({ 'key':key, 'value':str(value).lower(), 'isPrivate':is_private })
elif isinstance(value, int):
synapseAnnos.setdefault('longAnnos', []).append({ 'key':key, 'value':value, 'isPrivate':is_private })
elif isinstance(value, float):
synapseAnnos.setdefault('doubleAnnos', []).append({ 'key':key, 'value':value, 'isPrivate':is_private })
elif isinstance(value, six.string_types):
synapseAnnos.setdefault('stringAnnos', []).append({ 'key':key, 'value':value, 'isPrivate':is_private })
elif _is_date(value):
synapseAnnos.setdefault('longAnnos', []).append({ 'key':key, 'value':to_unix_epoch_time(value), 'isPrivate':is_private })
else:
synapseAnnos.setdefault('stringAnnos', []).append({ 'key':key, 'value':str(value), 'isPrivate':is_private })
return synapseAnnos
## TODO: this should accept a status object and return its annotations or an empty dict if there are none
def from_submission_status_annotations(annotations):
"""
Convert back from submission status annotation format to a normal dictionary.
Example::
submission_status.annotations = from_submission_status_annotations(submission_status.annotations)
"""
dictionary = {}
for key, value in six.iteritems(annotations):
if key in ['stringAnnos','longAnnos']:
dictionary.update( { kvp['key']:kvp['value'] for kvp in value } )
elif key == 'doubleAnnos':
dictionary.update( { kvp['key']:float(kvp['value']) for kvp in value } )
else:
dictionary[key] = value
return dictionary
def set_privacy(annotations, key, is_private=True, value_types=['longAnnos', 'doubleAnnos', 'stringAnnos']):
"""
Set privacy of individual annotations, where annotations are in the format used by Synapse
SubmissionStatus objects. See the `Annotations documentation <http://rest.synapse.org/org/sagebionetworks/repo/model/annotation/Annotations.html>`_
and the docs regarding `querying annotations <http://rest.synapse.org/GET/evaluation/submission/query.html>`_.
:param annotations: Annotations that have already been converted to Synapse format using
:py:func:`to_submission_status_annotations`.
:param key: The key of the annotation whose privacy we're setting.
:param is_private: If False, the annotation will be visible to users with READ permission on the evaluation.
                       If True, it will be visible only to users with READ_PRIVATE_SUBMISSION on the
                       evaluation (note: this behavior should be verified).
:param value_types: A list of the value types in which to search for the key. Defaults to all types
['longAnnos', 'doubleAnnos', 'stringAnnos'].
"""
for value_type in value_types:
kvps = annotations.get(value_type, None)
if kvps:
for kvp in kvps:
if kvp['key'] == key:
kvp['isPrivate'] = is_private
return kvp
raise KeyError('The key "%s" couldn\'t be found in the annotations.' % key)
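# Illustrative usage of set_privacy (a sketch, building on the conversion
# function above):
#
#   annos = to_submission_status_annotations({'foo': 'bar', 'IQ': 12},
#                                            is_private=False)
#   set_privacy(annos, 'IQ', is_private=True)
#   # the {'key': 'IQ', ...} entry under 'longAnnos' now has isPrivate=True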
class Annotations(dict):
"""
Represent Synapse Entity annotations as a flat dictionary with the system
assigned properties id, etag, creationDate and uri as object attributes.
"""
system_properties = ['id', 'etag', 'creationDate', 'uri']
def __init__(self, *args, **kwargs):
"""
Create an Annotations object taking key value pairs from a dictionary or
from keyword arguments. System properties id, etag, creationDate and uri
become attributes of the object.
"""
## make sure all system properties exist
for key in Annotations.system_properties:
self.__dict__[key] = None
for arg in args + (kwargs,):
if isinstance(arg, collections.Mapping):
for key in arg:
if key in Annotations.system_properties:
self.__dict__[key] = arg[key]
else:
self.__setitem__(key, arg[key])
else:
raise ValueError("Unrecognized argument to constructor of Annotations: %s" + str(arg))
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
if hasattr(self,key):
return super(Annotations, self).__setattr__(key, value)
else:
return self.__setitem__(key, value)
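# Illustrative behaviour of Annotations (a sketch):
#
#   a = Annotations({'id': 'syn123', 'etag': 'abc'}, foo='bar')
#   a.id       # -> 'syn123'  (system property stored as an attribute)
#   a['foo']   # -> 'bar'     (user annotation stored as a dict item)
#   a.foo      # -> 'bar'     (__getattr__ falls back to the dict)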
|
|
# -*- coding: utf-8 -*-
"""
This module implements the base LaTeX object.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
from ordered_set import OrderedSet
from ..utils import dumps_list
from abc import abstractmethod, ABCMeta
from reprlib import recursive_repr
from inspect import getfullargspec
class _CreatePackages(ABCMeta):
def __init__(cls, name, bases, d): # noqa
packages = OrderedSet()
for b in bases:
if hasattr(b, 'packages'):
packages |= b.packages
if 'packages' in d:
packages |= d['packages']
cls.packages = packages
super().__init__(name, bases, d)
class LatexObject(metaclass=_CreatePackages):
"""The class that every other LaTeX class is a subclass of.
This class implements the main methods that every LaTeX object needs. For
conversion to LaTeX formatted strings it implements the dumps, dump and
generate_tex methods. It also provides the methods that can be used to
represent the packages required by the LatexObject.
"""
_latex_name = None
_star_latex_name = False # latex_name + ('*' if True else '')
#: Set this to an iterable to override the list of default repr
#: attributes.
_repr_attributes_override = None
#: Set this to a dict to change some of the default repr attributes to
#: other attributes. The key is the old one, the value the new one.
_repr_attributes_mapping = None
#: Set on a class to make instances default to a certain kind of escaping
_default_escape = True
#: Only set this directly by changing the cls.escape
_escape = None
@property
def escape(self):
"""Determine whether or not to escape content of this class.
This defaults to `True` for most classes.
"""
if self._escape is not None:
return self._escape
if self._default_escape is not None:
return self._default_escape
return True
@escape.setter
def escape(self, value):
"""Escape flag setter - to be used at object level."""
self._escape = value
#: Start a new paragraph before this environment.
begin_paragraph = False
#: Start a new paragraph after this environment.
end_paragraph = False
#: Same as enabling `begin_paragraph` and `end_paragraph`, so
#: effectively placing this element in its own paragraph.
separate_paragraph = False
def __init__(self):
        # TODO: only create a copy of packages when it will be modified
# Create a copy of the packages attribute, so changing it in an
# instance will not change the class default.
self.packages = self.packages.copy()
@recursive_repr()
def __repr__(self):
"""Create a printable representation of the object."""
return self.__class__.__name__ + '(' + \
', '.join(map(repr, self._repr_values)) + ')'
@property
def _repr_values(self):
"""Return values that are to be shown in repr string."""
def getattr_better(obj, field):
try:
return getattr(obj, field)
except AttributeError as e:
try:
return getattr(obj, '_' + field)
except AttributeError:
raise e
return (getattr_better(self, attr) for attr in self._repr_attributes)
@property
def _repr_attributes(self):
"""Return attributes that should be part of the repr string."""
if self._repr_attributes_override is None:
# Default to init arguments
attrs = getfullargspec(self.__init__).args[1:]
mapping = self._repr_attributes_mapping
if mapping:
attrs = [mapping[a] if a in mapping else a for a in attrs]
return attrs
return self._repr_attributes_override
@property
def latex_name(self):
"""Return the name of the class used in LaTeX.
It can be `None` when the class doesn't have a name.
"""
star = ('*' if self._star_latex_name else '')
if self._latex_name is not None:
return self._latex_name + star
return self.__class__.__name__.lower() + star
@latex_name.setter
def latex_name(self, value):
self._latex_name = value
@abstractmethod
def dumps(self):
"""Represent the class as a string in LaTeX syntax.
This method should be implemented by any class that subclasses this
class.
"""
def dump(self, file_w):
"""Write the LaTeX representation of the class to a file.
Args
----
file_w: io.TextIOBase
The file object in which to save the data
"""
file_w.write(self.dumps())
def generate_tex(self, filepath):
"""Generate a .tex file.
Args
----
filepath: str
The name of the file (without .tex)
"""
with open(filepath + '.tex', 'w', encoding='utf-8') as newf:
self.dump(newf)
def dumps_packages(self):
"""Represent the packages needed as a string in LaTeX syntax.
Returns
-------
        str
"""
return dumps_list(self.packages)
def dump_packages(self, file_w):
"""Write the LaTeX representation of the packages to a file.
Args
----
file_w: io.TextIOBase
The file object in which to save the data
"""
file_w.write(self.dumps_packages())
def dumps_as_content(self):
"""Create a string representation of the object as content.
This is currently only used to add new lines before and after the
output of the dumps function. These can be added or removed by changing
the `begin_paragraph`, `end_paragraph` and `separate_paragraph`
attributes of the class.
"""
string = self.dumps()
if self.separate_paragraph or self.begin_paragraph:
string = '\n\n' + string.lstrip('\n')
if self.separate_paragraph or self.end_paragraph:
string = string.rstrip('\n') + '\n\n'
return string
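# A minimal concrete subclass, sketching how the abstract interface above is
# meant to be used (class name and content are hypothetical):
#
#   class RawLatex(LatexObject):
#       def __init__(self, content):
#           super().__init__()
#           self.content = content
#
#       def dumps(self):
#           return self.content
#
#   RawLatex(r'\emph{hello}').generate_tex('hello')   # writes hello.tex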
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2016 Fedele Mantuano (https://twitter.com/fedelemantuano)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import hashlib
import logging
import os
import shutil
import six
import sys
import tempfile
import unittest
base_path = os.path.realpath(os.path.dirname(__file__))
root = os.path.join(base_path, '..')
sys.path.append(root)
logging.getLogger().addHandler(logging.NullHandler())
import mailparser
from mailparser.utils import (
convert_mail_date,
fingerprints,
get_header,
get_mail_keys,
get_to_domains,
msgconvert,
ported_open,
ported_string,
receiveds_parsing,
parse_received,
random_string,
)
from mailparser.exceptions import MailParserEnvironmentError
mail_test_1 = os.path.join(base_path, 'mails', 'mail_test_1')
mail_test_2 = os.path.join(base_path, 'mails', 'mail_test_2')
mail_test_3 = os.path.join(base_path, 'mails', 'mail_test_3')
mail_test_4 = os.path.join(base_path, 'mails', 'mail_test_4')
mail_test_5 = os.path.join(base_path, 'mails', 'mail_test_5')
mail_test_6 = os.path.join(base_path, 'mails', 'mail_test_6')
mail_test_7 = os.path.join(base_path, 'mails', 'mail_test_7')
mail_test_8 = os.path.join(base_path, 'mails', 'mail_test_8')
mail_test_9 = os.path.join(base_path, 'mails', 'mail_test_9')
mail_test_10 = os.path.join(base_path, 'mails', 'mail_test_10')
mail_test_11 = os.path.join(base_path, 'mails', 'mail_test_11')
mail_test_12 = os.path.join(base_path, 'mails', 'mail_test_12')
mail_test_13 = os.path.join(base_path, 'mails', 'mail_test_13')
mail_test_14 = os.path.join(base_path, 'mails', 'mail_test_14')
mail_test_15 = os.path.join(base_path, 'mails', 'mail_test_15')
mail_malformed_1 = os.path.join(base_path, 'mails', 'mail_malformed_1')
mail_malformed_2 = os.path.join(base_path, 'mails', 'mail_malformed_2')
mail_malformed_3 = os.path.join(base_path, 'mails', 'mail_malformed_3')
mail_outlook_1 = os.path.join(base_path, 'mails', 'mail_outlook_1')
class TestMailParser(unittest.TestCase):
def setUp(self):
self.all_mails = (
mail_test_1,
mail_test_2,
mail_test_3,
mail_test_4,
mail_test_5,
mail_test_6,
mail_test_7,
mail_test_8,
mail_test_9,
mail_test_10,
mail_test_11,
mail_test_12,
mail_test_13,
mail_malformed_1,
mail_malformed_2,
mail_malformed_3)
def test_write_attachments(self):
attachments = [
"<_1_0B4E44A80B15F6FC005C1243C12580DD>",
"<_1_0B4E420C0B4E3DD0005C1243C12580DD>",
"<_1_0B4E24640B4E1564005C1243C12580DD>",
"Move To Eight ZWEP6227F.pdf"]
random_path = os.path.join(root, "tests", random_string())
mail = mailparser.parse_from_file(mail_test_10)
os.makedirs(random_path)
mail.write_attachments(random_path)
for i in attachments:
self.assertTrue(os.path.exists(os.path.join(random_path, i)))
shutil.rmtree(random_path)
def test_issue62(self):
mail = mailparser.parse_from_file(mail_test_14)
received_spf = mail.Received_SPF
self.assertIsInstance(received_spf, list)
self.assertIn("custom_header1", received_spf)
self.assertIn("custom_header2", received_spf)
def test_html_field(self):
mail = mailparser.parse_from_file(mail_malformed_1)
self.assertIsInstance(mail.text_html, list)
self.assertIsInstance(mail.text_html_json, six.text_type)
self.assertEqual(len(mail.text_html), 1)
def test_text_not_managed(self):
mail = mailparser.parse_from_file(mail_test_14)
self.assertIsInstance(mail.text_not_managed, list)
self.assertIsInstance(mail.text_not_managed_json, six.text_type)
self.assertEqual(len(mail.text_not_managed), 1)
self.assertEqual("PNG here", mail.text_not_managed[0])
def test_get_mail_keys(self):
mail = mailparser.parse_from_file(mail_test_11)
all_parts = get_mail_keys(mail.message)
mains_parts = get_mail_keys(mail.message, False)
self.assertNotEqual(all_parts, mains_parts)
self.assertIn("message-id", mains_parts)
self.assertIn("x-filterd-recvd-size", all_parts)
self.assertNotIn("x-filterd-recvd-size", mains_parts)
def test_mail_partial(self):
mail = mailparser.parse_from_file(mail_test_10)
self.assertNotEqual(mail.mail, mail.mail_partial)
self.assertIn("message-id", mail.mail_partial)
self.assertIn("x-ibm-av-version", mail.mail)
self.assertNotIn("x-ibm-av-version", mail.mail_partial)
result = mail.mail_partial_json
self.assertIsInstance(result, six.text_type)
nr_attachments = len(mail._attachments)
self.assertEqual(nr_attachments, 4)
def test_not_parsed_received(self):
mail = mailparser.parse_from_file(mail_test_9)
for i in mail.received:
self.assertNotIn("raw", i)
self.assertIn("hop", i)
def test_issue_received(self):
mail = mailparser.parse_from_file(mail_test_8)
for i in mail.received:
self.assertIn("date_utc", i)
self.assertIsNotNone(i["date_utc"])
def test_get_header(self):
mail = mailparser.parse_from_file(mail_test_1)
h1 = get_header(mail.message, "from")
self.assertIsInstance(h1, six.text_type)
def test_receiveds_parsing(self):
for i in self.all_mails:
mail = mailparser.parse_from_file(i)
receiveds = mail.received_raw
result = receiveds_parsing(receiveds)
self.assertIsInstance(result, list)
for j in result:
self.assertIsInstance(j, dict)
self.assertIn("hop", j)
self.assertIn("delay", j)
def test_ipaddress(self):
mail = mailparser.parse_from_file(mail_test_2)
trust = "smtp.customers.net"
ip = "217.76.210.112"
result = mail.get_server_ipaddress(trust)
self.assertEqual(result, ip)
trust = ""
result = mail.get_server_ipaddress(trust)
self.assertEqual(result, None)
trust = " "
result = mail.get_server_ipaddress(trust)
self.assertEqual(result, None)
def test_ipaddress_unicodeerror(self):
mail = mailparser.parse_from_file(mail_test_12)
trust = "localhost"
result = mail.get_server_ipaddress(trust)
self.assertEqual(result, "96.202.181.20")
def test_fingerprints_body(self):
mail = mailparser.parse_from_file(mail_test_1)
md5, sha1, sha256, sha512 = fingerprints(
mail.body.encode("utf-8"))
self.assertEqual(md5, "55852a2efe95e7249887c92cc02123f8")
self.assertEqual(sha1, "62fef1e38327ed09363624c3aff8ea11723ee05f")
self.assertEqual(sha256, ("cd4af1017f2e623f6d38f691048b6"
"a28d8b1f44a0478137b4337eac6de78f71a"))
self.assertEqual(sha512, ("4a573c7929b078f2a2c1c0f869d418b0c020d4"
"d37196bd6dcc209f9ccb29ca67355aa5e47b97"
"c8bf90377204f59efde7ba1fc071b6f250a665"
"72f63b997e92e8"))
def test_fingerprints_unicodeencodeerror(self):
mail = mailparser.parse_from_file(mail_test_7)
for i in mail.attachments:
fingerprints(i["payload"])
def test_malformed_mail(self):
mail = mailparser.parse_from_file(mail_malformed_3)
defects_categories = mail.defects_categories
self.assertIn("StartBoundaryNotFoundDefect", defects_categories)
self.assertIn("MultipartInvariantViolationDefect", defects_categories)
self.assertIn("reply-to", mail.mail)
self.assertNotIn("reply_to", mail.mail)
reply_to = [(u'VICTORIA Souvenirs', u'[email protected]')]
self.assertEqual(mail.reply_to, reply_to)
self.assertEqual(mail.fake_header, six.text_type())
# This email has header X-MSMail-Priority
msmail_priority = mail.X_MSMail_Priority
self.assertEqual(msmail_priority, "High")
def test_type_error(self):
mail = mailparser.parse_from_file(mail_test_5)
self.assertEqual(len(mail.attachments), 5)
for i in mail.attachments:
self.assertIsInstance(i["filename"], six.text_type)
def test_filename_decode(self):
mail = mailparser.parse_from_file(mail_test_11)
for i in mail.attachments:
self.assertIsInstance(i["filename"], six.text_type)
def test_valid_mail(self):
m = mailparser.parse_from_string("fake mail")
self.assertFalse(m.message)
def test_receiveds(self):
mail = mailparser.parse_from_file(mail_test_1)
self.assertEqual(len(mail.received), 6)
self.assertIsInstance(mail.received, list)
for i in mail.received:
self.assertIsInstance(i, dict)
self.assertIsInstance(mail.received_raw, list)
for i in mail.received_raw:
self.assertIsInstance(i, six.text_type)
self.assertIsInstance(mail.received_json, six.text_type)
def test_parsing_know_values(self):
mail = mailparser.parse_from_file(mail_test_2)
trust = "smtp.customers.net"
self.assertEqual(False, mail.has_defects)
raw = "217.76.210.112"
result = mail.get_server_ipaddress(trust)
self.assertEqual(raw, result)
raw = "<[email protected]>"
result = mail.message_id
self.assertEqual(raw, result)
raw = "[email protected]"
result = mail.to
self.assertEqual(len(result), 2)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], tuple)
self.assertIsInstance(mail.to_json, six.text_type)
self.assertIsInstance(mail.to_raw, six.text_type)
self.assertEqual(raw, result[0][1])
raw = "[email protected]"
result = mail.from_
self.assertEqual(raw, result[0][1])
raw = "Bollettino Meteorologico del 29/11/2015"
result = mail.subject
self.assertEqual(raw, result)
result = mail.has_defects
self.assertEqual(False, result)
result = len(mail.attachments)
self.assertEqual(3, result)
# raw = "Sun, 29 Nov 2015 09:45:18 +0100"
self.assertIsInstance(mail.date_raw, six.text_type)
self.assertIsInstance(mail.date_json, six.text_type)
raw_utc = datetime.datetime(2015, 11, 29, 8, 45, 18, 0).isoformat()
result = mail.date.isoformat()
self.assertEqual(raw_utc, result)
def test_types(self):
mail = mailparser.parse_from_file(mail_test_2)
trust = "smtp.customers.net"
self.assertEqual(False, mail.has_defects)
result = mail.mail
self.assertIsInstance(result, dict)
self.assertNotIn("defects", result)
self.assertIn("has_defects", result)
result = mail.get_server_ipaddress(trust)
self.assertIsInstance(result, six.text_type)
result = mail.mail_json
self.assertIsInstance(result, six.text_type)
result = mail.headers_json
self.assertIsInstance(result, six.text_type)
result = mail.headers
self.assertIsInstance(result, dict)
result = mail.body
self.assertIsInstance(result, six.text_type)
result = mail.date
self.assertIsInstance(result, datetime.datetime)
result = mail.from_
self.assertIsInstance(result, list)
result = mail.to
self.assertIsInstance(result, list)
self.assertEqual(len(result), 2)
self.assertIsInstance(result[0], tuple)
self.assertEqual(len(result[0]), 2)
result = mail.subject
self.assertIsInstance(result, six.text_type)
result = mail.message_id
self.assertIsInstance(result, six.text_type)
result = mail.attachments
self.assertIsInstance(result, list)
result = mail.date
self.assertIsInstance(result, datetime.datetime)
result = mail.defects
self.assertIsInstance(result, list)
def test_defects(self):
mail = mailparser.parse_from_file(mail_malformed_1)
self.assertEqual(True, mail.has_defects)
self.assertEqual(1, len(mail.defects))
self.assertEqual(1, len(mail.defects_categories))
self.assertIn("defects", mail.mail)
self.assertIn("StartBoundaryNotFoundDefect",
mail.defects_categories)
self.assertIsInstance(mail.mail_json, six.text_type)
result = len(mail.attachments)
self.assertEqual(1, result)
mail = mailparser.parse_from_file(mail_test_1)
if six.PY2:
self.assertEqual(False, mail.has_defects)
self.assertNotIn("defects", mail.mail)
elif six.PY3:
self.assertEqual(True, mail.has_defects)
self.assertEqual(1, len(mail.defects))
self.assertEqual(1, len(mail.defects_categories))
self.assertIn("defects", mail.mail)
self.assertIn(
"CloseBoundaryNotFoundDefect", mail.defects_categories)
def test_defects_bug(self):
mail = mailparser.parse_from_file(mail_malformed_2)
self.assertEqual(True, mail.has_defects)
self.assertEqual(1, len(mail.defects))
self.assertEqual(1, len(mail.defects_categories))
self.assertIn("defects", mail.mail)
self.assertIn("StartBoundaryNotFoundDefect",
mail.defects_categories)
self.assertIsInstance(mail.parsed_mail_json, six.text_type)
result = len(mail.attachments)
self.assertEqual(0, result)
def test_add_content_type(self):
mail = mailparser.parse_from_file(mail_test_3)
self.assertEqual(False, mail.has_defects)
result = mail.mail
self.assertEqual(len(result["attachments"]), 1)
self.assertIsInstance(
result["attachments"][0]["mail_content_type"], six.text_type)
self.assertFalse(result["attachments"][0]["binary"])
self.assertIsInstance(
result["attachments"][0]["payload"], six.text_type)
self.assertEqual(
result["attachments"][0]["content_transfer_encoding"],
"quoted-printable")
self.assertEqual(
result["attachments"][0]["charset"],
"iso-8859-1")
self.assertEqual(
result["attachments"][0]["content-disposition"], "inline")
mail = mailparser.parse_from_file(mail_malformed_1)
attachments = mail.mail["attachments"]
self.assertEqual(attachments[0]["content-disposition"], "")
def test_from_bytes(self):
if six.PY2:
with self.assertRaises(MailParserEnvironmentError):
mailparser.MailParser.from_bytes(b"")
def test_classmethods(self):
# MailParser.from_file
m = mailparser.MailParser.from_file(mail_test_3)
m.parse()
result = m.mail
self.assertEqual(len(result["attachments"]), 1)
# MailParser.from_string
m = mailparser.MailParser.from_string(m.message_as_string)
m.parse()
result = m.mail
self.assertEqual(len(result["attachments"]), 1)
def test_bug_UnicodeDecodeError(self):
m = mailparser.parse_from_file(mail_test_6)
self.assertIsInstance(m.mail, dict)
self.assertIsInstance(m.mail_json, six.text_type)
def test_parse_from_file_msg(self):
"""
Tested mail from VirusTotal: md5 b89bf096c9e3717f2d218b3307c69bd0
        The email used for this unit test was found on VirusTotal and is
        already publicly available, so it cannot be considered
        a privacy violation.
"""
m = mailparser.parse_from_file_msg(mail_outlook_1)
email = m.mail
self.assertIn("attachments", email)
self.assertEqual(len(email["attachments"]), 6)
self.assertIn("from", email)
self.assertEqual(email["from"][0][1], "[email protected]")
self.assertIn("subject", email)
def test_msgconvert(self):
"""
Tested mail from VirusTotal: md5 b89bf096c9e3717f2d218b3307c69bd0
        The email used for this unit test was found on VirusTotal and is
        already publicly available, so it cannot be considered
        a privacy violation.
"""
f, _ = msgconvert(mail_outlook_1)
self.assertTrue(os.path.exists(f))
m = mailparser.parse_from_file(f)
self.assertEqual(m.from_[0][1], "[email protected]")
def test_from_file_obj(self):
with ported_open(mail_test_2) as fp:
mail = mailparser.parse_from_file_obj(fp)
trust = "smtp.customers.net"
self.assertEqual(False, mail.has_defects)
result = mail.mail
self.assertIsInstance(result, dict)
self.assertNotIn("defects", result)
self.assertNotIn("anomalies", result)
self.assertIn("has_defects", result)
result = mail.get_server_ipaddress(trust)
self.assertIsInstance(result, six.text_type)
result = mail.mail_json
self.assertIsInstance(result, six.text_type)
result = mail.headers
self.assertIsInstance(result, dict)
result = mail.headers_json
self.assertIsInstance(result, six.text_type)
result = mail.body
self.assertIsInstance(result, six.text_type)
result = mail.date
self.assertIsInstance(result, datetime.datetime)
result = mail.from_
self.assertIsInstance(result, list)
result = mail.to
self.assertIsInstance(result, list)
self.assertEqual(len(result), 2)
self.assertIsInstance(result[0], tuple)
self.assertEqual(len(result[0]), 2)
result = mail.subject
self.assertIsInstance(result, six.text_type)
result = mail.message_id
self.assertIsInstance(result, six.text_type)
result = mail.attachments
self.assertIsInstance(result, list)
result = mail.date
self.assertIsInstance(result, datetime.datetime)
result = mail.defects
self.assertIsInstance(result, list)
result = mail.timezone
self.assertEqual(result, "+1.0")
def test_get_to_domains(self):
m = mailparser.parse_from_file(mail_test_6)
domains_1 = get_to_domains(m.to, m.reply_to)
self.assertIsInstance(domains_1, list)
self.assertIn("test.it", domains_1)
domains_2 = m.to_domains
self.assertIsInstance(domains_2, list)
self.assertIn("test.it", domains_2)
self.assertEqual(domains_1, domains_2)
self.assertIsInstance(m.to_domains_json, six.text_type)
def test_convert_mail_date(self):
s = "Mon, 20 Mar 2017 05:12:54 +0600"
d, t = convert_mail_date(s)
self.assertEqual(t, "+6.0")
self.assertEqual(str(d), "2017-03-19 23:12:54")
s = "Mon, 20 Mar 2017 05:12:54 -0600"
d, t = convert_mail_date(s)
self.assertEqual(t, "-6.0")
s = "Mon, 11 Dec 2017 15:27:44 +0530"
d, t = convert_mail_date(s)
self.assertEqual(t, "+5.5")
def test_ported_string(self):
raw_data = ""
s = ported_string(raw_data)
self.assertEqual(s, six.text_type())
raw_data = u"test"
s = ported_string(raw_data)
self.assertEqual(s, "test")
def test_standard_outlook(self):
""" Verify a basic outlook received header works. """
received = """
from DM3NAM03FT035
by CY4PR0601CA0051.outlook.office365.com
with Microsoft SMTP Server version=TLS1_2, cipher=TLS
id 15.20.1185.23
via Frontend Transport; Mon, 1 Oct 2018 09:49:21 +0000
""".strip()
expected = {
'from': 'DM3NAM03FT035',
'by': 'CY4PR0601CA0051.outlook.office365.com',
'with': 'Microsoft SMTP Server version=TLS1_2, cipher=TLS',
'id': '15.20.1185.23',
'via': 'Frontend Transport',
'date': 'Mon, 1 Oct 2018 09:49:21 +0000'
}
values_by_clause = parse_received(received)
self.assertEqual(expected, values_by_clause)
def test_standard_google__with_cipher(self):
""" Verify that we don't match 'with cipher' a la google. """
received = """
from mail_yw1_f65.google.com
by subdomain.domain.com Postfix with ESMTPS
id abc123 for <[email protected]>;
Tue, 25 Sep 2018 13:09:36 +0000 (UTC)"""
expected = {
'from': 'mail_yw1_f65.google.com',
'by': 'subdomain.domain.com Postfix',
'with': 'ESMTPS',
'id': 'abc123',
'for': '<[email protected]>',
'date': 'Tue, 25 Sep 2018 13:09:36 +0000 (UTC)'
}
values_by_clause = parse_received(received)
self.assertEqual(expected, values_by_clause)
@unittest.skipIf(sys.version_info[0] < 3, "Must be using Python 3")
def test_parse_from_bytes(self):
with open(mail_test_2, "rb") as f:
mail_bytes = f.read()
mail = mailparser.parse_from_bytes(mail_bytes)
trust = "smtp.customers.net"
self.assertEqual(False, mail.has_defects)
raw = "217.76.210.112"
result = mail.get_server_ipaddress(trust)
self.assertEqual(raw, result)
raw = "<[email protected]>"
result = mail.message_id
self.assertEqual(raw, result)
raw = "[email protected]"
result = mail.to
self.assertEqual(len(result), 2)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], tuple)
self.assertIsInstance(mail.to_json, six.text_type)
self.assertIsInstance(mail.to_raw, six.text_type)
self.assertEqual(raw, result[0][1])
raw = "[email protected]"
result = mail.from_
self.assertEqual(raw, result[0][1])
raw = "Bollettino Meteorologico del 29/11/2015"
result = mail.subject
self.assertEqual(raw, result)
result = mail.has_defects
self.assertEqual(False, result)
result = len(mail.attachments)
self.assertEqual(3, result)
# raw = "Sun, 29 Nov 2015 09:45:18 +0100"
self.assertIsInstance(mail.date_raw, six.text_type)
self.assertIsInstance(mail.date_json, six.text_type)
raw_utc = datetime.datetime(2015, 11, 29, 8, 45, 18, 0).isoformat()
result = mail.date.isoformat()
self.assertEqual(raw_utc, result)
def test_write_uuencode_attachment(self):
mail = mailparser.parse_from_file(mail_test_15)
temp_dir = tempfile.mkdtemp()
mail.write_attachments(temp_dir)
md5 = hashlib.md5()
with open(os.path.join(temp_dir, 'REQUEST FOR QUOTE.zip'), 'rb') as f:
md5.update(f.read())
shutil.rmtree(temp_dir)
self.assertEqual(md5.hexdigest(), '4f2cf891e7cfb349fca812091f184ecc')
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
"""
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012
# License: BSD 3 clause
from tempfile import mkdtemp
import warnings
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.cluster import Ward, WardAgglomeration, ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rnd = np.random.RandomState(42)
X = rnd.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# Deprecation of Ward class
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always", DeprecationWarning)
Ward().fit(X)
assert_equal(len(warning_list), 1)
    # Test hierarchical clustering on a precomputed distance matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering with a callable affinity (manhattan distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
"""
Check that we obtain the correct solution for structured linkage trees.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that each tree builder raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
"""
Check that we obtain the correct solution for unstructured linkage trees.
"""
rnd = np.random.RandomState(0)
X = rnd.randn(50, 100)
for this_X in (X, X[0]):
# With a specified number of clusters, just for the sake of
# raising a warning and testing the warning code
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("ignore", DeprecationWarning)
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always", UserWarning)
warnings.simplefilter("ignore", DeprecationWarning)
# With a specified number of clusters, just for the sake of
# raising a warning and testing the warning code
children, n_nodes, n_leaves, parent = tree_builder(
this_X.T, n_clusters=10)
assert_equal(len(warning_list), 1)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
"""
Check that the height of the results of linkage tree is sorted.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
"""
Check that we obtain the correct number of clusters with
agglomerative clustering.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rnd.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=mkdtemp(),
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
np.testing.assert_array_equal(clustering.labels_, labels)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that a ValueError is raised when the connectivity matrix has the wrong shape
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with a metric other than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test that metrics other than euclidean work with complete linkage
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(
clustering2.labels_,
clustering.labels_), 1)
def test_ward_agglomeration():
"""
Check that we obtain the correct solution in a simplistic case
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
assert_warns(DeprecationWarning, WardAgglomeration)
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always", DeprecationWarning)
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
ward = WardAgglomeration(n_clusters=5, connectivity=connectivity)
ward.fit(X)
assert_equal(len(warning_list), 1)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_array_equal(agglo.labels_, ward.labels_)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
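# A small worked example of the co-clustering check above (illustrative only,
# not part of the original test suite): cuts that agree up to a relabelling of
# the clusters produce identical co-clustering matrices.
#
#   cut1 = np.array([0, 0, 1])
#   cut2 = np.array([1, 1, 0])
#   # Both cuts group samples {0, 1} together and leave sample {2} alone, so
#   # assess_same_labelling(cut1, cut2) passes even though the labels differ.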
def test_scikit_vs_scipy():
"""Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
"""
n, p, k = 10, 5, 3
rnd = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rnd.normal(size=(n, p))
X -= 4 * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
"""
Check that connectivity in the ward tree is propagated correctly during
merging.
"""
from sklearn.neighbors import kneighbors_graph
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144),
])
connectivity = kneighbors_graph(X, 10)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_connectivity_fixing_non_lil():
"""
Check non-regression of a bug when a connectivity matrix that does not
support item assignment is provided with more than one component.
"""
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from operator import attrgetter
import yaml
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse # noqa
from django.utils.translation import ugettext_lazy as _
import django.views.generic
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.stacks \
import api as project_api
from openstack_dashboard.dashboards.project.stacks \
import forms as project_forms
from openstack_dashboard.dashboards.project.stacks \
import tables as project_tables
from openstack_dashboard.dashboards.project.stacks \
import tabs as project_tabs
LOG = logging.getLogger(__name__)
class IndexView(tables.DataTableView):
table_class = project_tables.StacksTable
template_name = 'project/stacks/index.html'
def __init__(self, *args, **kwargs):
super(IndexView, self).__init__(*args, **kwargs)
self._more = None
self._prev = None
def has_prev_data(self, table):
return self._prev
def has_more_data(self, table):
return self._more
def get_data(self):
stacks = []
prev_marker = self.request.GET.get(
project_tables.StacksTable._meta.prev_pagination_param)
if prev_marker is not None:
sort_dir = 'asc'
marker = prev_marker
else:
sort_dir = 'desc'
marker = self.request.GET.get(
project_tables.StacksTable._meta.pagination_param)
try:
stacks, self._more, self._prev = api.heat.stacks_list(
self.request,
marker=marker,
paginate=True,
sort_dir=sort_dir)
if prev_marker is not None:
stacks = sorted(stacks, key=attrgetter('creation_time'),
reverse=True)
except Exception:
self._prev = False
self._more = False
msg = _('Unable to retrieve stack list.')
exceptions.handle(self.request, msg)
return stacks
class SelectTemplateView(forms.ModalFormView):
form_class = project_forms.TemplateForm
template_name = 'project/stacks/select_template.html'
success_url = reverse_lazy('horizon:project:stacks:launch')
def get_form_kwargs(self):
kwargs = super(SelectTemplateView, self).get_form_kwargs()
kwargs['next_view'] = CreateStackView
return kwargs
class ChangeTemplateView(forms.ModalFormView):
form_class = project_forms.ChangeTemplateForm
template_name = 'project/stacks/change_template.html'
success_url = reverse_lazy('horizon:project:stacks:edit_stack')
def get_context_data(self, **kwargs):
context = super(ChangeTemplateView, self).get_context_data(**kwargs)
context['stack'] = self.get_object()
return context
@memoized.memoized_method
def get_object(self):
stack_id = self.kwargs['stack_id']
try:
self._stack = api.heat.stack_get(self.request, stack_id)
except Exception:
msg = _("Unable to retrieve stack.")
redirect = reverse('horizon:project:stacks:index')
exceptions.handle(self.request, msg, redirect=redirect)
return self._stack
def get_initial(self):
stack = self.get_object()
return {'stack_id': stack.id,
'stack_name': stack.stack_name
}
def get_form_kwargs(self):
kwargs = super(ChangeTemplateView, self).get_form_kwargs()
kwargs['next_view'] = EditStackView
return kwargs
class CreateStackView(forms.ModalFormView):
form_class = project_forms.CreateStackForm
template_name = 'project/stacks/create.html'
success_url = reverse_lazy('horizon:project:stacks:index')
def get_initial(self):
initial = {}
self.load_kwargs(initial)
if 'parameters' in self.kwargs:
initial['parameters'] = json.dumps(self.kwargs['parameters'])
return initial
def load_kwargs(self, initial):
# load the "passed through" data from template form
for prefix in ('template', 'environment'):
for suffix in ('_data', '_url'):
key = prefix + suffix
if key in self.kwargs:
initial[key] = self.kwargs[key]
def get_form_kwargs(self):
kwargs = super(CreateStackView, self).get_form_kwargs()
if 'parameters' in self.kwargs:
kwargs['parameters'] = self.kwargs['parameters']
else:
data = json.loads(self.request.POST['parameters'])
kwargs['parameters'] = data
return kwargs
# edit stack parameters, coming from template selector
class EditStackView(CreateStackView):
form_class = project_forms.EditStackForm
template_name = 'project/stacks/update.html'
success_url = reverse_lazy('horizon:project:stacks:index')
def get_initial(self):
initial = super(EditStackView, self).get_initial()
initial['stack'] = self.get_object()['stack']
if initial['stack']:
initial['stack_id'] = initial['stack'].id
initial['stack_name'] = initial['stack'].stack_name
return initial
def get_context_data(self, **kwargs):
context = super(EditStackView, self).get_context_data(**kwargs)
context['stack'] = self.get_object()['stack']
return context
@memoized.memoized_method
def get_object(self):
stack_id = self.kwargs['stack_id']
try:
stack = {}
stack['stack'] = api.heat.stack_get(self.request, stack_id)
stack['template'] = api.heat.template_get(self.request, stack_id)
self._stack = stack
except Exception:
msg = _("Unable to retrieve stack.")
redirect = reverse('horizon:project:stacks:index')
exceptions.handle(self.request, msg, redirect=redirect)
return self._stack
class DetailView(tabs.TabView):
tab_group_class = project_tabs.StackDetailTabs
template_name = 'project/stacks/detail.html'
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
stack = self.get_data(self.request, **kwargs)
table = project_tables.StacksTable(self.request)
context["stack"] = stack
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(stack)
context["page_title"] = _("Stack Details: "
"%(stack_name)s") % {'stack_name':
stack.stack_name}
return context
@memoized.memoized_method
def get_data(self, request, **kwargs):
stack_id = kwargs['stack_id']
try:
stack = api.heat.stack_get(request, stack_id)
request.session['stack_id'] = stack.id
request.session['stack_name'] = stack.stack_name
return stack
except Exception:
msg = _("Unable to retrieve stack.")
exceptions.handle(request, msg, redirect=self.get_redirect_url())
@memoized.memoized_method
def get_template(self, request, **kwargs):
try:
stack_template = api.heat.template_get(
request,
kwargs['stack_id'])
return yaml.safe_dump(stack_template, indent=2)
except Exception:
msg = _("Unable to retrieve stack template.")
exceptions.handle(request, msg, redirect=self.get_redirect_url())
def get_tabs(self, request, **kwargs):
stack = self.get_data(request, **kwargs)
stack_template = self.get_template(request, **kwargs)
return self.tab_group_class(
request, stack=stack, stack_template=stack_template, **kwargs)
@staticmethod
def get_redirect_url():
return reverse('horizon:project:stacks:index')
class ResourceView(tabs.TabView):
tab_group_class = project_tabs.ResourceDetailTabs
template_name = 'project/stacks/resource.html'
def get_context_data(self, **kwargs):
context = super(ResourceView, self).get_context_data(**kwargs)
resource = self.get_data(self.request, **kwargs)
context["resource"] = resource
context["metadata"] = self.get_metadata(self.request, **kwargs)
context["page_title"] = _("Resource Details: %s") % resource
return context
@memoized.memoized_method
def get_data(self, request, **kwargs):
try:
resource = api.heat.resource_get(
request,
kwargs['stack_id'],
kwargs['resource_name'])
return resource
except Exception:
msg = _("Unable to retrieve resource.")
redirect = reverse('horizon:project:stacks:index')
exceptions.handle(request, msg, redirect=redirect)
@memoized.memoized_method
def get_metadata(self, request, **kwargs):
try:
metadata = api.heat.resource_metadata_get(
request,
kwargs['stack_id'],
kwargs['resource_name'])
return json.dumps(metadata, indent=2)
except Exception:
msg = _("Unable to retrieve metadata.")
redirect = reverse('horizon:project:stacks:index')
exceptions.handle(request, msg, redirect=redirect)
def get_tabs(self, request, **kwargs):
resource = self.get_data(request, **kwargs)
metadata = self.get_metadata(request, **kwargs)
return self.tab_group_class(
request, resource=resource, metadata=metadata, **kwargs)
class JSONView(django.views.generic.View):
def get(self, request, stack_id=''):
return HttpResponse(project_api.d3_data(request, stack_id=stack_id),
content_type="application/json")
|
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Batch normalization layers."""
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core import summary_utils
from tensorflow.python.ops import nn # pylint:disable=g-direct-tensorflow-import
from tensorflow.python.tpu import tpu_function # pylint:disable=g-direct-tensorflow-import
_BN_FLOPS_PER_ELEMENT = 10
# TODO(rpang): move AddingAccumulator to a separate library.
class AddingAccumulator(base_layer.Accumulator):
"""Accumulator for the sufficient statistics."""
def __init__(self, shape, dtype):
super().__init__()
self.dtype = dtype
self.shape = shape
def DefaultValue(self):
"""Returns the default value of the accumulator."""
return tf.zeros(self.shape, dtype=self.dtype)
def Update(self, value):
"""Adds value to the accumulator."""
self.SetValue(self.GetValue() + tf.cast(value, self.dtype))
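# Illustrative usage of AddingAccumulator (a sketch based on how
# BatchNormLayerNoPadding below registers and drains its accumulators; the
# variable names here are only examples):
#
#   self.RegisterAccumulator('counts', AddingAccumulator([], p.dtype))
#   ...
#   self.accumulators.counts.Update(counts)    # accumulate over micro-batches
#   total = self.accumulators.counts.GetValue()
#   self.accumulators.counts.Reset()           # back to DefaultValue()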
def ComputeMoments(inputs,
padding,
reduce_over_dims,
cumulative_axis=None,
enable_cross_replica_sum_on_tpu=False,
keepdims=False):
"""Computes mean and variance over the valid data points in inputs."""
mask = 1.0 - padding
inputs = py_utils.with_dependencies([
py_utils.assert_equal(tf.rank(inputs), tf.rank(mask)),
py_utils.assert_greater_equal(mask, tf.zeros_like(mask)),
], inputs)
sum_v = tf.reduce_sum(
inputs * tf.cast(mask, inputs.dtype), reduce_over_dims, keepdims=keepdims)
count_v = tf.reduce_sum(mask, reduce_over_dims, keepdims=keepdims)
if cumulative_axis is not None:
sum_v = tf.math.cumsum(sum_v, axis=cumulative_axis)
count_v = tf.math.cumsum(count_v, axis=cumulative_axis)
# Input shape is guaranteed to be a multiple of mask shape because the
# inputs * mask op above was successfully broadcasted.
input_size_on_reduced_dims = tf.reduce_prod(
tf.gather(tf.shape(inputs), reduce_over_dims))
mask_size_on_reduced_dims = tf.reduce_prod(
tf.gather(tf.shape(mask), reduce_over_dims))
mask_multiplier = tf.math.truediv(input_size_on_reduced_dims,
mask_size_on_reduced_dims)
count_v *= tf.cast(mask_multiplier, count_v.dtype)
if py_utils.use_tpu() and enable_cross_replica_sum_on_tpu:
sum_v = tf.tpu.cross_replica_sum(sum_v)
count_v = tf.tpu.cross_replica_sum(count_v)
count_v = tf.maximum(count_v, 1.0)
mean = sum_v / count_v
sum_vv = tf.reduce_sum(
(inputs - mean) * (inputs - mean) * mask,
reduce_over_dims,
keepdims=keepdims)
if cumulative_axis is not None:
sum_vv = tf.math.cumsum(sum_vv, axis=cumulative_axis)
if py_utils.use_tpu() and enable_cross_replica_sum_on_tpu:
sum_vv = tf.tpu.cross_replica_sum(sum_vv)
variance = py_utils.with_dependencies([
py_utils.assert_greater_equal(sum_vv, tf.zeros_like(sum_vv)),
], sum_vv / count_v)
return mean, variance
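# The masked moments computed above reduce, in the simplest 1-D case, to the
# following NumPy sketch (illustrative only; the real implementation also
# handles broadcasting, cumulative axes and cross-replica sums):
#
#   x    = np.array([1., 2., 3., 4.])
#   pad  = np.array([0., 0., 0., 1.])       # last point is padding
#   mask = 1.0 - pad
#   count = mask.sum()                       # 3
#   mean  = (x * mask).sum() / count         # (1 + 2 + 3) / 3 = 2.0
#   var   = (((x - mean) ** 2) * mask).sum() / count   # 2/3 ~= 0.667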
class BatchNormLayer(base_layer.BaseLayer):
"""Batch normalization layer."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('dim', 0, 'Depth of the input/output.')
p.Define(
'decay', 0.999,
'Decay in updating the mean and variance moving average used in'
' batch normalization.')
p.Define(
'enable_cross_replica_sum_on_tpu', True,
'If true, computes global mean and variance across all replicas. '
'Only effective for TPU.')
p.Define(
'use_moving_avg_in_training', False,
'If True, use global moving avg (mean, variance) during training'
' to avoid mismatch between train and eval, which then'
' essentially acts as an adaptive normalization step.')
p.Define(
'freeze_bn_stats', False,
'If True, uses moving avg (mean, variance) during both training and '
'inference. It behaves like force_eval but the gamma/beta are still '
'trained when do_eval is False. The moving mean/var can be set by '
'loading pretrained checkpoints. A use case is training detectors '
'based on a pretrained checkpoint while BN stats are frozen.')
p.Define(
'gamma_zero_init', False,
'If True, initialize gamma to zeros according to the technique '
'introduced in the tech report: https://arxiv.org/abs/1706.02677')
p.Define('gamma_one_init', False,
'If True, explicitly initialize gamma to one.')
# TODO(rpang): remove this hparam, as it is replaced
# by p.train.ema_decay_moving_vars.
p.Define(
'add_stats_to_moving_average_variables', None,
'If True, adds (mean, variance) to the MOVING_AVERAGE_VARIABLES '
'collection to be compatible with ema_decay. '
'Recommendation: set to True for new models, and to False to maintain '
'checkpoint compatibility.')
p.Define('set_padded_output_to_zero', True,
'If True, sets the padded outputs to zero.')
p.Define(
'use_fused_batch_norm_for_eval', False,
'If True, uses tf.compat.v1.nn.fused_batch_norm instead of '
'tf.nn.batch_normalization during eval. The fused version may be more '
'efficient but it has more restrictions on the expected input shapes. '
'The input tensor has to be rank 4, where the first dimension '
'corresponds to the batch, and the last dimension corresponds to the '
'features to normalize over. This usually corresponds to NHWC with '
'image inputs. Note that fused_batch_norm wants to track its own '
'mean and variance during training, so we are unable to use it '
'for training since we want to have a custom mean and variance to '
'support padding.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
self._epsilon = 0.001
self._decay = p.decay
def _GetWeightShape(self):
return [self.params.dim]
def _CreateLayerVariables(self):
p = self.params
beta_pc = py_utils.WeightParams(
shape=self._GetWeightShape(),
init=py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
gamma_pc = py_utils.WeightParams(
shape=self._GetWeightShape(),
init=py_utils.WeightInit.Constant(1.0)
if p.gamma_one_init else py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
collections=[self.__class__.__name__ + '_vars'])
if not p.use_moving_avg_in_training:
self.CreateVariable('beta', beta_pc)
self.CreateVariable('gamma', gamma_pc)
# Two statistics.
moving_collections = ['moving_vars', self.__class__.__name__ + '_vars']
if p.add_stats_to_moving_average_variables:
moving_collections += [tf.GraphKeys.MOVING_AVERAGE_VARIABLES]
elif p.add_stats_to_moving_average_variables is None:
# TODO(rpang): force all models to set this param explicitly.
tf.logging.warning(
'BatchNormLayer.add_stats_to_moving_average_variables should be '
'set to True for new models, and to False explicitly for '
'checkpoint compatibility.')
# Add to the MOVING_AVERAGE_VARIABLES collection so that they are returned
# by tf.moving_average_variables() and included in EMA variables if
# ema_decay is enabled.
mva = py_utils.WeightParams(
shape=[p.dim],
init=py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
collections=moving_collections)
self.CreateVariable(
'moving_mean',
mva,
trainable=False,
aggregation=tf.VariableAggregation.MEAN)
mvv = py_utils.WeightParams(
shape=[p.dim],
init=py_utils.WeightInit.Constant(1.0),
dtype=p.dtype,
collections=moving_collections)
self.CreateVariable(
'moving_variance',
mvv,
trainable=False,
aggregation=tf.VariableAggregation.MEAN)
@property
def epsilon(self):
return self._epsilon
def _GetDefaultPaddings(self, inputs):
"""Gets the default paddings for an input."""
return tf.zeros(
tf.concat([tf.shape(inputs)[:-1], [1]], 0), dtype=inputs.dtype)
def _GetBetaGamma(self, theta, inputs, **kwargs):
del inputs
del kwargs
p = self.params
if p.use_moving_avg_in_training:
beta = 0.0
gamma = 1.0
else:
beta = theta.beta
gamma = theta.gamma
if not p.gamma_zero_init and not p.gamma_one_init:
# Note, The real gamma to use is 1 + gamma.
gamma = 1.0 + gamma
return beta, gamma
def GetCurrentMoments(self, theta):
"""Gets the current computed moments, which should be applied at eval.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
Returns:
Tuple of (mean, variance, beta, gamma).
"""
p = self.params
if p.use_moving_avg_in_training:
beta = 0.0
gamma = 1.0
else:
beta = theta.beta
gamma = theta.gamma
if not p.gamma_zero_init and not p.gamma_one_init:
# Note, The real gamma to use is 1 + gamma.
gamma = 1.0 + gamma
return self.vars.moving_mean, self.vars.moving_variance, beta, gamma
def ComputeAndUpdateMoments(self, theta, inputs, paddings=None, **kwargs):
"""Computes moments and updates state.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs tensor. Shaped [..., dim].
paddings: The paddings tensor. Shaped [..., 1], with the same rank as the
input tensor.
**kwargs: Additional inputs.
Returns:
Tuple of (mean, variance, beta, gamma).
"""
p = self.params
if paddings is None:
paddings = self._GetDefaultPaddings(inputs)
inputs = py_utils.with_dependencies([
py_utils.assert_shape_match([tf.shape(paddings)[-1]], [1]),
], inputs)
with tf.name_scope(p.name):
if self.do_eval or p.freeze_bn_stats:
# The mean and variance used for normalization.
norm_mean, norm_variance = (self.vars.moving_mean,
self.vars.moving_variance)
norm_mean, norm_variance = self._CastToFPropDtype(
(norm_mean, norm_variance))
else:
rank = tf.rank(paddings)
reduce_over_dims = tf.range(0, rank - 1)
mean, variance = ComputeMoments(inputs, paddings, reduce_over_dims,
None, p.enable_cross_replica_sum_on_tpu)
py_utils.UpdateBatchNormVars(self.vars.moving_mean, mean, self._decay)
py_utils.UpdateBatchNormVars(self.vars.moving_variance, variance,
self._decay)
# Add some summaries for visualization.
summary_utils.histogram('%s_mean' % p.name, tf.cast(mean, tf.float32))
summary_utils.histogram('%s_variance' % p.name,
tf.cast(variance, tf.float32))
summary_utils.histogram('%s_moving_mean' % p.name,
tf.cast(self.vars.moving_mean, tf.float32))
summary_utils.histogram('%s_moving_variance' % p.name,
tf.cast(self.vars.moving_variance, tf.float32))
summary_utils.histogram(
'%s_mean_diff' % p.name,
tf.cast(
tf.cast(mean, self.vars.moving_mean.dtype.base_dtype) -
self.vars.moving_mean, tf.float32))
summary_utils.histogram(
'%s_variance_diff' % p.name,
tf.cast(
tf.cast(variance, self.vars.moving_variance.dtype.base_dtype) -
self.vars.moving_variance, tf.float32))
if p.use_moving_avg_in_training:
# Use the global statistics for normalization.
# Control dependencies on mean and variance make sure
# moving_mean and variance will be updated for every training step.
norm_mean = py_utils.with_dependencies([mean], self.vars.moving_mean)
norm_variance = py_utils.with_dependencies([variance],
self.vars.moving_variance)
else:
# Use the batch statistics for normalization.
norm_mean = mean
norm_variance = variance
norm_mean = py_utils.CheckNumerics(
norm_mean, 'mean of %s failed numeric check' % p.name)
norm_variance = py_utils.CheckNumerics(
norm_variance, 'variance of %s failed numeric check' % p.name)
beta, gamma = self._GetBetaGamma(theta, inputs, **kwargs)
return norm_mean, norm_variance, beta, gamma
def _ComputeBN(self, inputs, paddings, gamma, beta, norm_mean, norm_variance):
p = self.params
with tf.control_dependencies([
py_utils.assert_greater_equal(norm_variance,
tf.zeros_like(norm_variance)),
py_utils.assert_shape_match([tf.shape(inputs)[-1]],
tf.shape(norm_mean)),
py_utils.assert_shape_match([tf.shape(inputs)[-1]],
tf.shape(norm_variance)),
]):
if p.use_fused_batch_norm_for_eval and (self.do_eval or
p.freeze_bn_stats):
bn_output, _, _ = nn.fused_batch_norm(
inputs,
gamma,
beta,
norm_mean,
norm_variance,
self._epsilon,
is_training=False)
else:
bn_output = tf.nn.batch_normalization(inputs, norm_mean, norm_variance,
beta, gamma, self._epsilon)
if p.set_padded_output_to_zero:
bn_output = py_utils.ApplyPadding(paddings, bn_output)
return bn_output
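# Both branches above apply the standard batch-norm transform
#   y = gamma * (x - mean) / sqrt(variance + epsilon) + beta.
# For example, with x = 3, mean = 2, variance = 4, gamma = 1, beta = 0 and
# ignoring the small epsilon, the normalized output is (3 - 2) / 2 = 0.5.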
def _MaybeExpandPaddings(self, inputs, paddings):
# rank difference is at most one.
rank_diff = tf.rank(inputs) - tf.rank(paddings)
paddings = py_utils.with_dependencies([
py_utils.assert_less_equal(rank_diff, 1),
py_utils.assert_greater_equal(rank_diff, 0)
], paddings)
# Pads [1] to the end of paddings.
paddings = tf.reshape(
paddings,
tf.concat(
[tf.shape(paddings), tf.tile([1], [rank_diff])], axis=0))
return paddings
def FProp(self, theta, inputs, paddings=None):
"""Apply batch normalization.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs tensor. Shaped [..., dim].
paddings: The paddings tensor. Shaped [..., 1] or [...], the rank is
either the same as inputs or tf.rank(inputs) - 1.
Returns:
Output after applying batch normalization, with the same shape as
'inputs'.
"""
inputs, paddings = self._CastToFPropDtype((inputs, paddings))
if py_utils.testonly_skip_norm_layers():
return inputs
p = self.params
if paddings is None:
paddings = self._GetDefaultPaddings(inputs)
# shape [..., 1]
paddings = self._MaybeExpandPaddings(inputs, paddings)
with tf.name_scope(p.name):
norm_mean, norm_variance, beta, gamma = self.ComputeAndUpdateMoments(
theta, inputs, paddings)
return self._ComputeBN(inputs, paddings, gamma, beta, norm_mean,
norm_variance)
@classmethod
def FPropMeta(cls, p, inputs, padding=None):
py_utils.CheckShapes((inputs,))
return py_utils.NestedMap(
flops=inputs.num_elements() * _BN_FLOPS_PER_ELEMENT,
out_shapes=(inputs,))
class CategoricalBN(BatchNormLayer):
"""Implements a categorical BN which is akin to ...
https://arxiv.org/pdf/1809.11096.pdf
Specifically, the moving stats are category-agnostic, while {beta, gamma} are
category-aware.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('class_emb_dim', None, 'Dim of input class embedding.')
p.use_moving_avg_in_training = False
p.use_fused_batch_norm_for_eval = False
p.add_stats_to_moving_average_variables = True
return p
def __init__(self, params):
assert params.name
assert not params.use_moving_avg_in_training
assert not params.use_fused_batch_norm_for_eval
assert params.add_stats_to_moving_average_variables
super().__init__(params)
def _GetWeightShape(self):
return [self.params.class_emb_dim, self.params.dim]
def _GetBetaGamma(self, theta, inputs, **kwargs):
p = self.params
assert 'class_emb' in kwargs
class_emb = kwargs['class_emb']
# class_emb is a one-hot vector of shape [batch, class_emb_dim=num_classes].
class_ids = tf.math.argmax(class_emb, axis=-1, output_type=tf.int32)
# [batch, dim]
# Not using matmul/einsum to avoid potential precision problem on TPU with
# sparse inputs.
beta = tf.gather(theta.beta, class_ids)
gamma = tf.gather(theta.gamma, class_ids)
if not p.gamma_zero_init and not p.gamma_one_init:
# Note, The real gamma to use is 1 + gamma.
gamma = 1.0 + gamma
# Extend to [batch, 1, ... 1, dim]
batch = py_utils.GetShape(inputs)[0]
to_shape = tf.concat(
[[batch],
tf.ones([py_utils.GetRank(inputs) - 2], tf.int32), [self.params.dim]],
axis=0)
beta = tf.reshape(beta, to_shape)
gamma = tf.reshape(gamma, to_shape)
return beta, gamma
def FProp(self, theta, inputs, paddings, class_emb):
"""Apply batch normalization.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs tensor. Shaped [batch, ..., dim].
paddings: The paddings tensor. Shaped [batch, ..., 1], with the same rank
as the input tensor.
class_emb: The conditioning inputs, Shaped [batch, emb_dim].
Returns:
Output after applying batch normalization, with the same shape as
'inputs'.
"""
if py_utils.testonly_skip_norm_layers():
return inputs
p = self.params
batch = py_utils.GetShape(inputs)[0]
class_emb = py_utils.HasShape(class_emb, [batch, p.class_emb_dim])
if not py_utils.use_tpu():
class_emb = py_utils.with_dependencies([
py_utils.assert_less_equal(
tf.cast(class_emb, tf.int32), 1, name='one_hot_assert1'),
py_utils.assert_greater_equal(
tf.cast(class_emb, tf.int32), 0, name='one_hot_assert2'),
py_utils.assert_equal(
tf.ones([batch], tf.int32),
tf.cast(tf.reduce_sum(class_emb, -1), tf.int32),
name='one_hot_assert3'),
], class_emb)
with tf.name_scope(p.name):
norm_mean, norm_variance, beta, gamma = self.ComputeAndUpdateMoments(
theta, inputs, paddings=paddings, class_emb=class_emb)
return self._ComputeBN(inputs, paddings, gamma, beta, norm_mean,
norm_variance)
class BatchNormLayerNoPadding(base_layer.BaseLayer):
"""Batchnorm layer without padding."""
@classmethod
def Params(cls):
"""Parameters for BatchNormLayerNoPadding."""
p = super().Params()
p.Define('dim', 0, 'Depth of the input/output.')
p.Define(
'decay', 0.997,
'Decay in updating the mean and variance moving average used in'
' batch normalization.')
p.Define('epsilon', 0.001,
'Small float added to variance to avoid dividing by zero.')
p.Define(
'bn_group_size', 1,
'The number of shards participating in normalization when distributed'
' batchnorm is used. Only used for TPU.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
assert p.name, 'Name of BatchNormLayerNoPadding is not set.'
p.fprop_dtype = None
def _CreateLayerVariables(self):
super()._CreateLayerVariables()
p = self.params
# Skip L-P regularization for these variables.
collections = [
self.__class__.__name__ + '_vars', py_utils.SKIP_LP_REGULARIZATION
]
pc = py_utils.WeightParams(
shape=[p.dim],
init=py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
collections=collections)
self.CreateVariable('beta', pc)
self.CreateVariable('gamma', pc)
moving_collections = [
'moving_vars', tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
self.__class__.__name__ + '_vars'
]
mva = py_utils.WeightParams(
shape=[p.dim],
init=py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
collections=moving_collections)
# Two statistics computed from sufficient stats.
self.CreateVariable('moving_mean', mva, trainable=False)
mvv = py_utils.WeightParams(
shape=[p.dim],
init=py_utils.WeightInit.Constant(1.0),
dtype=p.dtype,
collections=moving_collections)
self.CreateVariable('moving_variance', mvv, trainable=False)
# Accumulate bn sufficient stats over micro-batches.
dim = self.vars.beta.shape[0]
self.RegisterAccumulator('counts', AddingAccumulator([], p.dtype))
self.RegisterAccumulator('mean_ss', AddingAccumulator([dim], p.dtype))
self.RegisterAccumulator('variance_ss', AddingAccumulator([dim], p.dtype))
def PostTrainingStepUpdate(self):
"""Updates moving_mean, moving_variance after each training step."""
p = self.params
# Get sufficient stats that accumulates over microbatches.
counts = self.accumulators.counts.GetValue()
mean_ss = self.accumulators.mean_ss.GetValue()
variance_ss = self.accumulators.variance_ss.GetValue()
# Compute batch mean and batch variance from sufficient stats
mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)
decay = tf.convert_to_tensor(1.0 - p.decay, p.dtype)
# Update moving_mean, moving_variance from batch mean and batch variance.
with tf.name_scope(p.name) as scope:
with tf.ops.colocate_with(self.vars.moving_mean):
mean_update = tf.assign_sub(
self.vars.moving_mean,
tf.where(
tf.greater(counts, 0.5),
(self.vars.moving_mean - tf.cast(mean, p.dtype)) * decay,
tf.zeros_like(self.vars.moving_mean)),
name='moving_mean_update')
with tf.ops.colocate_with(self.vars.moving_variance):
var_update = tf.assign_sub(
self.vars.moving_variance,
tf.where(
tf.greater(counts, 0.5),
(self.vars.moving_variance - tf.cast(variance, p.dtype)) *
decay, tf.zeros_like(self.vars.moving_variance)),
name='moving_variance_update')
py_utils.CheckNumerics(
self.vars.moving_mean,
'moving mean of {} failed numeric check'.format(scope))
py_utils.CheckNumerics(
self.vars.moving_variance,
'moving variance of {} failed numeric check'.format(scope))
self.accumulators.counts.Reset()
self.accumulators.mean_ss.Reset()
self.accumulators.variance_ss.Reset()
return tf.group(mean_update, var_update)
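# The assign_sub above is the usual exponential-moving-average update:
#   moving <- moving - (moving - batch) * (1 - p.decay)
#           = p.decay * moving + (1 - p.decay) * batch.
# For example, with p.decay = 0.997, moving_mean = 0.0 and a batch mean of
# 1.0, the new moving_mean is 0.003. When counts <= 0.5 (nothing accumulated),
# the tf.where leaves the moving statistics unchanged.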
def _Moments(self, inputs, group_size):
"""Computes mean and variance over N,H,W dimensions in inputs."""
counts, mean_ss, variance_ss, _, = tf.nn.sufficient_statistics(
inputs, axes=[0, 1, 2], keepdims=False)
self.accumulators.counts.Update(counts)
self.accumulators.mean_ss.Update(mean_ss)
self.accumulators.variance_ss.Update(variance_ss)
# Distributed batch norm that computes sufficient statistics from group_size
# replicas. This is useful when batch_size_per_replica is too small to
# compute reliable sufficient statistics.
if py_utils.use_tpu() and group_size > 1:
group_assignment = None
num_shards = tpu_function.get_tpu_context().number_of_shards
if num_shards is not None:
if num_shards < group_size:
raise ValueError('TPU shards={} less than bn_group_size={}.'.format(
num_shards, group_size))
if num_shards % group_size:
raise ValueError(
'TPU shards={} not divisible by bn_group_size={}.'.format(
num_shards, group_size))
num_groups = num_shards // group_size
group_assignment = []
for g in range(num_groups):
replica_ids = [g * group_size + i for i in range(group_size)]
group_assignment.append(replica_ids)
counts *= group_size
mean_ss = tf.tpu.cross_replica_sum(mean_ss, group_assignment)
variance_ss = tf.tpu.cross_replica_sum(variance_ss, group_assignment)
# At each micro-step, batch_mean and batch_variance are computed
# to normalize inputs. But they are not used to update moving_mean and
# moving_variance variables until the last micro batch.
mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)
return mean, variance
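# Example of the group assignment built above (illustrative): with
# num_shards = 8 and p.bn_group_size = 2, num_groups = 4 and
# group_assignment = [[0, 1], [2, 3], [4, 5], [6, 7]], so sufficient
# statistics are summed within each pair of replicas only.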
def FProp(self, theta, inputs):
"""Applies batch normalization.
Using the implementation in github.com/
tensorflow/tpu/blob/master/models/official/amoeba_net/network_utils.py#L550
Args:
theta: A nested map object containing weights' values of this layer and
its children layers.
inputs: The inputs tensor. Shaped [..., dim].
Returns:
Output after applying batch normalization, with the same shape as
'inputs'.
"""
if py_utils.testonly_skip_norm_layers():
return inputs
p = self.params
inputs_dtype = inputs.dtype
inputs = tf.cast(inputs, p.dtype)
inputs = py_utils.with_dependencies([
py_utils.assert_shape_match([tf.shape(inputs)[-1]], tf.shape(
theta.beta))
], inputs)
with tf.name_scope(p.name) as scope:
# Note, The real gamma to use is 1 + gamma.
gamma = 1.0 + theta.gamma
if self.do_eval:
outputs = tf.nn.batch_normalization(inputs, theta.moving_mean,
theta.moving_variance, theta.beta,
gamma, p.epsilon)
else:
mean, variance = self._Moments(inputs, p.bn_group_size)
mean = py_utils.CheckNumerics(
mean, 'mean of {} failed numeric check'.format(scope))
variance = py_utils.CheckNumerics(
variance, 'variance of {} failed numeric check'.format(scope))
outputs = tf.nn.batch_normalization(inputs, mean, variance, theta.beta,
gamma, p.epsilon)
outputs.set_shape(inputs.get_shape())
return tf.cast(outputs, inputs_dtype)
@classmethod
def FPropMeta(cls, p, inputs):
"""Returns metadata about the `FProp` computation for this layer."""
py_utils.CheckShapes((inputs,))
return py_utils.NestedMap(
flops=inputs.num_elements() * _BN_FLOPS_PER_ELEMENT,
out_shapes=(inputs,))
class GroupNormLayer(base_layer.BaseLayer):
"""Group normalization layer(https://arxiv.org/abs/1803.08494)."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('dim', 0, 'Depth of the input/output.')
p.Define('num_groups', 32, 'Number of groups for GroupNorm.')
p.Define('min_group_size', 1, 'Minimum group size for GroupNorm')
p.Define('cumulative', False, 'If true, only normalize by current and '
'previous time steps.')
p.Define(
'enable_cross_replica_sum_on_tpu', False,
'If true, computes global mean and variance across all replicas. '
'Only effective for TPU.')
p.Define('input_rank', 4, 'Rank of input. Only 3(BTD) and 4(NHWC) are '
'supported.')
p.Define('epsilon', 0.001, 'Epsilon.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
assert p.name
assert p.num_groups > 0
assert p.min_group_size > 0
assert p.dim % p.min_group_size == 0
if p.dim >= p.num_groups:
assert p.dim % p.num_groups == 0, ('p.dim({0}) is not divisible by '
'p.num_groups({1})').format(
p.dim, p.num_groups)
def _CreateLayerVariables(self):
super()._CreateLayerVariables()
p = self.params
assert p.input_rank == 3 or p.input_rank == 4
collections = [
self.__class__.__name__ + '_vars', py_utils.SKIP_LP_REGULARIZATION
]
shape = [1, 1, 1, p.dim] if p.input_rank == 4 else [1, 1, p.dim]
pc = py_utils.WeightParams(
shape=shape,
init=py_utils.WeightInit.Constant(0.0),
dtype=p.dtype,
collections=collections)
self.CreateVariable('beta', pc)
self.CreateVariable('gamma', pc)
@property
def group_size(self):
p = self.params
assert p.min_group_size <= p.dim
return max(p.dim // p.num_groups, p.min_group_size)
@property
def num_groups(self):
p = self.params
return p.dim // self.group_size
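# Worked example for the two properties above: with p.dim = 512,
# p.num_groups = 32 and p.min_group_size = 1, group_size = max(512 // 32, 1)
# = 16 and num_groups = 512 // 16 = 32. If p.dim // p.num_groups were smaller
# than p.min_group_size, min_group_size wins and num_groups shrinks
# accordingly.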
def zero_state(self, batch_size):
p = self.params
num_groups = self.num_groups
if not p.cumulative:
return py_utils.NestedMap()
# Note: Prefer storing data in <=4D tensors, as TFLite doesn't support
# implicit broadcasting for 5D (or larger) tensors on many operators.
cached_count_shape = [batch_size, 1]
cached_moment_shape = [batch_size, num_groups]
cached_sum = tf.zeros(cached_moment_shape, py_utils.FPropDtype(p))
cached_count = tf.zeros(cached_count_shape, py_utils.FPropDtype(p))
cached_var = tf.zeros(cached_moment_shape, py_utils.FPropDtype(p))
return py_utils.NestedMap(
cached_sum=cached_sum, cached_count=cached_count, cached_var=cached_var)
def _Normalize(self, theta, grouped_inputs, group_mean, group_variance):
p = self.params
group_mean = py_utils.CheckNumerics(
group_mean, f'mean of {p.name} failed numeric check.')
group_variance = py_utils.CheckNumerics(
group_variance, f'variance of {p.name} failed numeric check.')
input_shape = py_utils.GetShape(grouped_inputs)
moment_shape = list(input_shape)
if p.input_rank == 4:
moment_shape[2] = 1
moment_shape[-1] = 1
else:
moment_shape[-1] = 1
if not p.cumulative:
# If not cumulative, the seqlen dimension is also reduced.
moment_shape[1] = 1
group_mean = py_utils.HasShape(group_mean, moment_shape)
group_variance = py_utils.HasShape(group_variance, moment_shape)
group_variance = py_utils.with_dependencies([
py_utils.assert_greater_equal(group_variance,
tf.cast(0, group_variance.dtype))
], group_variance)
if group_variance.dtype == tf.bfloat16:
# tf.rsqrt is not implemented for bfloat16, hence we always cast into
# tf.float32.
group_stddev_inv = tf.cast(
tf.math.rsqrt(tf.cast(group_variance + p.epsilon, tf.float32)),
group_mean.dtype)
else:
group_stddev_inv = tf.math.rsqrt(group_variance + p.epsilon)
grouped_inputs = (grouped_inputs - group_mean) * group_stddev_inv
# Merges the last two dims.
grouped_inputs = tf.reshape(grouped_inputs, input_shape[:-2] + [-1])
# Note, The real gamma to use is 1 + gamma.
outputs = grouped_inputs * (theta.gamma + 1) + theta.beta
return outputs
def FProp(self, theta, inputs, paddings=None):
"""Apply group normalization.
Args:
theta: A NestedMap object containing weights' values of this layer and its
children layers.
inputs: The inputs tensor with shape [batch_size, height, width, channel]
if p.input_rank == 4, else [batch_size, height, channel].
paddings: The paddings tensor with shape [batch_size, height]. Intended to
be used for sequence processing where `height` is `time`.
Returns:
A single tensor as the output after applying group normalization, with
the same shape as 'inputs', or an (output, output_paddings) pair if input
paddings is not None.
"""
inputs, paddings = self._CastToFPropDtype((inputs, paddings))
if py_utils.testonly_skip_norm_layers():
if paddings is None:
return inputs
else:
return inputs, paddings
p = self.params
inputs = py_utils.HasRank(inputs, p.input_rank)
num_groups = self.num_groups
input_shape = py_utils.GetShape(inputs)
with tf.name_scope(p.name):
x = tf.reshape(inputs, input_shape[:-1] + [num_groups, self.group_size])
expanded_rank = p.input_rank + 1
all_dims = list(range(expanded_rank))
if paddings is None or not p.cumulative:
# Skips batch and num_groups.
reduce_over_dims = all_dims[1:-2] + all_dims[-1:]
else:
# Skips batch, seqlen and num_groups.
reduce_over_dims = all_dims[2:-2] + all_dims[-1:]
if paddings is None and not p.cumulative:
# Fast path on tpu without reshape.
group_mean, group_variance = tf.nn.moments(
x, axes=reduce_over_dims, keepdims=True)
else:
expanded_paddings = tf.reshape(
paddings, input_shape[:2] + [1] * (expanded_rank - 2))
group_mean, group_variance = ComputeMoments(
x,
expanded_paddings,
reduce_over_dims,
cumulative_axis=1,
enable_cross_replica_sum_on_tpu=p.enable_cross_replica_sum_on_tpu,
keepdims=True)
outputs = self._Normalize(theta, x, group_mean, group_variance)
if paddings is None:
return outputs
else:
return outputs, paddings
def _StreamMoments(self, inputs, paddings, cached_sum, cached_count,
cached_var):
"""Computes mean and variance over the valid data points in inputs.
Args:
inputs: [B, T, F, N, G] or [B, T, N, G]
paddings: [B, T, 1, 1, 1] or [B, T, 1, 1] (same rank as inputs)
cached_sum: [B, N]
cached_count: [B, 1]
cached_var: [B, N]
Returns:
mean: [B, T, 1, N, 1] or [B, T, N, 1] (same rank as inputs)
variance: same shape as mean.
new_cached_sum: same shape as cached_sum.
new_cached_count: same shape as cached_count.
new_cached_var: same shape as cached_var.
"""
tf.logging.vlog(1, 'inputs: %r', inputs)
tf.logging.vlog(1, 'paddings: %r', paddings)
tf.logging.vlog(1, 'cached_sum: %r', cached_sum)
tf.logging.vlog(1, 'cached_count: %r', cached_count)
tf.logging.vlog(1, 'cached_var: %r', cached_var)
input_rank = py_utils.GetRank(inputs)
paddings = py_utils.HasRank(paddings, input_rank)
cached_sum = py_utils.HasRank(cached_sum, 2)
cached_count = py_utils.HasRank(cached_count, 2)
cached_var = py_utils.HasRank(cached_var, 2)
input_shape = py_utils.GetShape(inputs)
output_shape = input_shape[:]
if input_rank == 4:
# Skip {B,T,N}. Reduce just G.
reduce_over_dims = [3]
multiplier = input_shape[3]
output_shape[3] = 1
else:
assert input_rank == 5
# Skip {B,T,N}. Reduce {F,G}.
reduce_over_dims = [2, 4]
multiplier = input_shape[2] * input_shape[4]
output_shape[2] = 1
output_shape[4] = 1
# [B, T, N]
sum_v = tf.reduce_sum(
py_utils.ApplyPadding(paddings, inputs),
reduce_over_dims,
keepdims=False)
sum_v = tf.math.cumsum(sum_v, axis=1)
sum_v += cached_sum[:, tf.newaxis, :]
# [B, T, 1]
count_v = tf.reduce_sum(
py_utils.ApplyPadding(
paddings, tf.cast(multiplier, inputs.dtype), ensure_shape=False),
reduce_over_dims,
keepdims=False)
count_v = tf.math.cumsum(count_v, axis=1)
count_v += cached_count[:, tf.newaxis, :]
# [B, T, 1, N, 1] or [B, T, N, 1]
mean = tf.reshape(sum_v / tf.maximum(count_v, 1.0), output_shape)
# [B, T, N]
sum_vv = tf.reduce_sum(
py_utils.ApplyPadding(paddings,
tf.math.squared_difference(inputs, mean)),
reduce_over_dims,
keepdims=False)
sum_vv = tf.math.cumsum(sum_vv, axis=1)
sum_vv += cached_var[:, tf.newaxis, :]
# [B, N]
cached_sum = sum_v[:, -1]
# [B, 1]
cached_count = count_v[:, -1]
# [B, N]
cached_var = sum_vv[:, -1]
# [B, T, 1, N, 1] or [B, T, N, 1]
variance = tf.reshape(sum_vv / tf.maximum(count_v, 1.0), output_shape)
tf.logging.vlog(1, 'sum_v: %r', sum_v)
tf.logging.vlog(1, 'count_v: %r', count_v)
tf.logging.vlog(1, 'sum_vv: %r', sum_vv)
return mean, variance, cached_sum, cached_count, cached_var
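# Streaming sketch for the cached state above (mean only, a single group of
# size 1, no padding): feeding inputs [1, 2] first gives cached_sum = 3 and
# cached_count = 2; feeding [3] next gives sum_v = 3 + 3 = 6 and
# count_v = 2 + 1 = 3, so the cumulative mean at the last step is 2.0,
# exactly as if [1, 2, 3] had been processed in one call.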
def StreamStep(self, theta, inputs, paddings, state0):
if py_utils.testonly_skip_norm_layers():
return inputs, paddings, state0
p = self.params
assert p.cumulative
inputs = py_utils.HasRank(inputs, p.input_rank)
group_size = self.group_size
num_groups = self.num_groups
tf.logging.vlog(1, 'group_size: %s', group_size)
tf.logging.vlog(1, 'num_groups: %s', num_groups)
input_shape = py_utils.GetShape(inputs)
with tf.name_scope(f'{p.name}/StreamStep'):
x = tf.reshape(inputs, input_shape[:-1] + [num_groups, group_size])
expanded_rank = p.input_rank + 1
expanded_paddings = tf.reshape(
paddings, input_shape[:2] + [1] * (expanded_rank - 2))
(group_mean, group_variance, cached_sum, cached_count,
cached_var) = self._StreamMoments(x, expanded_paddings,
state0.cached_sum, state0.cached_count,
state0.cached_var)
outputs = self._Normalize(theta, x, group_mean, group_variance)
return outputs, paddings, py_utils.NestedMap(
cached_sum=cached_sum,
cached_count=cached_count,
cached_var=cached_var)
@classmethod
def FPropMeta(cls, p, inputs):
py_utils.CheckShapes((inputs,))
flops_per_element = 10 # Approximately 10 flops per element.
return py_utils.NestedMap(
flops=inputs.num_elements() * flops_per_element, out_shapes=(inputs,))
|
|
"""
Filename: Network.py
Author: Nipun Gunawardena
Acknowledgements: Based off of Michael Nielsen's Neural Network and Deep Learning Tutorial code found at
https://github.com/mnielsen/neural-networks-and-deep-learning/blob/master/src/network.py.
Also based off of info found at http://iamtrask.github.io/2015/07/12/basic-python-network/?
Requirements: Python 2.7.6 (Tested on)
Numpy
Matplotlib
Notes:
- The purpose of this file is to help me understand neural networks on a programming level
- This code is specifically made to perform neural network regressions, not classifications. It won't
work for classifications
TODO:
- Test different datasets
- Convert backprop/forward prop to matrix multiplication instead
of looping through samples
- Hyperparameter search
- Gradient checking?
"""
## Imports ----------------------------------------------------------------------------------------
import sys
import math
import random
import numpy as np
import matplotlib.pyplot as plt
## Miscellaneous Functions ------------------------------------------------------------------------
def sigmoid(z):
"""
The sigmoid function
"""
return 1.0/(1.0 + np.exp(-z))
def sigmoidPrime(z):
"""
Derivative of the sigmoid function
"""
return sigmoid(z) * (1 - sigmoid(z))
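# Note: sigmoidPrime uses the identity sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)).
# For example, at z = 0: sigmoid(0) = 0.5, so sigmoidPrime(0) = 0.25.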
def dataSplit(inputs, targets, trainSplit = 0.75, testSplit = 0.15, valSplit = 0.10):
"""
- Splits data into test, train, and validation data
Original input data needs to be an m x n array where there are n variables and m samples
- By default randomly splits the data, but block split needs to be added.
- The number from valSplit isn't actually used; it's just the leftover fraction after testSplit and trainSplit
are accounted for.
- Hasn't been tested on any dataset other than house dataset yet. Multiple targets may cause problems
TODO: Add blockSplit
"""
# Check correct split
if testSplit + trainSplit + valSplit != 1.0:
print("ERROR: Please enter splits that add up to 1!")
sys.exit()
# Check correct lengths
if len(inputs) != len(targets):
print("ERROR: Please ensure inputs and targets are same length!")
sys.exit()
dataLen = len(inputs)
trainLen = int(math.ceil(trainSplit*dataLen))
testLen = int(math.ceil(testSplit*dataLen))
valLen = dataLen - trainLen - testLen
shuffle = np.random.permutation(dataLen)
trainIdx = shuffle[0:trainLen]
testIdx = shuffle[trainLen:trainLen+testLen]
valIdx = shuffle[trainLen+testLen:trainLen+testLen+valLen]
inputsTrain = inputs[trainIdx, :]
inputsTest = inputs[testIdx, :]
inputsVal = inputs[valIdx, :]
targetsTrain = targets[trainIdx]
targetsTest = targets[testIdx]
targetsVal = targets[valIdx]
if __debug__:
print("{0} elements total, {1} elements towards training, {2} elements towards testing, and {3} elements towards validation\n".format(dataLen, trainLen, testLen, valLen))
return np.array(inputsTrain).T, np.array(inputsTest).T, np.array(inputsVal).T, np.array(targetsTrain).T, np.array(targetsTest).T, np.array(targetsVal).T
def normalizeMatrix(mat, toMin, toMax):
"""
Converts matrix to span from toMin to toMax
Returns converted matrix and array of tuples representing the original min/max
Works on the rows of a matrix (min/max taken on rows)
"""
numRows = mat.shape[0]
sMat = np.zeros(mat.shape)
fromSpans = []
for i in xrange(numRows):
fromMin, fromMax = min(mat[i,:]), max(mat[i,:]) # Original min/max
sMat[i,:] = mat[i,:] - fromMin
sMat[i,:] = sMat[i,:]/(fromMax - fromMin)
sMat[i,:] = sMat[i,:]*(toMax - toMin)+toMin # Convert to new range
fromSpans.append((fromMin, fromMax))
return sMat, fromSpans
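# Example (illustrative): normalizing the row [2, 4, 6] to the range [0, 1]
# subtracts the row minimum (-> [0, 2, 4]), divides by the original span
# 6 - 2 = 4 (-> [0, 0.5, 1]) and records (2, 6) in fromSpans so the original
# range can be recovered later.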
## Vectorized Functions ---------------------------------------------------------------------------
sigmoidVec = np.vectorize(sigmoid)
sigmoidPrimeVec = np.vectorize(sigmoidPrime)
## Classes ----------------------------------------------------------------------------------------
class Network():
"""Neural Network Class"""
def __init__(self, sizes):
"""
Initialize neural network.
'sizes' should be a list where each element is the size
of that layer. For example, a network with 3 input nodes,
4 hidden nodes, and 1 output node would be [3,4,1]
"""
# if __debug__:
# np.random.seed(42) # Consistent random seed
self.numLayers = len(sizes)
varAccount = np.sqrt(2.0/sizes[0]) # Variance calibration as described by http://cs231n.github.io/neural-networks-2/#reg. May change to 1 if not working
self.biases = [0.01*np.random.rand(i, 1)*varAccount for i in sizes[1:]] # Multiply rand by 0.01 to prevent weight explosion?
self.weights = [0.01*np.random.rand(i,j)*varAccount for i,j in zip(sizes[1:],sizes[:-1])]
if __debug__:
print("Biases Sizes:\n {0}\n".format([b.shape for b in self.biases]))
print("Weights Sizes:\n {0}\n".format([w.shape for w in self.weights]))
def regFeedForward(self, a, actFunc):
"""
Forward propagates an input vector "a" through
the neural network, assuming a linear activation function on the output layer.
Input vector "a" should be passed in as a *2d* numpy array (column vector),
even though it is only mathematically
1d. (Use a.reshape(-1,1) to do so)
"""
for l in xrange(self.numLayers-2):
b = self.biases[l]
w = self.weights[l]
a = actFunc(np.dot(w,a)+b)
# Last layer uses linear activation instead of sigmoidal
b = self.biases[-1]
w = self.weights[-1]
a = np.dot(w,a) + b
return a
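# Illustrative usage (a sketch, matching how evaluateMSE calls this method):
#   net = Network([3, 4, 1])
#   y = net.regFeedForward(x.reshape(-1, 1), sigmoidVec)
# where x is a length-3 numpy array and y comes back as a 1x1 column vector.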
def evaluateMSE(self, evInputs, evTargets):
"""
Evaluate the network error with data. Mean squared error
is the metric used. Multiple outputs will be summed into
1 number
"""
evSamples = evInputs.shape[1]
mse = 0.0
for t in xrange(evSamples):
mse += self.squaredError(self.regFeedForward(evInputs[:,t].reshape(-1,1), sigmoidVec).T, evTargets[:,t])
mse /= evSamples
return np.sum(mse)
def train(self, trainInputs, trainTargets, miniBatchSize, epochs = 50, eta = 0.15, lmbda = 0.0, momentum=False, mu = 0.5, dropout=False, dropProb=0.5, valInputs = None, valTargets = None):
"""
Train neural network using training data. If valInputs & valTargets are included,
validation will be calculated as well. "miniBatchSize" should be an even factor
of the number of elements in the training set. "eta" and "lmbda" are the learning rate
and regularization value respectively.
"""
self.dpFlag = dropout
self.mmFlag = momentum
self.dpProb = dropProb
self.zB = [np.zeros(b.shape) for b in self.biases] # Momentum term for biases. Needs to stay persistent?
self.zW = [np.zeros(w.shape) for w in self.weights] # Momentum term for weights.
# Prepare performance arrays
trainMseArr = np.zeros((1,epochs))
# Get important sizes
numVar, numSamples = trainInputs.shape
valFlag = False
if (valInputs is not None) and (valTargets is not None):
valMseArr = np.zeros((1,epochs))
valSamples = valInputs.shape[1]
valCounter = 0
mseEps = 0.001
mseOld = 0.0
valFlag = True
# Train over epochs
for i in xrange(epochs):
print "Epoch {0} || ".format(i),
# Create Mini-batches (array of arrays that correspond to each other)
shuffle = np.random.permutation(numSamples)
trainInputs = trainInputs[:,shuffle]
trainTargets = trainTargets[:,shuffle]
miniBatchInputs = [ trainInputs[:,k:k+miniBatchSize] for k in xrange(0,numSamples,miniBatchSize) ]
miniBatchTargets = [ trainTargets[:,k:k+miniBatchSize] for k in xrange(0,numSamples,miniBatchSize) ]
# Update weights using mini batches
for miniBatch in zip(miniBatchInputs, miniBatchTargets):
self.sgdMiniBatch(miniBatch[0], miniBatch[1], eta, lmbda, mu, numSamples)
# Print training data performance
mse = self.evaluateMSE(trainInputs, trainTargets)
trainMseArr[0, i] = mse
print "Train Mse =", mse, "|| ",
# Check on validation data
if valFlag:
# Calculate MSE
mse = self.evaluateMSE(valInputs, valTargets)
valMseArr[0, i] = mse
# Check for Validation increasing accuracy
if abs(mse - mseOld) < mseEps:
valCounter += 1
else:
valCounter = 0
mseOld = mse
# Print and (maybe) break
print "Val Mse =", mse, "|| Val Fail Count =", valCounter,
if valCounter >= 5:
print " "
break
# Finish Printing
print " "
if valFlag:
return trainMseArr, valMseArr
else:
return trainMseArr
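    # Hypothetical call (names and values are illustrative, not from the original run):
    #
    #   trainMse, valMse = net.train(trainX, trainY, miniBatchSize=32, epochs=100,
    #                                eta=0.1, lmbda=0.01, momentum=True, mu=0.5,
    #                                valInputs=valX, valTargets=valY)
    #
    # trainX/valX are (features, samples) arrays; training stops early once the
    # validation MSE changes by less than mseEps (0.001) for 5 consecutive epochs.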
def sgdMiniBatch(self, inputs, targets, eta, lmbda, mu, n):
"""
        Performs SGD on a mini-batch. This function is almost identical to Michael Nielsen's.
        The actual back-propagation is done in backprop(); this function handles the SGD
        update step.
        "eta" is the learning rate, "lmbda" is the regularization term, and "n" is the total
        size of the training data set.
"""
numSamples = inputs.shape[1]
gradB = [np.zeros(b.shape) for b in self.biases]
gradW = [np.zeros(w.shape) for w in self.weights]
# Calculate the gradient of the weights and biases to be used in SGD
for i in xrange(numSamples):
deltaGradB, deltaGradW = self.backprop(inputs[:,i], targets[:,i])
gradB = [nb + dnb for nb, dnb in zip(gradB, deltaGradB)]
gradW = [nw + dnw for nw, dnw in zip(gradW, deltaGradW)]
# Do gradient descent update step
if self.mmFlag: # With momentum
for i in xrange(self.numLayers - 1):
w = self.weights[i]
nw = gradW[i]
self.zW[i] = mu*self.zW[i] - (eta/numSamples)*nw
self.weights[i] = w + self.zW[i] - eta*(lmbda/n)*w
for i in xrange(self.numLayers - 1):
b = self.biases[i]
nb = gradB[i]
self.zB[i] = mu*self.zB[i] - (eta/numSamples)*nb
self.biases[i] = b + self.zB[i]
else: # Without momentum
self.weights = [ (1-eta*(lmbda/n))*w - (eta/numSamples)*nw for w, nw in zip(self.weights, gradW) ]
self.biases = [ b - (eta/numSamples)*nb for b, nb in zip(self.biases, gradB) ]
return 0
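    # The update implemented above (without momentum) is, per parameter:
    #
    #   w <- (1 - eta*lmbda/n) * w - (eta/m) * sum_x dC_x/dw
    #   b <- b - (eta/m) * sum_x dC_x/db
    #
    # where m is the mini-batch size and n the full training-set size. With momentum,
    # the step is accumulated first: zW <- mu*zW - (eta/m)*gradW, then w <- w + zW - eta*(lmbda/n)*w.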
def squaredError(self, estimate, actual):
"""
        Calculates the element-wise squared error between two scalars/vectors.
        The two arguments must already have the same shape; that check needs to be
        done outside this function.
"""
return (estimate - actual)**2
def backprop(self, inputVec, targetVec):
"""
Return gradient for single example for cost function.
Called by sgd function
return values are layer-by-layer lists of arrays, similar to
self.biases and self.weights
        This function is also similar to Nielsen's, but modified for the
        linear output activation function
"""
inputVec = inputVec.reshape(-1,1) # Reshape inputVec into vertical vector so math works
targetVec = targetVec.reshape(-1,1) # Same with targetVec
# Initialize gradient
nablaB = [np.zeros(b.shape) for b in self.biases]
nablaW = [np.zeros(w.shape) for w in self.weights]
# Feedforward and save intermediate values
a = inputVec
acts = [inputVec] # List to store activations for each layer
zs = [] # List to store weighted inputs for each layer
dpMasks = []
for i in xrange(self.numLayers - 2): # -2 because weights, biases have 1 less than numLayers, then avoid last layer
b = self.biases[i]
w = self.weights[i]
z = (np.dot(w, a) + b)
if self.dpFlag:
dropMask = (np.random.rand(*z.shape) < self.dpProb) / self.dpProb
dpMasks.append(dropMask)
a = sigmoidVec(z) * dropMask
else:
a = sigmoidVec(z)
zs.append(z)
acts.append(a)
# Above, but for last layer
b = self.biases[-1]
w = self.weights[-1]
z = np.dot(w, a) + b
zs.append(z)
a = z # Apply linear activation func. instead of sigmoidal
acts.append(a)
# Backward Pass last layer
delta = self.costDerivative(acts[-1], targetVec) * 1 # BP1 - You multiply by one in place of sigmoidPrime because linear act. func. in output layer
nablaB[-1] = delta # BP3
nablaW[-1] = np.dot(delta, acts[-2].T) # BP4
# Backward pass rest of layers
for l in xrange(2, self.numLayers):
z = zs[-l]
if self.dpFlag:
dropMask = dpMasks[-l+1]
spv = sigmoidPrimeVec(dropMask*z)
w = self.weights[-l+1].T * dropMask
delta = np.dot(w, delta) * spv # BP2 with dropout
else:
spv = sigmoidPrimeVec(z)
delta = np.dot(self.weights[-l+1].T, delta) * spv # BP2
nablaB[-l] = delta # BP3
nablaW[-l] = np.dot(delta, acts[-l-1].T)
# sys.exit()
return nablaB, nablaW
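    # Note on the "inverted dropout" mask used above (illustrative, assuming dpProb=0.5):
    #
    #   mask = (np.random.rand(*z.shape) < 0.5) / 0.5   # entries are 0.0 or 2.0
    #   a = sigmoidVec(z) * mask                        # ~half the units zeroed, the rest scaled x2
    #
    # Scaling by 1/dpProb at train time keeps the expected activation unchanged,
    # so regFeedForward() needs no rescaling at evaluation time.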
def costDerivative(self, outputActivations, target):
"""
Return vector of partial derivatives of cost function
for output activations
"""
return (outputActivations - target)
def evaluate(self, evInputs, evTargets):
"""
        Return the summed MSE and a matrix of network estimates for the given inputs.
        The output matrix has the same shape as the supplied targets.
"""
outputs = np.zeros(evTargets.shape)
evSamples = evInputs.shape[1]
mse = 0.0
for t in xrange(evSamples):
outputs[:,t] = self.regFeedForward(evInputs[:,t].reshape(-1,1), sigmoidVec).T
mse += self.squaredError(outputs[:,t], evTargets[:,t])
mse /= evSamples
return np.sum(mse), outputs
## Main -------------------------------------------------------------------------------------------
if __name__ == "__main__":
# Prepare stuff
prefix = "house"
sizes = [13, 50, 1]
mbSize = 71
# prefix = "building"
# sizes = [14, 20, 3]
# mbSize = 491
# prefix = "abalone"
# sizes = [8, 45, 1]
# mbSize = 172
numEpochs = 200
etaVal = 0.10
lmbdaVal = 0.1
dpOut = True
dpP = 0.50
momtum = True
muVal = 0.5
# Seed random number for comparisons
# np.random.seed(42)
# Read data
inputs = np.genfromtxt(prefix + "Inputs.csv",delimiter=",");
targets = np.genfromtxt(prefix + "Targets.csv",delimiter=",");
    # Initialize Neural Network
NN = Network(sizes)
# Split data
inputsTrain, inputsTest, inputsVal, targetsTrain, targetsTest, targetsVal, = dataSplit(inputs, targets)
# Reshape 1d vectors
    # If the target vector is 1d, it needs to be reshaped to 2d. Use .reshape(1,-1) to do so
if prefix != "building":
targetsTrain = targetsTrain.reshape(1,-1)
targetsTest = targetsTest.reshape(1,-1)
targetsVal = targetsVal.reshape(1,-1)
# Scale input data between -1 and 1
inputsTrainScaled, inputsTrainSpans = normalizeMatrix(inputsTrain, -1, 1)
inputsTestScaled, inputsTestSpans = normalizeMatrix(inputsTest, -1, 1)
inputsValScaled, inputsValSpans = normalizeMatrix(inputsVal, -1, 1)
# Don't scale target data between -1 and 1
# Variables are only reassigned to make refactoring easier
targetsTestScaled = targetsTest
targetsTrainScaled = targetsTrain
targetsValScaled = targetsVal
# Train Network
trainMse, valMse = NN.train(inputsTrainScaled, targetsTrainScaled, mbSize, valInputs=inputsValScaled, valTargets=targetsValScaled, eta=etaVal, epochs=numEpochs, lmbda=lmbdaVal, dropout=dpOut, dropProb=dpP, momentum=momtum, mu=muVal)
# Test Network
MSEtrainScaled, outputsTrainScaled = NN.evaluate(inputsTrainScaled, targetsTrainScaled)
MSEtestScaled, outputsTestScaled = NN.evaluate(inputsTestScaled, targetsTestScaled)
print "Test MSE =", MSEtestScaled
# Plot train and validation MSE progression
plt.figure()
trainLine, = plt.plot(np.arange(numEpochs).reshape(1,-1).T, trainMse.T, 'r', label='Train MSE')
valLine, = plt.plot(np.arange(numEpochs).reshape(1,-1).T, valMse.T, 'b', label='Val MSE')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend(handles=[trainLine, valLine])
plt.title('Train and Val MSE Over Time')
# Plot Test Output
if prefix == "house":
plt.figure()
tLine, = plt.plot(np.sort(targetsTestScaled.T,axis=0), 'r', label='Targets')
oLine, = plt.plot(np.sort(outputsTestScaled.T,axis=0), label='Outputs' )
plt.xlabel('Sample')
plt.ylabel('Scaled Output')
else:
plt.figure()
tLine, = plt.plot(targetsTestScaled.T, 'r', label='Targets')
oLine, = plt.plot(outputsTestScaled.T, label='Outputs')
plt.xlabel('Sample')
plt.ylabel('Scaled Output')
plt.title('Network Test Output vs. Test Data')
plt.legend(handles=[tLine, oLine])
plt.show()
np.savetxt("testOut.csv", outputsTestScaled.T, delimiter=",")
np.savetxt("testTargets.csv", targetsTestScaled.T, delimiter=",")
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements conaryrc handling.
"""
import fnmatch
import os
import sys
import textwrap
import xml.parsers.expat
import re
import traceback
import urllib
import pwd
from conary.deps import deps, arch
from conary.lib import util, api
from conary.lib.cfg import * # pyflakes=ignore
from conary.lib.http import proxy_map
from conary import errors
from conary import versions
from conary import flavorcfg
# ----------- conary specific types
class ServerGlobList(list):
multipleMatches = False
def find(self, server):
l = []
for (serverGlob, item) in ServerGlobList.__iter__(self):
            # this is case insensitive, which is perfect for hostnames
if fnmatch.fnmatch(server, serverGlob):
if not self.multipleMatches:
return item
l.append(item)
if not self.multipleMatches:
return None
return l
def _fncmp(self, a, b):
# Comparison function
# Equal elements
if a[0] == b[0]:
return 0
if fnmatch.fnmatch(a[0], b[0]):
return -1
return 1
def extend(self, itemList):
# Look for the first item which globs to this, and insert the new
# item before it. That makes sure find always matches on the
# most-specific instance
for newItem in reversed(itemList):
self.append(newItem)
def extendSort(self, itemList):
"""Extend the current list with the new items, categorizing them and
eliminating duplicates"""
nlist = sorted(self + [ x for x in reversed(itemList)], self._fncmp)
# Walk the list, remove duplicates
del self[:]
lasti = None
for ent in nlist:
if lasti is not None and lasti[0] == ent[0]:
self[-1] = ent
else:
list.append(self, ent)
lasti = ent
def append(self, newItem):
location = None
removeOld = False
for i, (serverGlob, info) in enumerate(ServerGlobList.__iter__(self)):
if fnmatch.fnmatch(newItem[0], serverGlob):
if not self.multipleMatches and serverGlob == newItem[0]:
removeOld = True
location = i
break
if location is None:
list.append(self, newItem)
elif removeOld:
self[location] = newItem
else:
self.insert(location, newItem)
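# Illustrative ordering behaviour (assumed example, not part of the original module):
# append() inserts a new entry before the first glob that already matches it, so
# find() always returns the most specific match:
#
#   sgl = ServerGlobList()
#   sgl.append(('*', 'fallback'))
#   sgl.append(('repo.example.com', 'specific'))   # hypothetical hostname
#   sgl.find('repo.example.com')                   # -> 'specific', not 'fallback'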
class UserInformation(ServerGlobList):
def __iter__(self):
for x in ServerGlobList.__iter__(self):
yield (x[0], x[1][0], x[1][1])
def addServerGlob(self, *args):
        # handle (glob, name, passwd) and transform to (glob, (name, passwd))
if len(args) == 3:
args = args[0], (args[1], args[2])
ServerGlobList.append(self, args)
def addServerGlobs(self, globList):
ServerGlobList.extendSort(self, globList)
def extend(self, other):
for item in other:
self.addServerGlob(*item)
def append(self, item):
self.addServerGlob(*item)
def remove(self, item):
if len(item) == 3:
item = (item[0], (item[1], item[2]))
ServerGlobList.remove(self, item)
def insert(self, pos, item):
if len(item) == 3:
item = (item[0], (item[1], item[2]))
ServerGlobList.insert(self, pos, item)
def __reduce__(self):
# This is needed to make cPickle work because __iter__ returns 3-tuples
# which cPickle appends directly to the list using internal list code
# instead of our append().
return (type(self), (list(self),))
def __init__(self, initVal = None):
ServerGlobList.__init__(self)
if initVal is not None:
for val in initVal:
self.addServerGlob(*val)
class CfgUserInfoItem(CfgType):
def parseString(self, str):
val = str.split()
if len(val) < 2 or len(val) > 3:
raise ParseError("expected <hostglob> <user> [<password>]")
elif len(val) == 2:
return (val[0], val[1], None)
else:
pw = (val[2] is not None and util.ProtectedString(val[2])) or None
return (val[0], val[1], pw)
def format(self, val, displayOptions=None):
serverGlob, user, password = val
if password is None:
return '%s %s' % (serverGlob, user)
elif displayOptions.get('hidePasswords'):
return '%s %s <password>' % (serverGlob, user)
else:
return '%s %s %s' % (serverGlob, user, password)
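# A conaryrc "user" line is parsed by CfgUserInfoItem above; for example
# (hostname and credentials are made up):
#
#   user *.example.com myuser mypassword
#   user repo.example.com anonymous
#
# The password field is optional and is wrapped in util.ProtectedString so it
# can be masked when the configuration is displayed.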
class CfgUserInfo(CfgList):
def __init__(self, default=[]):
CfgList.__init__(self, CfgUserInfoItem, UserInformation,
default = default)
def set(self, curVal, newVal):
curVal.extend(newVal)
return curVal
class EntitlementList(ServerGlobList):
multipleMatches = True
def addEntitlement(self, serverGlob, entitlement, entClass = None):
self.append((serverGlob, (entClass, util.ProtectedString(entitlement))))
class CfgEntitlementItem(CfgType):
def parseString(self, str):
val = str.split()
if len(val) == 3:
# Output from an entitlement file, which still has a class
import warnings
warnings.warn("\nExpected an entitlement line with no entitlement "
"class.\nEntitlement classes will be ignored in the future.\n"
"Please change the 'entitlement %s' config line to\n"
"'entitlement %s %s'" % (str, val[0], val[2]),
DeprecationWarning)
return (val[0], (val[1], util.ProtectedString(val[2])))
elif len(val) != 2:
raise ParseError("expected <hostglob> <entitlement>")
return (val[0], (None, util.ProtectedString(val[1])))
def format(self, val, displayOptions=None):
if val[1][0] is None:
return '%s %s' % (val[0], val[1][1])
else:
return '%s %s %s' % (val[0], val[1][0], val[1][1])
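# Corresponding conaryrc syntax (values are illustrative):
#
#   entitlement repo.example.com 1234567890abcdef
#
# The older three-field form ("entitlement <host> <class> <key>") still parses,
# but triggers the DeprecationWarning emitted above.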
class CfgEntitlement(CfgList):
def __init__(self, default=[]):
CfgList.__init__(self, CfgEntitlementItem, EntitlementList,
default = default)
def set(self, curVal, newVal):
curVal.extend(newVal)
return curVal
class CfgLabel(CfgType):
def format(self, val, displayOptions=None):
return val.asString()
def parseString(self, val):
try:
return versions.Label(val)
except versions.ParseError, e:
raise ParseError, e
class CfgDependencyClass(CfgType):
def format(self, val, displayOptions=None):
return val.tagName
def parseString(self, val):
klass = deps.dependencyClassesByName.get(val, None)
if klass is None:
raise ParseError('unknown dependency class: %s' % val)
return klass
class CfgRepoMapEntry(CfgType):
def parseString(self, str):
val = str.split()
if len(val) != 2:
raise ParseError("expected <hostglob> <url>")
match = re.match('https?://([^:]*):[^@]*@([^/:]*)(?::.*)?/.*', val[1])
if match is not None:
user, server = match.groups()
raise ParseError, ('repositoryMap entries should not contain '
'user names and passwords; use '
'"user %s %s <password>" instead' %
(server, user))
return (val[0], val[1])
def format(self, val, displayOptions=None):
return '%-25s %s' % (val[0], val[1])
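# Example repositoryMap line accepted by CfgRepoMapEntry (values are made up):
#
#   repositoryMap repo.example.com https://repo.example.com/conary/
#
# Embedding "user:password@" in the URL is rejected; credentials belong on a
# separate "user" line, as the error message above suggests.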
class RepoMap(ServerGlobList):
    # Pretend to be a dict; repositoryMaps used to be dicts and this should
    # ease the transition.
def __setitem__(self, key, val):
if type(key) is int:
return ServerGlobList.__setitem__(self, key, val)
self.append((key, val))
def __getitem__(self, key):
if type(key) is int:
return ServerGlobList.__getitem__(self, key)
return self.find(key)
def has_key(self, key):
r = self.find(key)
if r is None:
return False
return True
    def __contains__(self, key):
        # 'key in self' here would recurse into __contains__ forever; use the glob lookup
        return self.has_key(key)
def clear(self):
del self[:]
def update(self, other):
for key, val in other.iteritems():
self.append((key, val))
def iteritems(self):
return iter(self)
def items(self):
return self
def keys(self):
return [ x[0] for x in self ]
def iterkeys(self):
return ( x[0] for x in self )
def values(self):
return [ x[1] for x in self ]
def itervalues(self):
return ( x[1] for x in self )
def get(self, key, default):
r = self.find(key)
if r is None:
return default
return r
def __init__(self, repoMap=[]):
if hasattr(repoMap, 'iteritems'):
ServerGlobList.__init__(self)
self.update(repoMap)
else:
ServerGlobList.__init__(self, repoMap)
class CfgRepoMap(CfgList):
def __init__(self, default=[]):
CfgList.__init__(self, CfgRepoMapEntry, RepoMap, default=default)
def set(self, curVal, newVal):
curVal.extend(newVal)
return curVal
class CfgFlavor(CfgType):
default = deps.Flavor()
def copy(self, val):
return val.copy()
def parseString(self, val):
try:
f = deps.parseFlavor(val)
except Exception, e:
raise ParseError, e
if f is None:
raise ParseError, 'Invalid flavor %s' % val
return f
def format(self, val, displayOptions=None):
val = ', '.join(deps.formatFlavor(val).split(','))
if displayOptions and displayOptions.get('prettyPrint', False):
val = ('\n%26s'%'').join(textwrap.wrap(val, 48))
return val
class CfgFingerPrintMapItem(CfgType):
def parseString(self, val):
val = val.split(None, 1)
label = val[0]
try:
# compile label to verify that it is valid
re.compile(label)
except Exception, e:
raise ParseError, "Invalid regexp: '%s': " % label + str(e)
if len(val) == 1 or not val[1] or val[1].lower() == 'none':
fingerprint = None
else:
# remove all whitespace
fingerprint = ''.join(val[1].split())
return label, fingerprint
def format(self, val, displayOptions=None):
# val[1] may be None
return ' '.join([val[0], str(val[1])])
class CfgFingerPrintMap(CfgList):
def __init__(self, default={}):
CfgList.__init__(self, CfgFingerPrintMapItem, default=default)
class CfgFingerPrint(CfgType):
def parseString(self, val):
val = val.replace(' ', '')
if not val or val.lower() == 'none':
return None
return val
class CfgLabelList(list):
def __repr__(self):
return "CfgLabelList(%s)" % list.__repr__(self)
def __getslice__(self, i, j):
return CfgLabelList(list.__getslice__(self, i, j))
def versionPriority(self, first, second):
return self.priority(first.trailingLabel(), second.trailingLabel())
def priority(self, first, second):
# returns -1 if the first label occurs earlier in the list than
# the second label does; None if either or both labels are missing
# from the path. If the labels are identical and both are in the
# path, we return 0 (I don't know how useful that is, but what the
# heck)
firstIdx = None
secondIdx = None
for i, l in enumerate(self):
if firstIdx is None and l == first:
firstIdx = i
if secondIdx is None and l == second:
secondIdx = i
if firstIdx is None or secondIdx is None:
return None
return cmp(firstIdx, secondIdx)
class ProxyEntry(CfgType):
def parseString(self, str):
match = re.match('https?://.*', str)
if match is None:
raise ParseError('Invalid proxy url %s' % str)
return CfgType.parseString(self, str)
class CfgProxy(CfgDict):
def updateFromString(self, val, str):
suppProtocols = ['http', 'https']
vlist = str.split()
if len(vlist) > 2:
raise ParseError("Too many arguments for proxy configuration '%s'"
% str)
if not vlist:
raise ParseError("Arguments required for proxy configuration")
if len(vlist) == 2:
if vlist[0] not in suppProtocols:
                raise ParseError('Unknown proxy protocol %s' % vlist[0])
if vlist[1] == "None":
# Special value to turn proxy values off
if vlist[0] in val:
del val[vlist[0]]
return val
return CfgDict.updateFromString(self, val, str)
# At this point, len(vlist) == 1
# Fix it up
try:
protocol, rest = str.split(':', 1)
except ValueError:
# : not in the value
if str == "None":
# Special value that turns off the proxy
for protocol in suppProtocols:
if protocol in val:
del val[protocol]
return val
raise ParseError("Invalid proxy configuration value %s" % str)
# This next test duplicates the work done by ProxyEntry.parseString,
# but it's pretty cheap to do here since we already have the protocol
# parsed out
if protocol not in suppProtocols:
            raise ParseError('Unknown proxy protocol %s' % protocol)
CfgDict.updateFromString(self, val, 'http http:' + rest)
CfgDict.updateFromString(self, val, 'https https:' + rest)
return val
def __init__(self, default={}):
CfgDict.__init__(self, ProxyEntry, default=default)
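# Sketch of the "proxy" forms handled by CfgProxy.updateFromString above
# (hostnames are placeholders):
#
#   proxy http://proxy.example.com:3128      # sets both the http and https entries
#   proxy https https://proxy.example.com    # sets only the https entry
#   proxy None                               # clears any configured proxies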
CfgInstallLabelPath = CfgLineList(CfgLabel, listType = CfgLabelList)
CfgDependencyClassList = CfgLineList(CfgDependencyClass)
class CfgSearchPathItem(CfgType):
def parseString(self, item):
return item
CfgSearchPath = CfgLineList(CfgSearchPathItem)
class CfgProxyMap(CfgType):
default = proxy_map.ProxyMap()
def updateFromString(self, val, string):
parts = string.split()
if parts == ['[]']:
val.clear()
return val
if len(parts) < 2:
raise ParseError("Expected: proxyMap <pattern> "
"[http://proxy1|DIRECT] [proxy2 ...]")
pattern, targets = parts[0], parts[1:]
val.addStrategy(pattern, targets)
return val
def setFromString(self, val, string):
pm = val.__class__()
return self.updateFromString(pm, string)
def toStrings(self, value, displayOptions):
for pattern, targets in value.filterList:
yield ' '.join([str(pattern)] + [str(x) for x in targets])
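# proxyMap config lines take a host pattern plus one or more targets, e.g.
# (all values illustrative):
#
#   proxyMap *.example.com http://proxy.example.com:3128
#   proxyMap * DIRECT
#   proxyMap []          # clears all previously configured strategies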
class CfgCapsuleSync(CfgEnum):
validValues = [ 'false', 'clean', 'pin', 'update' ]
def parseString(self, val):
if val.lower() == 'true':
# Backwards compatibility
val = 'clean'
return CfgEnum.parseString(self, val)
def _getDefaultPublicKeyrings():
publicKeyrings = []
# If we are root, don't use the keyring in $HOME, since a process started
# under sudo will have $HOME set to the old user's (CNY-2630)
# CNY-2722: look up the directory with getpwuid, instead of using $HOME
try:
ent = pwd.getpwuid(os.getuid())
pwDir = ent[5]
# If home dir doesn't exist, don't bother
if os.path.isdir(pwDir):
publicKeyrings.append(os.path.join(pwDir, '.gnupg', 'pubring.gpg'))
except KeyError:
pass
publicKeyrings.append('/etc/conary/pubring.gpg')
return publicKeyrings
class ConaryContext(ConfigSection):
""" Conary uses context to let the value of particular config parameters
be set based on a keyword that can be set at the command line.
Configuartion values that are set in a context are overridden
by the values in the context that have been set. Values that are
unset in the context do not override the default config values.
"""
archDirs = (CfgPathList, ('/etc/conary/arch',
'/etc/conary/distro/arch',
'~/.conary/arch'))
autoLoadRecipes = CfgList(CfgString)
autoResolve = (CfgBool, False)
autoResolvePackages = (CfgBool, True)
buildFlavor = CfgFlavor
buildLabel = CfgLabel
buildPath = (CfgPath, '~/conary/builds')
cleanAfterCook = (CfgBool, True)
commitRelativeChangeset = (CfgBool, True)
componentDirs = (CfgPathList, ('/etc/conary/components',
'/etc/conary/distro/components',
'~/.conary/components'))
configComponent = (CfgBool, True)
contact = None
context = None
dbPath = '/var/lib/conarydb'
debugExceptions = (CfgBool, False)
debugRecipeExceptions = (CfgBool, False)
defaultMacros = (CfgPathList, ('/etc/conary/macros',
'/etc/conary/macros.d/*',
'~/.conary/macros'))
emergeUser = (CfgString, 'emerge')
enforceManagedPolicy = (CfgBool, True)
entitlement = CfgEntitlement
entitlementDirectory = (CfgPath, '/etc/conary/entitlements')
environment = CfgDict(CfgString)
excludeTroves = CfgRegExpList
flavor = CfgList(CfgFlavor)
flavorPreferences = CfgList(CfgFlavor)
fullVersions = CfgBool
fullFlavors = CfgBool
localRollbacks = CfgBool
keepRequired = CfgBool
ignoreDependencies = (CfgDependencyClassList,
[ deps.AbiDependency, deps.RpmLibDependencies])
installLabelPath = CfgInstallLabelPath
interactive = (CfgBool, False)
logFile = (CfgPathList, ('/var/log/conary',
'~/.conary/log',))
lookaside = (CfgPath, '~/conary/cache')
macros = CfgDict(CfgString)
mirrorDirs = (CfgPathList, ('~/.conary/mirrors',
'/etc/conary/distro/mirrors',
'/etc/conary/mirrors',))
modelPath = '/etc/conary/system-model'
name = None
quiet = CfgBool
pinTroves = CfgRegExpList
policyDirs = (CfgPathList, ('/usr/lib/conary/policy',
'/usr/lib/conary/distro/policy',
'/etc/conary/policy',
'~/.conary/policy'))
shortenGroupFlavors = CfgBool
syncCapsuleDatabase = (CfgCapsuleSync, 'update')
# Upstream Conary proxy
conaryProxy = CfgProxy
# HTTP proxy
proxy = CfgProxy
proxyMap = CfgProxyMap
connectAttempts = (CfgInt, 3, "Number of connection attempts to make "
"for outbound HTTP requests.")
# The first keyring in the list is writable, and is used for storing the
# keys that are not present on the system-wide keyring. Always expect
# Conary to write to the first keyring.
pubRing = (CfgPathList, _getDefaultPublicKeyrings())
uploadRateLimit = (CfgInt, 0,
"Upload rate limit, in bytes per second")
downloadRateLimit = (CfgInt, 0,
"Download rate limit, in bytes per second")
recipeTemplate = None
repositoryMap = CfgRepoMap
resolveLevel = (CfgInt, 2)
root = (CfgPath, '/')
recipeTemplateDirs = (CfgPathList, ('~/.conary/recipeTemplates',
'/etc/conary/recipeTemplates'))
showLabels = CfgBool
showComponents = CfgBool
searchPath = CfgSearchPath
signatureKey = CfgFingerPrint
signatureKeyMap = CfgFingerPrintMap
siteConfigPath = (CfgPathList, ('/etc/conary/site',
'/etc/conary/distro/site',
'~/.conary/site'))
sourceSearchDir = (CfgPath, '.')
threaded = (CfgBool, True)
downloadFirst = (CfgBool, False)
tmpDir = (CfgPath, '/var/tmp')
trustThreshold = (CfgInt, 0)
trustedCerts = (CfgPathList, (),
'List of CA certificates which are trusted to identify a remote '
'repository using SSL. Entries may be files, dirs, or globs.')
trustedKeys = (CfgList(CfgString), [])
updateThreshold = (CfgInt, 15)
useDirs = (CfgPathList, ('/etc/conary/use',
'/etc/conary/distro/use',
'~/.conary/use'))
user = CfgUserInfo
baseClassDir = (CfgPath, '/usr/share/conary/baseclasses')
verifyDirsNoNewFiles = (CfgPathList, ('/proc', '/sys', '/home', '/dev',
'/mnt', '/tmp', '/var',
'/media', '/initrd' ))
windowsBuildService = CfgString
systemIdScript = CfgPath
def _resetSigMap(self):
self.resetToDefault('signatureKeyMap')
def __init__(self, *args, **kw):
ConfigSection.__init__(self, *args, **kw)
self.addListener('signatureKey', lambda *args: self._resetSigMap())
def _writeKey(self, out, cfgItem, value, options):
# Suppress all default values, as opposed to the default behavior which
# only suppresses defaults that are None
name = cfgItem.name
if name not in self._values or self._values[name].isDefault():
return
ConfigSection._writeKey(self, out, cfgItem, value, options)
class ConaryConfiguration(SectionedConfigFile):
# Inherit all context options
_cfg_bases = (ConaryContext,)
# this allows a new section to be created on the fly with the type
# ConaryContext
_allowNewSections = True
_defaultSectionType = ConaryContext
@api.publicApi
def __init__(self, readConfigFiles = False, ignoreErrors = False,
readProxyValuesFirst=True):
"""
Initialize a ConaryConfiguration object
@param readConfigFiles: If True, read /etc/conaryrc and entitlements
files
@type readConfigFiles: bool
@param ignoreErrors: If True, ParseError exceptions will not be raised
@type ignoreErrors: bool
@param readProxyValuesFirst: If True, parse local config files for
proxy settings and apply them before further configuration.
@type readProxyValuesFirst: bool
@raises ParseError: Raised if configuration syntax is invalid and
ignoreErrors is False.
"""
SectionedConfigFile.__init__(self)
self._ignoreErrors = ignoreErrors
self.addListener('signatureKey', lambda *args: self._resetSigMap())
if readConfigFiles:
if readProxyValuesFirst:
self.limitToKeys('conaryProxy', 'proxy')
self.ignoreUrlIncludes()
self.readFiles()
self.limitToKeys(False)
self.ignoreUrlIncludes(False)
self.readFiles()
# Entitlement files are config files
self.readEntitlementDirectory()
util.settempdir(self.tmpDir)
def getProxyMap(self):
return getProxyMap(self)
def _getOpener(self):
return transport.URLOpener(proxyMap=self.getProxyMap(),
connectAttempts=self.connectAttempts)
def readEntitlementDirectory(self):
if not os.path.isdir(self.entitlementDirectory):
return
try:
files = os.listdir(self.entitlementDirectory)
except OSError:
return
for basename in files:
try:
if os.path.isfile(os.path.join(self.entitlementDirectory,
basename)):
ent = loadEntitlement(self.entitlementDirectory, basename)
if not ent:
continue
self.entitlement.addEntitlement(ent[0], ent[2],
entClass = ent[1])
except OSError:
return
def readFiles(self):
self.read("/etc/conaryrc", exception=False)
if os.environ.has_key("HOME"):
self.read(os.environ["HOME"] + "/" + ".conaryrc", exception=False)
self.read("conaryrc", exception=False)
def setContext(self, name):
""" Copy the config values from the context named name (if any)
into the main config file. Returns False if not such config
file found.
"""
if not self.hasSection(name):
return False
self.context = name
context = self.getSection(name)
for key, ctxval in context._values.iteritems():
if ctxval.isDefault():
continue
newval = self._cow(key)
newval.updateFromContext(ctxval)
return True
def getContext(self, name):
if not self.hasSection(name):
return False
return self.getSection(name)
def displayContext(self, out=None):
if out is None:
out = sys.stdout
if self.context:
out.write('[%s]\n' % self.context)
context = self.getContext(self.context)
context.setDisplayOptions(**self._displayOptions)
context.display(out)
else:
out.write('No context set.\n')
def _writeSection(self, name, options):
return self.getDisplayOption('showContexts', False)
def requireInstallLabelPath(self):
# NOTE - conary doesn't use this check anymore. Kept for
# backwards compatibility.
if not self.installLabelPath:
print >> sys.stderr, "installLabelPath is not set"
sys.exit(1)
def _resetSigMap(self):
self.resetToDefault('signatureKeyMap')
def initializeFlavors(self):
"""
Initialize flavor preferences based on files typically
found in /etc/conary/arch (archDirs) and /etc/conary/use
@raises RuntimeError: Raised if use flags conflict in
a way which cannot be reconciled
(see L{deps.DependencyClass.MergeFlags})
"""
self.flavorConfig = flavorcfg.FlavorConfig(self.useDirs,
self.archDirs)
if self.flavor == []:
self.flavor = [deps.Flavor()]
self.flavor = self.flavorConfig.toDependency(override=self.flavor)
newFlavors = []
hasIns = False
# if any flavor has an instruction set, don't merge
for flavor in self.flavor:
if deps.DEP_CLASS_IS in flavor.getDepClasses():
hasIns = True
break
if not hasIns:
# use all the flavors for the main arch first
for depList in arch.currentArch:
for flavor in self.flavor:
insSet = deps.Flavor()
for dep in depList:
insSet.addDep(deps.InstructionSetDependency, dep)
newFlavor = flavor.copy()
newFlavor.union(insSet)
newFlavors.append(newFlavor)
self.flavor = newFlavors
# buildFlavor is installFlavor + overrides
self.buildFlavor = deps.overrideFlavor(self.flavor[0],
self.buildFlavor)
if self.isDefault('flavorPreferences'):
self.flavorPreferences = arch.getFlavorPreferencesFromFlavor(
self.flavor[0])
self.flavorConfig.populateBuildFlags()
def selectSignatureKey(cfg, label):
if not cfg.signatureKeyMap:
return cfg.signatureKey
label = str(label)
if "local@local" in label:
label = str(cfg.buildLabel)
for sigLabel, fingerprint in cfg.signatureKeyMap:
if re.match(sigLabel, label):
return fingerprint
return cfg.signatureKey
def emitEntitlement(serverName, className = None, key = None, timeout = None,
retryOnTimeout = None):
# XXX This probably should be emitted using a real XML DOM writer,
# but this will probably do for now. And yes, all that mess is required
# to be well-formed and valid XML.
if className is None:
classInfo = ""
else:
classInfo = "<class>%s</class>" % className
s = """<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<!DOCTYPE entitlement [
<!ELEMENT entitlement (server, class, key)>
<!ELEMENT server (#PCDATA)>
<!ELEMENT class (#PCDATA)>
<!ELEMENT key (#PCDATA)>
<!ELEMENT timeout EMPTY>
<!ATTLIST
timeout retry (True|False) "True"
val CDATA #IMPLIED>
]>
<entitlement>
<server>%s</server>
%s
<key>%s</key>
""" % (serverName, classInfo, key)
if timeout is not None or retryOnTimeout is not None:
s += " <timeout "
if timeout is not None:
s += 'val="%d" ' % timeout
if retryOnTimeout:
s += 'retry="True" '
elif retryOnTimeout is not None:
s += 'retry="False" '
s += '/>\n'
s += "</entitlement>\n"
return s
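# For reference, a call such as emitEntitlement('repo.example.com', key='XYZ',
# timeout=30) yields the XML prologue and DTD above followed by roughly
# (values are placeholders):
#
#   <entitlement>
#     <server>repo.example.com</server>
#     <key>XYZ</key>
#     <timeout val="30" />
#   </entitlement>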
def loadEntitlementFromString(xmlContent, *args, **kw):
# handle old callers
source=kw.get('source', '<override>')
serverName = kw.get('serverName', None)
if len(args):
if len(args) == 1:
source = args[0]
elif len(args) == 2:
serverName = args[0]
source = args[1]
else:
raise TypeError('loadEntitlementFromString() takes exactly 1 argument (%d given)' %len(args))
if serverName:
import warnings
warnings.warn("The serverName argument to loadEntitlementFromString "
"has been deprecated", DeprecationWarning)
returnTimeout = kw.pop('returnTimeout', False)
p = EntitlementParser()
# wrap this in an <entitlement> top level tag (making it optional
# [but recommended!] in the entitlement itself)
#
# XXX This synthetic wrapping should probably be made obsolete; everyone
# should use emitEntitlement, which does the right thing.
try:
if '<entitlement>' not in xmlContent:
p.parse("<entitlement>" + xmlContent + "</entitlement>")
else:
p.parse(xmlContent)
try:
entClass = p.get('class', None)
entKey = p['key']
except KeyError:
raise errors.ConaryError("Entitlement incomplete. Entitlements"
" must include 'server', 'class', and"
" 'key' values")
except Exception, err:
raise errors.ConaryError("Malformed entitlement at %s:"
" %s" % (source, err))
if returnTimeout:
return (p['server'], entClass, entKey, p['timeout'], p['retry'])
return (p['server'], entClass, entKey)
def loadEntitlementFromProgram(fullPath, serverName):
""" Executes the given file to generate an entitlement.
The executable must print to stdout a full valid entitlement xml
blob.
"""
readFd, writeFd = os.pipe()
stdErrRead, stdErrWrite = os.pipe()
childPid = os.fork()
if not childPid:
nullFd = os.open("/dev/null", os.O_RDONLY)
try:
try:
os.close(readFd)
# switch stdin to /dev/null
os.dup2(nullFd, 0)
os.close(nullFd)
                # both stdout and stderr are redirected - the entitlement
                # should be on stdout, and error info should be
                # on stderr.
os.dup2(writeFd, 1)
os.dup2(stdErrWrite, 2)
os.close(writeFd)
os.close(stdErrWrite)
util.massCloseFileDescriptors(3, 252)
os.execl(fullPath, fullPath, serverName)
except Exception:
traceback.print_exc(sys.stderr)
finally:
os._exit(1)
os.close(writeFd)
os.close(stdErrWrite)
# read in from pipes. When they're closed,
# the child process should have exited.
output = []
errorOutput = []
buf = os.read(readFd, 1024)
errBuf = os.read(stdErrRead, 1024)
while buf or errBuf:
if buf:
output.append(buf)
buf = os.read(readFd, 1024)
if errBuf:
errorOutput.append(errBuf)
errBuf = os.read(stdErrRead, 1024)
pid, status = os.waitpid(childPid, 0)
os.close(readFd)
os.close(stdErrRead)
errMsg = ''
if os.WIFEXITED(status) and os.WEXITSTATUS(status):
errMsg = ('Entitlement generator at "%s"'
' died with exit status %d' % (fullPath,
os.WEXITSTATUS(status)))
elif os.WIFSIGNALED(status):
errMsg = ('Entitlement generator at "%s"'
' died with signal %d' % (fullPath, os.WTERMSIG(status)))
else:
errMsg = ''
if errMsg:
if errorOutput:
errMsg += ' - stderr output follows:\n%s' % ''.join(errorOutput)
else:
errMsg += ' - no output on stderr'
raise errors.ConaryError(errMsg)
    # looks like we generated an entitlement - there's still the possibility
    # that the entitlement is broken.
xmlContent = ''.join(output)
return loadEntitlementFromString(xmlContent, fullPath)
def loadEntitlement(dirName, serverName):
if not dirName:
# XXX
# this is a hack for the repository server which doesn't support
# entitlements, but needs to stop cross talking anyway
return None
fullPath = os.path.join(dirName, serverName)
if not os.access(fullPath, os.R_OK):
return None
if os.access(fullPath, os.X_OK):
return loadEntitlementFromProgram(fullPath,
'<executable %s>' % fullPath)
elif os.access(fullPath, os.R_OK):
return loadEntitlementFromString(open(fullPath).read(), fullPath)
else:
return None
class EntitlementParser(dict):
def StartElementHandler(self, name, attrs):
if name not in [ 'entitlement', 'server', 'class', 'key', 'timeout' ]:
raise SyntaxError
self.state.append((str(name), attrs))
self.data = None
def EndElementHandler(self, name):
state, attrs = self.state.pop()
if state == 'timeout':
self['retry'] = (str(attrs['retry']) == 'True')
if 'val' in attrs:
self['timeout'] = int(attrs['val'])
else:
# str() converts from unicode
self[state] = str(self.data)
def CharacterDataHandler(self, data):
self.data = data
def parse(self, s):
self.state = []
return self.p.Parse(s)
def __init__(self):
self.p = xml.parsers.expat.ParserCreate()
self.p.StartElementHandler = self.StartElementHandler
self.p.EndElementHandler = self.EndElementHandler
self.p.CharacterDataHandler = self.CharacterDataHandler
dict.__init__(self)
self['retry'] = True
self['timeout'] = None
def getProxyFromConfig(cfg):
"""Get the proper proxy configuration variable from the supplied config
object"""
# Is there a conaryProxy defined?
proxy = {}
for k, v in cfg.conaryProxy.iteritems():
# Munge http.* to conary.* to flag the transport layer that
# we're using a Conary proxy
v = 'conary' + v[4:]
proxy[k] = v
if proxy:
return proxy
return cfg.proxy
def getProxyMap(cfg):
"""
Return the proxyMap, or create it from old-style proxy/conaryProxy
entries.
"""
if cfg.proxyMap:
return cfg.proxyMap
# This creates a new proxyMap instance. We don't want to override the
# config's proxyMap, since old consumers of the API may modify the settings
# in-place and expect the changes to take effect.
proxyDict = urllib.getproxies()
proxyDict.update(cfg.proxy)
if hasattr(cfg, 'conaryProxy'):
for scheme, url in cfg.conaryProxy.items():
if url.startswith('http:'):
url = 'conary:' + url[5:]
elif url.startswith('https:'):
url = 'conarys:' + url[6:]
proxyDict[scheme] = url
return proxy_map.ProxyMap.fromDict(proxyDict)
|
|
# This is based on https://github.com/cpopp/MicroTelnetServer
#
# Major modifications started by ulno (http://ulno.net) on 2017-09-15
# adding chacha encryption
MAGIC = b"UlnoIOT-NetREPL:"
import uiot._cfg as _cfg
import time, socket, network, uos, machine, micropython, errno, ubinascii
from crypt_socket import Crypt_Socket
# debug
# from ulnoiot import *
# import ulnoiot.shield.devkit1_display
# dp=devices["dp1"]
_key = None
_crypt_socket = None
_server_socket = None
_flush_timer = None
_telnetwrapper = None
# Provide necessary functions for dupterm and replace telnet control characters that come in.
class TelnetWrapper():
BUFFER_SIZE = 192
INTERVAL = 20 # reaction time in ms for buffering
def __init__(self, crypt_socket):
self.cs = crypt_socket
self.in_buffer = None
self.in_fill = 0
self.in_process = 0
self.out_buffer = bytearray(self.BUFFER_SIZE)
self.out_fill = 0
self.out_last_sent = time.ticks_ms()
self.out_buffer_lock = False
def readinto(self, b):
# TODO: check that this is non-blocking
# return None, when no data available
# else return the number of bytes read.
if len(b) == 0:
print("readinfo: empty buffer")
return None # TODO:maybe we should then return 0?
if self.in_process == self.in_fill: # more data needed
(self.in_buffer, self.in_fill) = self.cs.receive()
# print("r:",self.in_buffer[0:self.in_fill]) # debug
self.in_process = 0
if self.in_process < self.in_fill:
r = self.in_buffer[self.in_process]
if r == 0x1e: # close requested
self.close()
return None
else:
b[0] = r
# print("read",b[0])
self.in_process += 1
# if self.in_process < self.in_fill: return 1 # just handing over 1 byte
return 1 # just handed over 1 byte
return None # couldn't read anything
def acquire_out_buffer(self):
while self.out_buffer_lock == True:
time.sleep_ms(1) # Wait for release
self.irqstate = machine.disable_irq()
if self.out_buffer_lock == True: # TODO: check if this locking is enough
machine.enable_irq(self.irqstate)
return False
self.out_buffer_lock = True
return True
def release_out_buffer(self):
self.out_buffer_lock = False
machine.enable_irq(self.irqstate)
def _send(self):
        # TODO: does this need to be non-blocking?
# dp.println("s {},{},{}".format(self.out_fill,int(self.out_buffer[0]),int(self.out_buffer[1]))) # debug
self.cs.send(self.out_buffer, length=self.out_fill)
self.out_fill = 0
self.out_last_sent = time.ticks_ms()
def _write1(self, byte):
self.acquire_out_buffer()
# if byte == 0 or byte == 0xff: return # TODO: debug
# if byte != 10 and byte != 13 and (byte < 32 or byte > 127): return # TODO: debug
# if byte==0 or byte>127: return # not sending this
self.out_buffer[self.out_fill] = byte
# dp.println("f1 {},{}".format(self.out_fill, self.MAXFILL))
self.out_fill += 1
# dp.println("f2 {},{}".format(self.out_fill, self.MAXFILL))
if self.out_fill >= self.BUFFER_SIZE:
# dp.println("f3 {},{}".format(self.out_fill,self.MAXFILL))
self._send()
self.release_out_buffer()
def flush(self):
t = time.ticks_ms()
if self.out_fill == 0: # reset time, if there is nothing to send
self.out_last_sent = t
# debug dp.println("rt {}".format(t))
elif time.ticks_diff(t, self.out_last_sent) > self.INTERVAL:
# debug dp.println("t {},{},{}".format(time.ticks_diff(t,self.out_last_sent),
# t,self.out_last_sent))
self.acquire_out_buffer()
self._send()
self.release_out_buffer()
def write(self, data):
# sadly not called without input, makes buffering tricky
# requires the scheduled flush
for byte in data:
self._write1(byte)
self.flush()
def close(self, report=False):
# TODO: empty network buffers first?
if report:
self.write(b"\x1e")
micropython.schedule(stop_client, 0) # we are maybe in interrupt
def flush(t):
# callback for timer to flush buffer (scheduled and safe to execute)
global _telnetwrapper
_telnetwrapper.flush()
init_flush_timer()
def _flush_critical(t):
# callback for timer to flush buffer (called in interrupt)
micropython.schedule(flush, 0)
def init_flush_timer():
global _flush_timer
if _flush_timer is None:
_flush_timer = machine.Timer("netrepl")
else:
_flush_timer.deinit()
_flush_timer.init(period=TelnetWrapper.INTERVAL,
mode=machine.Timer.ONE_SHOT,
callback=_flush_critical)
# Attach new clients to dupterm and
# send telnet control characters to disable line mode
# and stop local echoing
def accept_telnet_connect(telnet_server):
global _crypt_socket, _key, _telnetwrapper
if _crypt_socket is not None:
# close any previous clients
uos.dupterm(None)
_crypt_socket.close()
client_socket, remote_addr = telnet_server.accept()
print("\nnetrepl: Connection request from:", remote_addr[0])
# prepare answer channel
client_socket.setblocking(False)
client_socket.setsockopt(socket.SOL_SOCKET, 20, uos.dupterm_notify)
# read magic and initialization in first 2s
readbytes = 0
start_t = time.ticks_ms()
block = bytearray(24)
while readbytes < 24:
try:
received = client_socket.recv(1)
if received and len(received) > 0:
block[readbytes] = received[0]
readbytes += 1
except OSError as e:
if len(e.args) > 0 \
and e.args[0] == errno.EAGAIN:
pass # try eventually again
else:
raise
if time.ticks_diff(time.ticks_ms(), start_t) >= 2000:
break
_crypt_socket = Crypt_Socket(client_socket)
if readbytes == 24 and block[0:16] == MAGIC:
print("netrepl: Received initialization request and vector.")
# setup input encryption
_crypt_socket.init_in(_key, block[16:24])
# read (now encrypted) magic word (16byte="UlnoIOT-NetREPL:"),
# key (32byte=256bit) and iv (8byte=64bit)
# but encrypted
# init=decrypt_receive(last_client_socket,64,2000) # 2s timeout for init
(init, l) = _crypt_socket.receive(request=56, timeoutms=2000) # 2s timeout for init
if l == 56 and init[0:16] == MAGIC: # Magic correct
print("netrepl: Initial handshake succeeded, received session key.\n")
# use rest for output key
_crypt_socket.init_out(init[16:48], init[48:56])
# print in terminal: last_client_socket.sendall(bytes([255, 252, 34])) # dont allow line mode
# print in terminal: last_client_socket.sendall(bytes([255, 251, 1])) # turn off local echo
_telnetwrapper = TelnetWrapper(_crypt_socket)
uos.dupterm(_telnetwrapper)
# construct timer to flush buffers often enough
init_flush_timer()
return
# something went wrong
client_socket.sendall('\nnetrepl: Wrong protocol for ulnoiot netrepl.\n')
print("\nWrong protocol for this client. Closing.\n")
_crypt_socket.close()
_crypt_socket = None
def stop_client(t=None): # allow random parameter for being scheduled
global _crypt_socket, _flush_timer
uos.dupterm(None)
if _crypt_socket:
_crypt_socket.close()
_crypt_socket = None
print("\nnetrepl: Connection closed.\n")
if _flush_timer:
_flush_timer.deinit()
def stop():
global _server_socket
stop_client()
if _server_socket:
_server_socket.close()
# start listening for telnet connections on port 23
def start(port=23, key=None, nostop=False): # TODO: take simpler default key as it will be reset
global _server_socket, _key
if nostop: # we want to check if it's already running and not restart it
if _server_socket: # not none
            return # no new initialization -> stop here
stop()
if key is None:
key = _cfg.config.netrepl
if key is None or len(key) == 0:
key = bytearray(32) # empty default key
elif len(key) == 64:
key = ubinascii.unhexlify(key)
_key = key
# will be initialized after connection
# cc_out = chacha.ChaCha(key, bytearray(8))
_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ai = socket.getaddrinfo("0.0.0.0", port)
addr = ai[0][4]
_server_socket.bind(addr)
_server_socket.listen(1)
_server_socket.setsockopt(socket.SOL_SOCKET, 20, accept_telnet_connect)
for i in (network.AP_IF, network.STA_IF):
wlan = network.WLAN(i)
if wlan.active():
print("\nnetrepl: UlnoIOT netrepl server started on {}:{}".format(wlan.ifconfig()[0], port))
connect = start # also access as connect
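# Illustrative startup (key value is a placeholder; the import path is assumed):
# a 64-character hex string is unhexlified to the 32-byte key, while an empty or
# absent key falls back to 32 zero bytes.
#
#   import uiot.netrepl as netrepl     # assumed module path for this file
#   netrepl.start(port=23, key="00" * 32)   # 64 hex characters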
# write config and connect
def setup(key, reset=True):
global netrepl_config
if len(key) != 64:
print("Key needs to be 64 bytes hex-code (256bit key).")
return
_cfg.netrepl(key)
print("Updated netrepl config.")
if reset:
print("Resetting netrepl in 3 seconds.")
time.sleep(3)
start()
else:
print("Netrepl not restarted. Call start manually to restart.")
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
"""
String manipulation functions
"""
from __future__ import absolute_import
import binascii
import functools
import os
import re
import string
import unicodedata
from enum import Enum
from itertools import chain
from operator import attrgetter
from uuid import uuid4
import bleach
import email_validator
import markdown
import translitcodec # this is NOT unused. it needs to be imported to register the codec.
from html2text import HTML2Text
from jinja2.filters import do_striptags
from lxml import etree, html
from markupsafe import Markup, escape
from speaklater import _LazyString, is_lazy_string
from sqlalchemy import ForeignKeyConstraint, inspect
# basic list of tags, used for markdown content
BLEACH_ALLOWED_TAGS = bleach.ALLOWED_TAGS + [
'sup', 'sub', 'small', 'br', 'p', 'table', 'thead', 'tbody', 'th', 'tr', 'td', 'img', 'hr', 'h1', 'h2', 'h3', 'h4',
'h5', 'h6', 'pre', 'dl', 'dd', 'dt'
]
BLEACH_ALLOWED_ATTRIBUTES = dict(bleach.ALLOWED_ATTRIBUTES, img=['src', 'alt', 'style'])
# extended list of tags, used for HTML content
BLEACH_ALLOWED_TAGS_HTML = BLEACH_ALLOWED_TAGS + [
'address', 'area', 'bdo', 'big', 'caption', 'center', 'cite', 'col', 'colgroup', 'del', 'dfn', 'dir', 'div',
'fieldset', 'font', 'ins', 'kbd', 'legend', 'map', 'menu', 'q', 's', 'samp', 'span', 'strike', 'tfoot', 'tt', 'u',
'var'
]
# yuck, this is ugly, but all these attributes were allowed in legacy...
BLEACH_ALLOWED_ATTRIBUTES_HTML = dict(BLEACH_ALLOWED_ATTRIBUTES, **{'*': [
'align', 'abbr', 'alt', 'border', 'bgcolor', 'class', 'cellpadding', 'cellspacing', 'color', 'char', 'charoff',
'cite', 'clear', 'colspan', 'compact', 'dir', 'disabled', 'face', 'href', 'height', 'headers', 'hreflang', 'hspace',
'id', 'ismap', 'lang', 'name', 'noshade', 'nowrap', 'rel', 'rev', 'rowspan', 'rules', 'size', 'scope', 'shape',
'span', 'src', 'start', 'style', 'summary', 'tabindex', 'target', 'title', 'type', 'valign', 'value', 'vspace',
'width', 'wrap'
]})
BLEACH_ALLOWED_STYLES_HTML = [
'background-color', 'border-top-color', 'border-top-style', 'border-top-width', 'border-top', 'border-right-color',
'border-right-style', 'border-right-width', 'border-right', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-bottom', 'border-left-color', 'border-left-style', 'border-left-width',
'border-left', 'border-color', 'border-style', 'border-width', 'border', 'bottom', 'border-collapse',
'border-spacing', 'color', 'clear', 'clip', 'caption-side', 'display', 'direction', 'empty-cells', 'float',
'font-size', 'font-family', 'font-style', 'font', 'font-variant', 'font-weight', 'font-size-adjust', 'font-stretch',
'height', 'left', 'list-style-type', 'list-style-position', 'line-height', 'letter-spacing', 'marker-offset',
'margin', 'margin-left', 'margin-right', 'margin-top', 'margin-bottom', 'max-height', 'min-height', 'max-width',
'min-width', 'marks', 'overflow', 'outline-color', 'outline-style', 'outline-width', 'outline', 'orphans',
'position', 'padding-top', 'padding-right', 'padding-bottom', 'padding-left', 'padding', 'page', 'page-break-after',
'page-break-before', 'page-break-inside', 'quotes', 'right', 'size', 'text-align', 'top', 'table-layout',
'text-decoration', 'text-indent', 'text-shadow', 'text-transform', 'unicode-bidi', 'visibility', 'vertical-align',
'width', 'widows', 'white-space', 'word-spacing', 'word-wrap', 'z-index'
]
LATEX_MATH_PLACEHOLDER = u"\uE000"
def encode_if_unicode(s):
if isinstance(s, _LazyString) and isinstance(s.value, unicode):
s = unicode(s)
return s.encode('utf-8') if isinstance(s, unicode) else s
def safe_upper(text):
if isinstance(text, unicode):
return text.upper()
else:
return text.decode('utf-8').upper().encode('utf-8')
def remove_accents(text, reencode=True):
if not isinstance(text, unicode):
text = text.decode('utf-8')
result = u''.join((c for c in unicodedata.normalize('NFD', text) if unicodedata.category(c) != 'Mn'))
if reencode:
return result.encode('utf-8')
else:
return result
def fix_broken_string(text, as_unicode=False):
try:
text = text.decode('utf-8')
except UnicodeDecodeError:
try:
text = text.decode('latin1')
except UnicodeDecodeError:
text = unicode(text, 'utf-8', errors='replace')
return text if as_unicode else text.encode('utf-8')
def to_unicode(text):
"""Converts a string to unicode if it isn't already unicode."""
return fix_broken_string(text, as_unicode=True) if isinstance(text, str) else unicode(text)
def remove_non_alpha(text):
return ''.join(c for c in text if c.isalnum())
def unicode_to_ascii(text):
if not isinstance(text, unicode):
text = to_unicode(text)
text = text.encode('translit/long')
return text.encode('ascii', 'ignore')
def strict_unicode(value):
"""Convert a value to unicode or fails if it is None.
Useful when converting e.g. IDs to path segments. Usually they
should not be ``None`` so we do not want to fail silently (and end
up with a literal ``None`` in the path).
"""
if value is None:
raise TypeError('strict_unicode does not accept `None`')
return unicode(value)
def slugify(*args, **kwargs):
"""Joins a series of strings into a URL slug.
    - normalizes unicode to proper ascii representations
- removes non-alphanumeric characters
- replaces whitespace with dashes
:param lower: Whether the slug should be all-lowercase
:param maxlen: Maximum slug length
:param fallback: Fallback in case of an empty slug
"""
lower = kwargs.get('lower', True)
maxlen = kwargs.get('maxlen')
fallback = kwargs.get('fallback', '')
value = u'-'.join(to_unicode(val) for val in args)
value = value.encode('translit/long')
value = re.sub(r'[^\w\s-]', u'', value).strip()
if lower:
value = value.lower()
value = re.sub(r'[-\s]+', u'-', value)
if maxlen:
value = value[0:maxlen].rstrip(u'-')
return value or fallback
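# Worked example (illustrative): slugify(u'Hello World!', maxlen=8) returns
# u'hello-wo' -- punctuation is stripped, whitespace becomes '-', and the result
# is lowercased and truncated to maxlen.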
def return_ascii(f):
"""Decorator to normalize all unicode characters.
This is useful for __repr__ methods which **MUST** return a plain string to
avoid encoding to utf8 or ascii all the time."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return unicode_to_ascii(f(*args, **kwargs))
return wrapper
def truncate(text, max_size, ellipsis='...', encoding='utf-8'):
"""
Truncate text, taking unicode chars into account
"""
encode = False
if isinstance(text, str):
encode = True
text = text.decode(encoding)
if len(text) > max_size:
text = text[:max_size] + ellipsis
if encode:
text = text.encode(encoding)
return text
def strip_tags(text):
"""Strip HTML tags and replace adjacent whitespace by one space."""
encode = False
if isinstance(text, str):
encode = True
text = text.decode('utf-8')
text = do_striptags(text)
return text.encode('utf-8') if encode else text
def render_markdown(text, escape_latex_math=True, md=None, **kwargs):
""" Mako markdown to HTML filter
:param text: Markdown source to convert to HTML
    :param escape_latex_math: Whether math expressions should be left untouched,
                              or a function that will be called to replace
                              math-mode segments.
:param md: An alternative markdown processor (can be used
to generate e.g. a different format)
:param kwargs: Extra arguments to pass on to the markdown
processor
"""
if escape_latex_math:
math_segments = []
def _math_replace(m):
segment = m.group(0)
if callable(escape_latex_math):
segment = escape_latex_math(segment)
math_segments.append(segment)
return LATEX_MATH_PLACEHOLDER
text = re.sub(r'\$[^\$]+\$|\$\$(^\$)\$\$', _math_replace, to_unicode(text))
if md is None:
result = bleach.clean(markdown.markdown(text, **kwargs), tags=BLEACH_ALLOWED_TAGS,
attributes=BLEACH_ALLOWED_ATTRIBUTES)
else:
result = md(text, **kwargs)
if escape_latex_math:
return re.sub(LATEX_MATH_PLACEHOLDER, lambda _: math_segments.pop(0), result)
else:
return result
def sanitize_for_platypus(text):
"""Sanitize HTML to be used in platypus"""
tags = ['b', 'br', 'em', 'font', 'i', 'img', 'strike', 'strong', 'sub', 'sup', 'u', 'span', 'div', 'p']
attrs = {
'font': ['size', 'face', 'color'],
'img': ['src', 'width', 'height', 'valign']
}
res = bleach.clean(text, tags=tags, attributes=attrs, strip=True).strip()
if not res:
return ''
# Convert to XHTML
doc = html.fromstring(res)
return etree.tostring(doc)
def is_valid_mail(emails_string, multi=True):
# XXX: This is deprecated, use `validate_email` or `validate_emails` instead!
# Remove this in 2.2 when the 'multi' mode is not needed anymore (only used in RB)
# and don't forget to update the paypal plugin as well!
if not emails_string:
return False
return validate_emails(emails_string) if multi else validate_email(emails_string)
def validate_email(email):
"""Validate the given email address.
This checks both if it looks valid and if it has valid
MX (or A/AAAA) records.
"""
email = to_unicode(email)
try:
email_validator.validate_email(email)
except email_validator.EmailNotValidError:
return False
else:
return True
def validate_emails(emails):
"""Validate a space/semicolon/comma-separated list of email addresses."""
emails = to_unicode(emails)
emails = re.split(r'[\s;,]+', emails)
return all(validate_email(email) for email in emails if email)
def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
return [int(text) if text.isdigit() else text.lower() for text in re.split(_nsre, s)]
def seems_html(text):
return re.search(r'<[a-z]+?>', text) is not None
def strip_control_chars(text):
return re.sub(r'[\x0B-\x1F]', '', text)
def html_color_to_rgb(hexcolor):
"""
convert #RRGGBB to an (R, G, B) tuple
"""
if not hexcolor.startswith('#'):
raise ValueError("Invalid color string '{}' (should start with '#')".format(hexcolor))
hexcolor = hexcolor[1:]
if len(hexcolor) not in {3, 6}:
raise ValueError("'#{}'' is not in #RRGGBB or #RGB format".format(hexcolor))
if len(hexcolor) == 3:
hexcolor = ''.join(c * 2 for c in hexcolor)
return tuple(float(int(hexcolor[i:i + 2], 16)) / 255 for i in range(0, 6, 2))
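# e.g. html_color_to_rgb('#ff8000') -> (1.0, ~0.502, 0.0), and the short form
# '#f80' is expanded to '#ff8800' first (values shown are approximate).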
def strip_whitespace(s):
"""Removes trailing/leading whitespace if a string was passed.
This utility is useful in cases where you might get None or
non-string values such as WTForms filters.
"""
if isinstance(s, basestring):
s = s.strip()
return s
def make_unique_token(is_unique):
"""Create a unique UUID4-based token
:param is_unique: a callable invoked with the token which should
                      return a boolean indicating if the token is actually unique
"""
token = unicode(uuid4())
while not is_unique(token):
token = unicode(uuid4())
return token
def encode_utf8(f):
@functools.wraps(f)
def _wrapper(*args, **kwargs):
rv = f(*args, **kwargs)
if not rv:
return ''
if is_lazy_string(rv):
rv = rv.value
return rv.encode('utf-8') if isinstance(rv, unicode) else str(rv)
return _wrapper
def is_legacy_id(id_):
"""Checks if an ID is a broken legacy ID.
These IDs are not compatible with new code since they are not
numeric or have a leading zero, resulting in different objects
with the same numeric id.
"""
return not isinstance(id_, (int, long)) and (not id_.isdigit() or str(int(id_)) != id_)
def text_to_repr(text, html=False, max_length=50):
"""Converts text to a suitable string for a repr
:param text: A string which might contain html and/or linebreaks
:param html: If True, HTML tags are stripped.
:param max_length: The maximum length before the string is
truncated. Use ``None`` to disable.
:return: A string that contains no linebreaks or HTML tags.
"""
if text is None:
text = u''
if html:
text = bleach.clean(text, tags=[], strip=True)
text = re.sub(r'\s+', u' ', text)
if max_length is not None and len(text) > max_length:
text = text[:max_length] + u'...'
return text.strip()
def alpha_enum(value):
"""Convert integer to ordinal letter code (a, b, c, ... z, aa, bb, ...)."""
max_len = len(string.ascii_lowercase)
return unicode(string.ascii_lowercase[value % max_len] * (value / max_len + 1))
def format_repr(obj, *args, **kwargs):
"""Creates a pretty repr string from object attributes
:param obj: The object to show the repr for.
:param args: The names of arguments to include in the repr.
The arguments are shown in order using their unicode
representation.
:param kwargs: Each kwarg is included as a ``name=value`` string
                   if it doesn't match the provided default value. This is
mainly intended for boolean attributes such as
``is_deleted`` where you don't want them to
clutter the repr unless they are set.
:param _text: When the keyword argument `_text` is provided and
not ``None``, it will include its value as extra
text in the repr inside quotes. This is useful
for objects which have one longer title or text
that doesn't look well in the unquoted
comma-separated argument list.
:param _rawtext: Like `_text` but without surrounding quotes.
:param _repr: Similar as `_text`, but uses the `repr()` of the
passed object instead of quoting it. Cannot be
used together with `_text`.
"""
def _format_value(value):
if isinstance(value, Enum):
return value.name
else:
return value
text_arg = kwargs.pop('_text', None)
raw_text_arg = kwargs.pop('_rawtext', None)
repr_arg = kwargs.pop('_repr', None)
cls = type(obj)
obj_name = cls.__name__
fkeys = set(chain.from_iterable(c.column_keys
for t in inspect(cls).tables
for c in t.constraints
if isinstance(c, ForeignKeyConstraint))) if hasattr(cls, '__table__') else set()
formatted_args = [unicode(_format_value(getattr(obj, arg)))
if arg not in fkeys
else u'{}={}'.format(arg, _format_value(getattr(obj, arg)))
for arg in args]
for name, default_value in sorted(kwargs.items()):
value = getattr(obj, name)
if value != default_value:
formatted_args.append(u'{}={}'.format(name, _format_value(value)))
if text_arg is not None:
return u'<{}({}): "{}">'.format(obj_name, u', '.join(formatted_args), text_arg)
elif raw_text_arg is not None:
return u'<{}({}): {}>'.format(obj_name, u', '.join(formatted_args), raw_text_arg)
elif repr_arg is not None:
return u'<{}({}): {!r}>'.format(obj_name, u', '.join(formatted_args), repr_arg)
else:
return u'<{}({})>'.format(obj_name, u', '.join(formatted_args))
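# Illustrative usage sketch (not part of the original module); `Widget` is a
# hypothetical plain object (no SQLAlchemy table, so no FK formatting applies).
# `is_deleted` is omitted from the output because it matches the given default,
# and `_text` adds the quoted title.
def _format_repr_example():
    class Widget(object):
        def __init__(self):
            self.id = 1
            self.is_deleted = False
            self.title = u'Demo'
    w = Widget()
    return format_repr(w, 'id', is_deleted=False, _text=w.title)  # u'<Widget(1): "Demo">'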
def snakify(name):
"""Converts a camelCased name to snake_case"""
# from http://stackoverflow.com/a/1176023/298479
name = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', name).lower()
def camelize(name):
"""Converts a snake_cased name to camelCase."""
parts = name.split('_')
underscore = ''
if name.startswith('_'):
underscore = '_'
parts = parts[1:]
return underscore + parts[0] + ''.join(x.title() for x in parts[1:])
def _convert_keys(value, convert_func):
if isinstance(value, (list, tuple)):
return type(value)(_convert_keys(x, convert_func) for x in value)
elif not isinstance(value, dict):
return value
return {convert_func(k): _convert_keys(v, convert_func) for k, v in value.iteritems()}
def camelize_keys(dict_):
"""Convert the keys of a dict to camelCase"""
return _convert_keys(dict_, camelize)
def snakify_keys(dict_):
"""Convert the keys of a dict to snake_case"""
return _convert_keys(dict_, snakify)
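# Illustrative usage sketch (not part of the original module); the payload is
# hypothetical. Keys are converted recursively while values stay untouched.
def _snakify_keys_example():
    payload = {'userId': 1, 'userData': {'firstName': u'Ada'}}
    return snakify_keys(payload)  # {'user_id': 1, 'user_data': {'first_name': u'Ada'}}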
def crc32(data):
"""Calculates a CRC32 checksum.
When a unicode object is passed, it is encoded as UTF-8.
"""
if isinstance(data, unicode):
data = data.encode('utf-8')
return binascii.crc32(data) & 0xffffffff
def normalize_phone_number(value):
"""Normalize phone number so it doesn't contain invalid characters
This removes all characters besides a leading +, digits and x as
described here: http://stackoverflow.com/a/123681/298479
"""
return re.sub(r'((?!^)\+)|[^0-9x+]', '', value.strip())
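# Illustrative usage sketch (not part of the original module); the number is
# made up. Everything except digits, 'x' and a leading '+' is dropped.
def _normalize_phone_number_example():
    return normalize_phone_number(u' +1 (555) 123-4567 x89 ')  # u'+15551234567x89'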
def format_full_name(first_name, last_name, title=None, last_name_first=True, last_name_upper=True,
abbrev_first_name=True, show_title=False):
"""Returns the user's name in the specified notation.
Note: Do not use positional arguments (except for the names/title)
when calling this method. Always use keyword arguments!
:param first_name: The first name (may be empty)
:param last_name: The last name
:param title: The title (may be empty/None)
:param last_name_first: if "lastname, firstname" instead of
"firstname lastname" should be used
:param last_name_upper: if the last name should be all-uppercase
:param abbrev_first_name: if the first name should be abbreviated to
use only the first character
:param show_title: if the title should be included
"""
if last_name_upper:
last_name = last_name.upper()
if not first_name:
full_name = last_name
else:
first_name = u'{}.'.format(first_name[0].upper()) if abbrev_first_name else first_name
full_name = u'{}, {}'.format(last_name, first_name) if last_name_first else u'{} {}'.format(first_name,
last_name)
return full_name if not show_title or not title else u'{} {}'.format(title, full_name)
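# Illustrative usage sketch (not part of the original module); the name is
# fictional. With the defaults the last name is upper-cased and put first,
# the first name is abbreviated, and the title only appears with show_title.
def _format_full_name_example():
    return format_full_name(u'Ada', u'Lovelace', title=u'Dr.', show_title=True)
    # -> u'Dr. LOVELACE, A.'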
def sanitize_email(email, require_valid=False):
if '<' in email:
m = re.search(r'<([^>]+)>', email)
email = email if m is None else m.group(1)
if not require_valid or validate_email(email):
return email
else:
return None
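# Illustrative usage sketch (not part of the original module); the address is
# fictional. The address is pulled out of a "Name <address>" string; passing
# require_valid=True would additionally run it through validate_email above.
def _sanitize_email_example():
    return sanitize_email(u'Ada Lovelace <[email protected]>')  # u'[email protected]'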
def sanitize_html(string):
return bleach.clean(string, tags=BLEACH_ALLOWED_TAGS_HTML, attributes=BLEACH_ALLOWED_ATTRIBUTES_HTML,
styles=BLEACH_ALLOWED_STYLES_HTML)
def html_to_plaintext(string):
return html.html5parser.fromstring(string).xpath('string()')
def inject_unicode_debug(s, level=1):
"""
Wrap a string in invisible unicode characters to trigger a unicode
error when erroneously mixing unicode and bytestrings. If unicode
debug mode is not enabled, this function returns its argument
without touching it.
:param s: a unicode string
:param level: the minimum unicode debug level needed to inject
the spaces. the more likely it is to break things
the higher it should be.
"""
# Enabling unicode debugging injects an invisible zero-width space at the
# beginning and end of every translated string. This will cause errors in case
# of implicit conversions from bytes to unicode or vice versa instead of
# silently succeeding for english (ascii) strings and then failing in languages
# where the same string is not plain ascii. This setting should be enabled only
# during development and never in production.
# Level 1 will inject it only in translated strings and is usually safe while
# level 2 will inject it in formatted date/time values too which may result in
# strange/broken behavior in certain form fields.
try:
unicode_debug_level = int(os.environ.get('INDICO_UNICODE_DEBUG', '0'))
except ValueError:
unicode_debug_level = 0
if unicode_debug_level < level:
return s
else:
return u'\N{ZERO WIDTH SPACE}' + s + u'\N{ZERO WIDTH SPACE}'
class RichMarkup(Markup):
"""unicode/Markup subclass that detects preformatted text
Note that HTML in this string will NOT be escaped when displaying
it in a jinja template.
"""
__slots__ = ('_preformatted',)
def __new__(cls, content=u'', preformatted=None):
obj = Markup.__new__(cls, content)
if preformatted is None:
tmp = content.lower()
obj._preformatted = not any(tag in tmp for tag in (u'<p>', u'<p ', u'<br', u'<li>'))
else:
obj._preformatted = preformatted
return obj
def __html__(self):
# XXX: ensure we have no harmful HTML - there are certain malicious values that
# are not caught by the legacy sanitizer that runs at submission time
string = RichMarkup(sanitize_html(unicode(self)), preformatted=self._preformatted)
if string._preformatted:
return u'<div class="preformatted">{}</div>'.format(string)
else:
return string
def __getstate__(self):
return {slot: getattr(self, slot) for slot in self.__slots__ if hasattr(self, slot)}
def __setstate__(self, state):
for slot, value in state.iteritems():
setattr(self, slot, value)
class MarkdownText(Markup):
"""unicode/Markup class that renders markdown."""
def __html__(self):
return render_markdown(unicode(self), extensions=('nl2br', 'tables'))
class PlainText(Markup):
"""unicode/Markup class that renders plain text."""
def __html__(self):
return u'<div class="preformatted">{}</div>'.format(escape(unicode(self)))
def handle_legacy_description(field, obj, get_render_mode=attrgetter('render_mode'),
get_value=attrgetter('_description')):
"""Check if the object in question is using an HTML description and convert it.
The description will be automatically converted to Markdown and a warning will
be shown next to the field.
:param field: the WTForms field to be checked
:param obj: the object whose render mode/description will be checked
"""
from indico.core.db.sqlalchemy.descriptions import RenderMode
from indico.util.i18n import _
if get_render_mode(obj) == RenderMode.html:
field.warning = _(u"This text has been automatically converted from HTML to Markdown. "
u"Please double-check that it's properly displayed.")
ht = HTML2Text(bodywidth=0)
desc = get_value(obj)
if RichMarkup(desc)._preformatted:
desc = desc.replace(u'\n', u'<br>\n')
field.data = ht.handle(desc)
|
|
#
# A class to merge quicklook qa outputs.
#
from __future__ import absolute_import, division, print_function
from desiutil.io import yamlify
import yaml
import json
import numpy as np
import datetime
import pytz
###############################################################
def remove_task(myDict, Key):
if Key in myDict:
del myDict[Key]
return myDict
###############################################################
def rename_task(myDict, oldKey, newKey):
if oldKey in myDict:
task_data = myDict[oldKey]
del myDict[oldKey]
myDict[newKey] = task_data
return myDict
###############################################################
## KeyHead = "KeyHead" or "PARAMS"
def transferKEY(myDict, KeyHead, old_task, new_task, keyList):
if old_task in myDict and new_task in myDict:
for key in keyList:
if key in myDict[old_task][KeyHead]:
data = myDict[old_task][KeyHead][key]
del myDict[old_task][KeyHead][key]
myDict[new_task][KeyHead][key] = data
return myDict
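# Illustrative usage sketch (not part of this module's pipeline); the toy
# dictionary only mimics the shape of the merged QA output. XWSIGMA is moved
# from BOXCAREXTRACT's METRICS into PREPROC's METRICS and removed from the source.
def _transferKEY_example():
    qa = {
        "PREPROC": {"METRICS": {}},
        "BOXCAREXTRACT": {"METRICS": {"XWSIGMA": [1.1, 1.2]}},
    }
    return transferKEY(qa, "METRICS", "BOXCAREXTRACT", "PREPROC", ["XWSIGMA"])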
###############################################################
### Please give the correct re-arrangement recipe here ...
def modify_tasks(myDict):
################
### Moving all keys in keyList under Metrics (from PREPROC to BOXCAREXTRACT)
keyList = ["XWSIGMA", "XWSIGMA_AMP", "XWSIGMA_STATUS"]
if "EXTRACT_QP" in myDict:
myDict = transferKEY(myDict, "METRICS", "EXTRACT_QP", "PREPROC", keyList)
elif "BOXCAREXTRACT" in myDict:
myDict = transferKEY(myDict, "METRICS", "BOXCAREXTRACT", "PREPROC", keyList)
################
keyList = ["XWSIGMA_NORMAL_RANGE", "XWSIGMA_REF", "XWSIGMA_WARN_RANGE"]
if "EXTRACT_QP" in myDict:
myDict = transferKEY(myDict, "PARAMS", "EXTRACT_QP", "PREPROC",keyList)
elif "BOXCAREXTRACT" in myDict:
myDict = transferKEY(myDict, "PARAMS", "BOXCAREXTRACT", "PREPROC",keyList)
################
keyList = ["CHECKHDUS","EXPNUM","CHECKHDUS_STATUS","EXPNUM_STATUS"]
myDict = transferKEY(myDict, "METRICS", "INITIALIZE", "PREPROC", keyList)
################
keyList = ["XYSHIFTS","XYSHIFTS_STATUS"]
if "EXTRACT_QP" in myDict:
myDict = transferKEY(myDict, "METRICS", "FLEXURE", "EXTRACT_QP", keyList)
elif "BOXCAREXTRACT" in myDict:
myDict = transferKEY(myDict, "METRICS", "FLEXURE", "BOXCAREXTRACT", keyList)
################
keyList = ["XYSHIFTS_NORMAL_RANGE", "XYSHIFTS_WARN_RANGE", "XYSHIFTS_DARK_REF", "XYSHIFTS_GRAY_REF","XYSHIFTS_BRIGHT_REF"]
if "EXTRACT_QP" in myDict:
myDict = transferKEY(myDict, "PARAMS", "FLEXURE", "EXTRACT_QP", keyList)
elif "BOXCAREXTRACT" in myDict:
myDict = transferKEY(myDict, "PARAMS", "FLEXURE", "BOXCAREXTRACT", keyList)
################
keyList = ["PEAKCOUNT","PEAKCOUNT_FIB","PEAKCOUNT_NOISE","PEAKCOUNT_STATUS","SKYCONT","SKYCONT_FIBER","SKYCONT_STATUS","SKYRBAND","SKY_RFLUX_DIFF","SKY_FIB_RBAND","FIDSNR_TGT","FIDSNR_TGT_STATUS","FITCOEFF_TGT","MEDIAN_SNR","NUM_NEGATIVE_SNR","SNR_MAG_TGT","SNR_RESID","OBJLIST"]
if "APPLYFIBERFLAT_QP" in myDict:
myDict = transferKEY(myDict, "METRICS", "APPLYFIBERFLAT_QP", "SKYSUB_QP", keyList)
myDict = transferKEY(myDict, "METRICS", "SKYSUB_QP", "APPLYFLUXCALIBRATION", keyList)
elif "APPLYFIBERFLAT_QL" in myDict:
myDict = transferKEY(myDict, "METRICS", "APPLYFIBERFLAT_QL", "SKYSUB_QL", keyList)
myDict = transferKEY(myDict, "METRICS", "SKYSUB_QL", "APPLYFLUXCALIBRATION", keyList)
################
keyList = ["B_CONT","R_CONT","Z_CONT","PEAKCOUNT_NORMAL_RANGE","PEAKCOUNT_BRIGHT_REF","PEAKCOUNT_DARK_REF","PEAKCOUNT_GRAY_REF","PEAKCOUNT_WARN_RANGE","SKYCONT_NORMAL_RANGE","SKYCONT_REF","SKYCONT_WARN_RANGE","SKYCONT_BRIGHT_REF","SKYCONT_DARK_REF","SKYCONT_GRAY_REF","RESIDUAL_CUT","SIGMA_CUT","FIDSNR_TGT_NORMAL_RANGE","FIDSNR_TGT_WARN_RANGE","FIDSNR_TGT_BRIGHT_REF","FIDSNR_TGT_DARK_REF","FIDSNR_TGT_GRAY_REF","FIDMAG"]
if "APPLYFIBERFLAT_QP" in myDict:
myDict = transferKEY(myDict, "PARAMS", "APPLYFIBERFLAT_QP", "SKYSUB_QP", keyList)
myDict = transferKEY(myDict, "PARAMS", "SKYSUB_QP", "APPLYFLUXCALIBRATION", keyList)
elif "APPLYFIBERFLAT_QL" in myDict:
myDict = transferKEY(myDict, "PARAMS", "APPLYFIBERFLAT_QL", "SKYSUB_QL", keyList)
myDict = transferKEY(myDict, "PARAMS", "SKYSUB_QL", "APPLYFLUXCALIBRATION", keyList)
### Changing Task Names
myDict = rename_task(myDict, "PREPROC", "CHECK_CCDs")
myDict = rename_task(myDict, "BOXCAREXTRACT", "CHECK_FIBERS")
myDict = rename_task(myDict, "EXTRACT_QP", "CHECK_FIBERS")
myDict = rename_task(myDict, "APPLYFLUXCALIBRATION", "CHECK_SPECTRA")
myDict = rename_task(myDict, "RESOLUTIONFIT", "CHECK_ARC")
myDict = rename_task(myDict, "COMPUTEFIBERFLAT_QL", "CHECK_FIBERFLAT")
myDict = rename_task(myDict, "COMPUTEFIBERFLAT_QP", "CHECK_FIBERFLAT")
    ### Removing empty (or unused) pipeline steps
myDict = remove_task(myDict, "FLEXURE")
myDict = remove_task(myDict, "APPLYFIBERFLAT_QL")
myDict = remove_task(myDict, "APPLYFIBERFLAT_QP")
myDict = remove_task(myDict, "SKYSUB_QL")
myDict = remove_task(myDict, "SKYSUB_QP")
myDict = remove_task(myDict, "INITIALIZE")
return myDict
###############################################################
### Replacing "PIPELINE_STEPS" with "TASKS"
### Re-ordering Task metrics and Params
def taskMaker(myDict):
if "PIPELINE_STEPS" in myDict:
tasks = {}
task_data = myDict["PIPELINE_STEPS"]
task_data = modify_tasks(task_data)
del myDict["PIPELINE_STEPS"]
myDict["TASKS"] = task_data
return myDict
###############################################################
###################################
# GENERAL_INFO section
#def delKey(d, k, val=None, remove=True):
#if isinstance(d, dict):
#key_list = []
#for key, value in d.items():
#if key==k:
#val = value
#key_list.append(key)
#val = delKey(value, k, val=val, remove=remove)
#if remove:
#for key in key_list:
#del d[key]
#elif isinstance(d, list):
#try:
#for i in range(len(d)):
#val = delKey(d[i], k, val=val, remove=remove)
#except:
#return val
#else: return val
#return val
def delKey(d, k, val=None, remove=True, include=False):
if isinstance(d, dict):
key_list = []
for key, value in d.items():
if (key==k and not include) or (k in key and include):
val = value
key_list.append(key)
val = delKey(value, k, val=val, remove=remove)
if remove:
for key in key_list:
del d[key]
elif isinstance(d, list):
try:
for i in range(len(d)):
val = delKey(d[i], k, val=val, remove=remove)
except:
return val
else: return val
return val
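# Illustrative usage sketch (not part of the merger flow); the nested data is
# made up. delKey walks dicts and lists, returns the value of the last key
# that matched, and deletes every occurrence unless remove=False.
def _delKey_example():
    d = {"CAMERAS": [{"CAMERA": "b0", "RA": 150.1}, {"CAMERA": "r0", "RA": 150.2}]}
    ra = delKey(d, "RA")                      # 150.2; both "RA" entries removed
    cam = delKey(d, "CAMERA", remove=False)   # "r0"; keys are kept
    return ra, cam, d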
###################################
# facilitate the GENERAL_INFO section
def reOrderDict(mergeDict):
for Night in mergeDict["NIGHTS"]:
for Exposure in Night["EXPOSURES"]:
for Camera in Exposure["CAMERAS"]:
ra = delKey(Camera, "RA")
dec = delKey(Camera, "DEC")
program = delKey(Camera, "PROGRAM")
airmass = delKey(Camera, "AIRMASS")
seeing = delKey(Camera, "SEEING")
exptime = delKey(Camera, "EXPTIME")
desispec_run_ver = delKey(Camera, "PROC_DESISPEC_VERSION") # desispec version in the raw FITS header
desispec_fits_ver = delKey(Camera, "FITS_DESISPEC_VERSION") # desispec version of the software release
quicklook_run_ver = delKey(Camera, "PROC_QuickLook_VERSION") # version of the quicklook development state
fibermags = delKey(Camera,"FIBER_MAGS")
skyfib_id = delKey(Camera,"SKYFIBERID")
nskyfib = delKey(Camera,"NSKY_FIB")
delKey(Camera, "SKYSUB_QL")
delKey(Camera, "MED_RESID")
delKey(Camera, "MED_RESID_FIBER")
delKey(Camera, "MED_RESID_WAVE")
delKey(Camera, "MED_RESID")
delKey(Camera, "MED_RESID_FIBER")
delKey(Camera, "RESID_PER")
delKey(Camera, "RESID_STATUS")
delKey(Camera, "BIAS")
delKey(Camera, "NOISE")
elg_fiberid = delKey(Camera, "ELG_FIBERID")
lrg_fiberid = delKey(Camera, "LRG_FIBERID")
qso_fiberid = delKey(Camera, "QSO_FIBERID")
star_fiberid = delKey(Camera, "STAR_FIBERID", remove=False)
std_fiberid = delKey(Camera, "STD_FIBERID", remove=False)
if star_fiberid is None:
star_fiberid = std_fiberid
b_peaks = delKey(Camera, "B_PEAKS")
r_peaks = delKey(Camera, "R_PEAKS")
z_peaks = delKey(Camera, "Z_PEAKS")
try: ra = [float("%.5f" % m) for m in ra]
except: ra=None
try: dec = [float("%.5f" % m) for m in dec]
except: dec=None
                # Date/time of the merger, i.e. the QL run; time is in UTC (= Mayall local time + 7h)
                QLrun_datime = datetime.datetime.now(tz=pytz.utc).isoformat()
Camera["GENERAL_INFO"]={"QLrun_datime_UTC":QLrun_datime,"PROGRAM":format(program).upper(),"SEEING":seeing,"AIRMASS":airmass,"EXPTIME":exptime,"FITS_DESISPEC_VERSION":desispec_fits_ver,"PROC_DESISPEC_VERSION":desispec_run_ver,"PROC_QuickLook_VERSION":quicklook_run_ver,"RA":ra,"DEC":dec,"SKY_FIBERID":skyfib_id,"ELG_FIBERID":elg_fiberid,"LRG_FIBERID":lrg_fiberid,"QSO_FIBERID":qso_fiberid,"STAR_FIBERID":star_fiberid,"B_PEAKS":b_peaks,"R_PEAKS":r_peaks,"Z_PEAKS":z_peaks,"FIBER_MAGS":fibermags,"NSKY_FIB":nskyfib}
###################################
def EditDic(Camera):
desispec_run_ver = delKey(Camera, "PROC_DESISPEC_VERSION") # desispec version in the raw FITS header
desispec_fits_ver = delKey(Camera, "FITS_DESISPEC_VERSION") # desispec version of the software release
    quicklook_run_ver = delKey(Camera, "PROC_QuickLook_VERSION") # version of the quicklook development state
delKey(Camera, "SKYSUB_QL")
delKey(Camera, "MED_RESID")
delKey(Camera, "MED_RESID_FIBER")
delKey(Camera, "MED_RESID_WAVE")
delKey(Camera, "MED_RESID")
delKey(Camera, "MED_RESID_FIBER")
delKey(Camera, "RESID_PER")
delKey(Camera, "RESID_STATUS")
delKey(Camera, "BIAS")
delKey(Camera, "NOISE")
delKey(Camera, "XWSHIFT_AMP")
delKey(Camera, "XWSIGMA_SHIFT")
delKey(Camera, "NREJ")
delKey(Camera, "MED_SKY")
delKey(Camera, "NBAD_PCHI")
all_Steps=delKey(Camera,"PIPELINE_STEPS") # returns a list of dictionaries, each holding one step
step_dict={}
for step in all_Steps:
if step['PIPELINE_STEP'] == 'INITIALIZE':
Camera['GENERAL_INFO']=delKey(step,"METRICS",remove=False,include=True)
else:
step_Name=delKey(step,"PIPELINE_STEP")
step_dict[step_Name]=step
Camera["PIPELINE_STEPS"]=step_dict
program=Camera['GENERAL_INFO']['PROGRAM']
sciprog = ["DARK","GRAY","BRIGHT"]
QAlist=["BIAS_AMP","LITFRAC_AMP","NOISE_AMP","XWSIGMA","XYSHIFTS","NGOODFIB","DELTAMAG_TGT","FIDSNR_TGT","SKYRBAND","PEAKCOUNT", "SKYCONT"]
if program in sciprog:
sciprog.remove(program)
for prog in sciprog:
for qa in QAlist:
delKey(Camera,qa+'_'+prog+"_REF",include=True)
Camera["GENERAL_INFO"]["FITS_DESISPEC_VERSION"]=desispec_fits_ver
Camera["GENERAL_INFO"]["PROC_DESISPEC_VERSION"]=desispec_run_ver
Camera["GENERAL_INFO"]["PROC_QuickLook_VERSION"]=quicklook_run_ver
###################################
class QL_QAMerger:
def __init__(self,night,expid,flavor,camera,program,convdict):
self.__night=night
self.__expid=expid
self.__flavor=flavor
self.__camera=camera
self.__program=program
self.__stepsArr=[]
#self.__schema={'NIGHTS':[{'NIGHT':night,'EXPOSURES':[{'EXPID':expid,'FLAVOR':flavor,'PROGRAM':program, 'CAMERAS':[{'CAMERA':camera, 'PIPELINE_STEPS':self.__stepsArr}]}]}]}
#general_Info = esnEditDic(self.__stepsArr)
# Get flux information from fibermap and convert to fiber magnitudes
if flavor == 'science':
if camera[0].lower()=='b':decamfilter='G'
elif camera[0].lower()=='r': decamfilter='R'
elif camera[0].lower()=='z': decamfilter='Z'
self.__schema={'NIGHT':night,'EXPID':expid,'CAMERA':camera,'FLAVOR':flavor,'PIPELINE_STEPS':self.__stepsArr}
else:
self.__schema={'NIGHT':night,'EXPID':expid,'CAMERA':camera,'FLAVOR':flavor,'PIPELINE_STEPS':self.__stepsArr}
class QL_Step:
def __init__(self,paName,paramsDict,metricsDict):
self.__paName=paName
self.__pDict=paramsDict
self.__mDict=metricsDict
def getStepName(self):
return self.__paName
def addParams(self,pdict):
self.__pDict.update(pdict)
def addMetrics(self,mdict):
self.__mDict.update(mdict)
def addPipelineStep(self,stepName):
metricsDict={}
paramsDict={}
stepDict={"PIPELINE_STEP":stepName.upper(),'METRICS':metricsDict,'PARAMS':paramsDict}
self.__stepsArr.append(stepDict)
return self.QL_Step(stepName,paramsDict,metricsDict)
def writeTojsonFile(self,fileName):
g=open(fileName,'w')
myDict = yamlify(self.__schema)
#reOrderDict(myDict)
# remove lists ... after this step there is no list of dictionaries
EditDic(myDict)
        # this step modifies the tasks, renames them, and re-arranges the metrics and corresponding params
myDict = taskMaker(myDict)
json.dump(myDict, g, sort_keys=True, indent=4)
g.close()
|
|
from test.test_support import TESTFN, run_unittest
import mmap
import unittest
import os, re, itertools
PAGESIZE = mmap.PAGESIZE
class MmapTests(unittest.TestCase):
def setUp(self):
if os.path.exists(TESTFN):
os.unlink(TESTFN)
def tearDown(self):
try:
os.unlink(TESTFN)
except OSError:
pass
def test_basic(self):
# Test mmap module on Unix systems and Windows
# Create a file to be mmap'ed.
f = open(TESTFN, 'w+')
try:
# Write 2 pages worth of data to the file
f.write('\0'* PAGESIZE)
f.write('foo')
f.write('\0'* (PAGESIZE-3) )
f.flush()
m = mmap.mmap(f.fileno(), 2 * PAGESIZE)
f.close()
# Simple sanity checks
tp = str(type(m)) # SF bug 128713: segfaulted on Linux
self.assertEqual(m.find('foo'), PAGESIZE)
self.assertEqual(len(m), 2*PAGESIZE)
self.assertEqual(m[0], '\0')
self.assertEqual(m[0:3], '\0\0\0')
# Shouldn't crash on boundary (Issue #5292)
self.assertRaises(IndexError, m.__getitem__, len(m))
self.assertRaises(IndexError, m.__setitem__, len(m), '\0')
# Modify the file's content
m[0] = '3'
m[PAGESIZE +3: PAGESIZE +3+3] = 'bar'
# Check that the modification worked
self.assertEqual(m[0], '3')
self.assertEqual(m[0:3], '3\0\0')
self.assertEqual(m[PAGESIZE-1 : PAGESIZE + 7], '\0foobar\0')
m.flush()
# Test doing a regular expression match in an mmap'ed file
match = re.search('[A-Za-z]+', m)
if match is None:
self.fail('regex match on mmap failed!')
else:
start, end = match.span(0)
length = end - start
self.assertEqual(start, PAGESIZE)
self.assertEqual(end, PAGESIZE + 6)
# test seeking around (try to overflow the seek implementation)
m.seek(0,0)
self.assertEqual(m.tell(), 0)
m.seek(42,1)
self.assertEqual(m.tell(), 42)
m.seek(0,2)
self.assertEqual(m.tell(), len(m))
# Try to seek to negative position...
self.assertRaises(ValueError, m.seek, -1)
# Try to seek beyond end of mmap...
self.assertRaises(ValueError, m.seek, 1, 2)
# Try to seek to negative position...
self.assertRaises(ValueError, m.seek, -len(m)-1, 2)
# Try resizing map
try:
m.resize(512)
except SystemError:
# resize() not supported
# No messages are printed, since the output of this test suite
# would then be different across platforms.
pass
else:
# resize() is supported
self.assertEqual(len(m), 512)
# Check that we can no longer seek beyond the new size.
self.assertRaises(ValueError, m.seek, 513, 0)
# Check that the underlying file is truncated too
# (bug #728515)
f = open(TESTFN)
f.seek(0, 2)
self.assertEqual(f.tell(), 512)
f.close()
self.assertEqual(m.size(), 512)
m.close()
finally:
try:
f.close()
except OSError:
pass
def test_access_parameter(self):
# Test for "access" keyword parameter
mapsize = 10
open(TESTFN, "wb").write("a"*mapsize)
f = open(TESTFN, "rb")
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_READ)
self.assertEqual(m[:], 'a'*mapsize, "Readonly memory map data incorrect.")
# Ensuring that readonly mmap can't be slice assigned
try:
m[:] = 'b'*mapsize
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be item assigned
try:
m[0] = 'b'
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be write() to
try:
m.seek(0,0)
m.write('abc')
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be write_byte() to
try:
m.seek(0,0)
m.write_byte('d')
except TypeError:
pass
else:
self.fail("Able to write to readonly memory map")
# Ensuring that readonly mmap can't be resized
try:
m.resize(2*mapsize)
except SystemError: # resize is not universally supported
pass
except TypeError:
pass
else:
self.fail("Able to resize readonly memory map")
f.close()
del m, f
self.assertEqual(open(TESTFN, "rb").read(), 'a'*mapsize,
"Readonly memory map data file was modified")
# Opening mmap with size too big
import sys
f = open(TESTFN, "r+b")
try:
m = mmap.mmap(f.fileno(), mapsize+1)
except ValueError:
# we do not expect a ValueError on Windows
# CAUTION: This also changes the size of the file on disk, and
# later tests assume that the length hasn't changed. We need to
# repair that.
if sys.platform.startswith('win'):
self.fail("Opening mmap with size+1 should work on Windows.")
else:
# we expect a ValueError on Unix, but not on Windows
if not sys.platform.startswith('win'):
self.fail("Opening mmap with size+1 should raise ValueError.")
m.close()
f.close()
if sys.platform.startswith('win'):
# Repair damage from the resizing test.
f = open(TESTFN, 'r+b')
f.truncate(mapsize)
f.close()
# Opening mmap with access=ACCESS_WRITE
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_WRITE)
# Modifying write-through memory map
m[:] = 'c'*mapsize
self.assertEqual(m[:], 'c'*mapsize,
"Write-through memory map memory not updated properly.")
m.flush()
m.close()
f.close()
f = open(TESTFN, 'rb')
stuff = f.read()
f.close()
self.assertEqual(stuff, 'c'*mapsize,
"Write-through memory map data file not updated properly.")
# Opening mmap with access=ACCESS_COPY
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), mapsize, access=mmap.ACCESS_COPY)
# Modifying copy-on-write memory map
m[:] = 'd'*mapsize
self.assertEqual(m[:], 'd' * mapsize,
"Copy-on-write memory map data not written correctly.")
m.flush()
self.assertEqual(open(TESTFN, "rb").read(), 'c'*mapsize,
"Copy-on-write test data file should not be modified.")
# Ensuring copy-on-write maps cannot be resized
self.assertRaises(TypeError, m.resize, 2*mapsize)
f.close()
del m, f
# Ensuring invalid access parameter raises exception
f = open(TESTFN, "r+b")
self.assertRaises(ValueError, mmap.mmap, f.fileno(), mapsize, access=4)
f.close()
if os.name == "posix":
# Try incompatible flags, prot and access parameters.
f = open(TESTFN, "r+b")
self.assertRaises(ValueError, mmap.mmap, f.fileno(), mapsize,
flags=mmap.MAP_PRIVATE,
prot=mmap.PROT_READ, access=mmap.ACCESS_WRITE)
f.close()
def test_bad_file_desc(self):
# Try opening a bad file descriptor...
self.assertRaises(mmap.error, mmap.mmap, -2, 4096)
def test_tougher_find(self):
# Do a tougher .find() test. SF bug 515943 pointed out that, in 2.2,
# searching for data with embedded \0 bytes didn't work.
f = open(TESTFN, 'w+')
data = 'aabaac\x00deef\x00\x00aa\x00'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
for start in range(n+1):
for finish in range(start, n+1):
slice = data[start : finish]
self.assertEqual(m.find(slice), data.find(slice))
self.assertEqual(m.find(slice + 'x'), -1)
m.close()
def test_find_end(self):
# test the new 'end' parameter works as expected
f = open(TESTFN, 'w+')
data = 'one two ones'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
self.assertEqual(m.find('one'), 0)
self.assertEqual(m.find('ones'), 8)
self.assertEqual(m.find('one', 0, -1), 0)
self.assertEqual(m.find('one', 1), 8)
self.assertEqual(m.find('one', 1, -1), 8)
self.assertEqual(m.find('one', 1, -2), -1)
def test_rfind(self):
# test the new 'end' parameter works as expected
f = open(TESTFN, 'w+')
data = 'one two ones'
n = len(data)
f.write(data)
f.flush()
m = mmap.mmap(f.fileno(), n)
f.close()
self.assertEqual(m.rfind('one'), 8)
self.assertEqual(m.rfind('one '), 0)
self.assertEqual(m.rfind('one', 0, -1), 8)
self.assertEqual(m.rfind('one', 0, -2), 0)
self.assertEqual(m.rfind('one', 1, -1), 8)
self.assertEqual(m.rfind('one', 1, -2), -1)
def test_double_close(self):
# make sure a double close doesn't crash on Solaris (Bug# 665913)
f = open(TESTFN, 'w+')
f.write(2**16 * 'a') # Arbitrary character
f.close()
f = open(TESTFN)
mf = mmap.mmap(f.fileno(), 2**16, access=mmap.ACCESS_READ)
mf.close()
mf.close()
f.close()
def test_entire_file(self):
# test mapping of entire file by passing 0 for map length
if hasattr(os, "stat"):
f = open(TESTFN, "w+")
f.write(2**16 * 'm') # Arbitrary character
f.close()
f = open(TESTFN, "rb+")
mf = mmap.mmap(f.fileno(), 0)
self.assertEqual(len(mf), 2**16, "Map size should equal file size.")
self.assertEqual(mf.read(2**16), 2**16 * "m")
mf.close()
f.close()
def test_move(self):
        # make sure move() works everywhere (64-bit format problem earlier)
f = open(TESTFN, 'w+')
f.write("ABCDEabcde") # Arbitrary character
f.flush()
mf = mmap.mmap(f.fileno(), 10)
mf.move(5, 0, 5)
self.assertEqual(mf[:], "ABCDEABCDE", "Map move should have duplicated front 5")
mf.close()
f.close()
        # more exhaustive test
data = "0123456789"
for dest in range(len(data)):
for src in range(len(data)):
for count in range(len(data) - max(dest, src)):
expected = data[:dest] + data[src:src+count] + data[dest+count:]
m = mmap.mmap(-1, len(data))
m[:] = data
m.move(dest, src, count)
self.assertEqual(m[:], expected)
m.close()
# segfault test (Issue 5387)
m = mmap.mmap(-1, 100)
offsets = [-100, -1, 0, 1, 100]
for source, dest, size in itertools.product(offsets, offsets, offsets):
try:
m.move(source, dest, size)
except ValueError:
pass
self.assertRaises(ValueError, m.move, -1, -1, -1)
self.assertRaises(ValueError, m.move, -1, -1, 0)
self.assertRaises(ValueError, m.move, -1, 0, -1)
self.assertRaises(ValueError, m.move, 0, -1, -1)
self.assertRaises(ValueError, m.move, -1, 0, 0)
self.assertRaises(ValueError, m.move, 0, -1, 0)
self.assertRaises(ValueError, m.move, 0, 0, -1)
m.close()
def test_anonymous(self):
# anonymous mmap.mmap(-1, PAGE)
m = mmap.mmap(-1, PAGESIZE)
for x in xrange(PAGESIZE):
self.assertEqual(m[x], '\0', "anonymously mmap'ed contents should be zero")
for x in xrange(PAGESIZE):
m[x] = ch = chr(x & 255)
self.assertEqual(m[x], ch)
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
s = "".join(chr(c) for c in reversed(range(256)))
m = mmap.mmap(-1, len(s))
m[:] = s
self.assertEqual(m[:], s)
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(m[start:stop:step],
s[start:stop:step])
def test_extended_set_del_slice(self):
# Test extended slicing by comparing with list slicing.
s = "".join(chr(c) for c in reversed(range(256)))
m = mmap.mmap(-1, len(s))
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
m[:] = s
self.assertEqual(m[:], s)
L = list(s)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data = "".join(reversed(data))
L[start:stop:step] = data
m[start:stop:step] = data
self.assertEquals(m[:], "".join(L))
def make_mmap_file (self, f, halfsize):
# Write 2 pages worth of data to the file
f.write ('\0' * halfsize)
f.write ('foo')
f.write ('\0' * (halfsize - 3))
f.flush ()
return mmap.mmap (f.fileno(), 0)
def test_offset (self):
f = open (TESTFN, 'w+b')
try: # unlink TESTFN no matter what
halfsize = mmap.ALLOCATIONGRANULARITY
m = self.make_mmap_file (f, halfsize)
m.close ()
f.close ()
mapsize = halfsize * 2
# Try invalid offset
f = open(TESTFN, "r+b")
for offset in [-2, -1, None]:
try:
m = mmap.mmap(f.fileno(), mapsize, offset=offset)
self.assertEqual(0, 1)
except (ValueError, TypeError, OverflowError):
pass
else:
self.assertEqual(0, 0)
f.close()
# Try valid offset, hopefully 8192 works on all OSes
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), mapsize - halfsize, offset=halfsize)
self.assertEqual(m[0:3], 'foo')
f.close()
# Try resizing map
try:
m.resize(512)
except SystemError:
pass
else:
# resize() is supported
self.assertEqual(len(m), 512)
# Check that we can no longer seek beyond the new size.
self.assertRaises(ValueError, m.seek, 513, 0)
# Check that the content is not changed
self.assertEqual(m[0:3], 'foo')
# Check that the underlying file is truncated too
f = open(TESTFN)
f.seek(0, 2)
self.assertEqual(f.tell(), halfsize + 512)
f.close()
self.assertEqual(m.size(), halfsize + 512)
m.close()
finally:
f.close()
try:
os.unlink(TESTFN)
except OSError:
pass
def test_subclass(self):
class anon_mmap(mmap.mmap):
def __new__(klass, *args, **kwargs):
return mmap.mmap.__new__(klass, -1, *args, **kwargs)
anon_mmap(PAGESIZE)
def test_prot_readonly(self):
if not hasattr(mmap, 'PROT_READ'):
return
mapsize = 10
open(TESTFN, "wb").write("a"*mapsize)
f = open(TESTFN, "rb")
m = mmap.mmap(f.fileno(), mapsize, prot=mmap.PROT_READ)
self.assertRaises(TypeError, m.write, "foo")
f.close()
def test_error(self):
self.assert_(issubclass(mmap.error, EnvironmentError))
self.assert_("mmap.error" in str(mmap.error))
def test_io_methods(self):
data = "0123456789"
open(TESTFN, "wb").write("x"*len(data))
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), len(data))
f.close()
# Test write_byte()
for i in xrange(len(data)):
self.assertEquals(m.tell(), i)
m.write_byte(data[i:i+1])
self.assertEquals(m.tell(), i+1)
self.assertRaises(ValueError, m.write_byte, "x")
self.assertEquals(m[:], data)
# Test read_byte()
m.seek(0)
for i in xrange(len(data)):
self.assertEquals(m.tell(), i)
self.assertEquals(m.read_byte(), data[i:i+1])
self.assertEquals(m.tell(), i+1)
self.assertRaises(ValueError, m.read_byte)
# Test read()
m.seek(3)
self.assertEquals(m.read(3), "345")
self.assertEquals(m.tell(), 6)
# Test write()
m.seek(3)
m.write("bar")
self.assertEquals(m.tell(), 6)
self.assertEquals(m[:], "012bar6789")
m.seek(8)
self.assertRaises(ValueError, m.write, "bar")
if os.name == 'nt':
def test_tagname(self):
data1 = "0123456789"
data2 = "abcdefghij"
assert len(data1) == len(data2)
# Test same tag
m1 = mmap.mmap(-1, len(data1), tagname="foo")
m1[:] = data1
m2 = mmap.mmap(-1, len(data2), tagname="foo")
m2[:] = data2
self.assertEquals(m1[:], data2)
self.assertEquals(m2[:], data2)
m2.close()
m1.close()
            # Test different tag
m1 = mmap.mmap(-1, len(data1), tagname="foo")
m1[:] = data1
m2 = mmap.mmap(-1, len(data2), tagname="boo")
m2[:] = data2
self.assertEquals(m1[:], data1)
self.assertEquals(m2[:], data2)
m2.close()
m1.close()
def test_crasher_on_windows(self):
# Should not crash (Issue 1733986)
m = mmap.mmap(-1, 1000, tagname="foo")
try:
mmap.mmap(-1, 5000, tagname="foo")[:] # same tagname, but larger size
except:
pass
m.close()
# Should not crash (Issue 5385)
open(TESTFN, "wb").write("x"*10)
f = open(TESTFN, "r+b")
m = mmap.mmap(f.fileno(), 0)
f.close()
try:
m.resize(0) # will raise WindowsError
except:
pass
try:
m[:]
except:
pass
m.close()
def test_main():
run_unittest(MmapTests)
if __name__ == '__main__':
test_main()
|
|
# -*- coding: utf-8 -*-
# Django settings for olympia project.
import datetime
import logging
import os
import socket
import dj_database_url
from django.utils.functional import lazy
from django.core.urlresolvers import reverse_lazy
from heka.config import client_from_dict_config
ALLOWED_HOSTS = [
'.allizom.org',
'.mozilla.org',
'.mozilla.com',
'.mozilla.net',
]
# jingo-minify settings
CACHEBUST_IMGS = True
try:
# If we have build ids available, we'll grab them here and add them to our
# CACHE_PREFIX. This will let us not have to flush memcache during updates
# and it will let us preload data into it before a production push.
from build import BUILD_ID_CSS, BUILD_ID_JS
build_id = "%s%s" % (BUILD_ID_CSS[:2], BUILD_ID_JS[:2])
except ImportError:
build_id = ""
# jingo-minify: Style sheet media attribute default
CSS_MEDIA_DEFAULT = 'all'
# Make filepaths relative to the root of olympia.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ROOT = os.path.join(BASE_DIR, '..', '..')
def path(*folders):
return os.path.join(ROOT, *folders)
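# Illustrative sketch (not part of the original settings): path() simply joins
# its arguments onto ROOT, so e.g. path('locale') points at <checkout>/locale.
def _path_example():
    return path('static', 'js') == os.path.join(ROOT, 'static', 'js')  # True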
# We need to track this because hudson can't just call its checkout "olympia".
# It puts it in a dir called "workspace". Way to be, hudson.
ROOT_PACKAGE = os.path.basename(ROOT)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = True
# LESS CSS OPTIONS (Debug only).
LESS_PREPROCESS = True # Compile LESS with Node, rather than client-side JS?
LESS_LIVE_REFRESH = False # Refresh the CSS on save?
LESS_BIN = 'lessc'
# Path to stylus (to compile .styl files).
STYLUS_BIN = 'stylus'
# Path to cleancss (our CSS minifier).
CLEANCSS_BIN = 'cleancss'
# Path to uglifyjs (our JS minifier).
UGLIFY_BIN = 'uglifyjs' # Set as None to use YUI instead (at your risk).
FLIGTAR = '[email protected]'
EDITORS_EMAIL = '[email protected]'
SENIOR_EDITORS_EMAIL = '[email protected]'
THEMES_EMAIL = '[email protected]'
ABUSE_EMAIL = '[email protected]'
NOBODY_EMAIL = '[email protected]'
# Add Access-Control-Allow-Origin: * header for the new API with
# django-cors-headers.
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r'^/api/v3/.*$'
INTERNAL_DOMAINS = ['localhost:3000']
CORS_ENDPOINT_OVERRIDES = [
(r'^/api/v3/internal/accounts/login/?$', {
'CORS_ORIGIN_ALLOW_ALL': False,
'CORS_ORIGIN_WHITELIST': INTERNAL_DOMAINS,
'CORS_ALLOW_CREDENTIALS': True,
}),
(r'^/api/v3/internal/.*$', {
'CORS_ORIGIN_ALLOW_ALL': False,
'CORS_ORIGIN_WHITELIST': INTERNAL_DOMAINS,
}),
]
DATABASE_URL = os.environ.get('DATABASE_URL',
'mysql://root:@localhost/olympia')
DATABASES = {'default': dj_database_url.parse(DATABASE_URL)}
DATABASES['default']['OPTIONS'] = {'sql_mode': 'STRICT_ALL_TABLES'}
DATABASES['default']['TEST_CHARSET'] = 'utf8'
DATABASES['default']['TEST_COLLATION'] = 'utf8_general_ci'
# Run all views in a transaction unless they are decorated not to.
DATABASES['default']['ATOMIC_REQUESTS'] = True
# Pool our database connections up for 300 seconds
DATABASES['default']['CONN_MAX_AGE'] = 300
# A database to be used by the services scripts, which does not use Django.
# The settings can be copied from DATABASES, but since it's not a full Django
# database connection, only some values are supported.
SERVICES_DATABASE = {
'NAME': DATABASES['default']['NAME'],
'USER': DATABASES['default']['USER'],
'PASSWORD': DATABASES['default']['PASSWORD'],
'HOST': DATABASES['default']['HOST'],
'PORT': DATABASES['default']['PORT'],
}
DATABASE_ROUTERS = ('multidb.PinningMasterSlaveRouter',)
# Put the aliases for your slave databases in this list.
SLAVE_DATABASES = []
PASSWORD_HASHERS = (
'olympia.users.models.SHA512PasswordHasher',
)
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
# Accepted locales
# Note: If you update this list, don't forget to also update the locale
# permissions in the database.
AMO_LANGUAGES = (
'af', 'ar', 'bg', 'bn-BD', 'ca', 'cs', 'da', 'de', 'el', 'en-GB', 'en-US',
'es', 'eu', 'fa', 'fi', 'fr', 'ga-IE', 'he', 'hu', 'id', 'it', 'ja', 'ko',
'mk', 'mn', 'nl', 'pl', 'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sl', 'sq',
'sv-SE', 'uk', 'vi', 'zh-CN', 'zh-TW',
)
# Explicit conversion of a shorter language code into a more specific one.
SHORTER_LANGUAGES = {
'en': 'en-US', 'ga': 'ga-IE', 'pt': 'pt-PT', 'sv': 'sv-SE', 'zh': 'zh-CN'
}
# Not shown on the site, but .po files exist and these are available on the
# L10n dashboard. Generally languages start here and move into AMO_LANGUAGES.
HIDDEN_LANGUAGES = ('cy', 'hr', 'sr', 'sr-Latn', 'tr')
def lazy_langs(languages):
from product_details import product_details
if not product_details.languages:
return {}
return dict([(i.lower(), product_details.languages[i]['native'])
for i in languages])
# Where product details are stored see django-mozilla-product-details
PROD_DETAILS_DIR = path('src', 'olympia', 'lib', 'product_json')
PROD_DETAILS_URL = 'https://svn.mozilla.org/libs/product-details/json/'
PROD_DETAILS_STORAGE = 'olympia.lib.product_details_backend.NoCachePDFileStorage' # noqa
# Override Django's built-in with our native names
LANGUAGES = lazy(lazy_langs, dict)(AMO_LANGUAGES)
RTL_LANGUAGES = ('ar', 'fa', 'fa-IR', 'he')
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in AMO_LANGUAGES])
LOCALE_PATHS = (
path('locale'),
)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# The host currently running the site. Only use this in code for good reason;
# the site is designed to run on a cluster and should continue to support that
HOSTNAME = socket.gethostname()
# The front end domain of the site. If you're not running on a cluster this
# might be the same as HOSTNAME but don't depend on that. Use this when you
# need the real domain.
DOMAIN = HOSTNAME
# Full base URL for your main site including protocol. No trailing slash.
# Example: https://addons.mozilla.org
SITE_URL = 'http://%s' % DOMAIN
# Domain of the services site. This is where your API and in-product pages
# live.
SERVICES_DOMAIN = 'services.%s' % DOMAIN
# Full URL to your API service. No trailing slash.
# Example: https://services.addons.mozilla.org
SERVICES_URL = 'http://%s' % SERVICES_DOMAIN
# The domain of the mobile site.
MOBILE_DOMAIN = 'm.%s' % DOMAIN
# The full url of the mobile site.
MOBILE_SITE_URL = 'http://%s' % MOBILE_DOMAIN
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = path('user-media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/user-media/'
# Absolute path to a temporary storage area
TMP_PATH = path('tmp')
# Tarballs in DUMPED_APPS_PATH are deleted 30 days after they have been written.
DUMPED_APPS_DAYS_DELETE = 3600 * 24 * 30
# Tarballs in DUMPED_USERS_PATH are deleted 30 days after they have been written.
DUMPED_USERS_DAYS_DELETE = 3600 * 24 * 30
# Paths that aren't just a single "/" and don't require any locale or app prefix.
SUPPORTED_NONAPPS_NONLOCALES_PREFIX = (
'api/v3',
'blocked/blocklists.json',
)
# paths that don't require an app prefix
SUPPORTED_NONAPPS = (
'about', 'admin', 'apps', 'blocklist', 'contribute.json', 'credits',
'developer_agreement', 'developer_faq', 'developers', 'editors', 'faq',
'jsi18n', 'review_guide', 'google1f3e37b7351799a5.html',
'robots.txt', 'statistics', 'services', 'sunbird', 'static', 'user-media',
'__version__',
)
DEFAULT_APP = 'firefox'
# paths that don't require a locale prefix
SUPPORTED_NONLOCALES = (
'contribute.json', 'google1f3e37b7351799a5.html', 'robots.txt', 'services',
'downloads', 'blocklist', 'static', 'user-media', '__version__',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'this-is-a-dummy-key-and-its-overridden-for-prod-servers'
# Templates
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'olympia.lib.template_loader.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# We don't want jingo's template loader to pick up templates for third party
# apps that don't use Jinja2. The following is a list of prefixes for jingo to
# ignore.
JINGO_EXCLUDE_APPS = (
'django_extensions',
'admin',
'toolbar_statsd',
'registration',
'rest_framework',
'debug_toolbar',
'waffle',
)
JINGO_EXCLUDE_PATHS = (
'users/email',
'reviews/emails',
'editors/emails',
'amo/emails',
'devhub/email/revoked-key-email.ltxt',
'devhub/email/new-key-email.ltxt'
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'session_csrf.context_processor',
'django.contrib.messages.context_processors.messages',
'olympia.amo.context_processors.app',
'olympia.amo.context_processors.i18n',
'olympia.amo.context_processors.global_settings',
'olympia.amo.context_processors.static_url',
'jingo_minify.helpers.build_ids',
)
TEMPLATE_DIRS = (
path('media', 'docs'),
path('src/olympia/templates'),
)
def JINJA_CONFIG():
import jinja2
from django.conf import settings
from django.core.cache import cache
config = {
'extensions': [
'olympia.amo.ext.cache',
'puente.ext.i18n',
'waffle.jinja.WaffleExtension',
'jinja2.ext.do',
'jinja2.ext.with_',
'jinja2.ext.loopcontrols'
],
'finalize': lambda x: x if x is not None else '',
'autoescape': True,
}
if False and not settings.DEBUG:
# We're passing the _cache object directly to jinja because
# Django can't store binary directly; it enforces unicode on it.
# Details: http://jinja.pocoo.org/2/documentation/api#bytecode-cache
# and in the errors you get when you try it the other way.
bc = jinja2.MemcachedBytecodeCache(cache._cache,
"%sj2:" % settings.CACHE_PREFIX)
config['cache_size'] = -1 # Never clear the cache
config['bytecode_cache'] = bc
return config
MIDDLEWARE_CLASSES = (
# AMO URL middleware comes first so everyone else sees nice URLs.
'django_statsd.middleware.GraphiteRequestTimingMiddleware',
'django_statsd.middleware.GraphiteMiddleware',
'olympia.amo.middleware.LocaleAndAppURLMiddleware',
# Mobile detection should happen in Zeus.
'mobility.middleware.DetectMobileMiddleware',
'mobility.middleware.XMobileMiddleware',
'olympia.amo.middleware.RemoveSlashMiddleware',
# Munging REMOTE_ADDR must come before ThreadRequest.
'commonware.middleware.SetRemoteAddrFromForwardedFor',
'commonware.middleware.FrameOptionsHeader',
'commonware.middleware.XSSProtectionHeader',
'commonware.middleware.ContentTypeOptionsHeader',
'commonware.middleware.StrictTransportMiddleware',
'multidb.middleware.PinningRouterMiddleware',
'waffle.middleware.WaffleMiddleware',
# CSP and CORS need to come before CommonMiddleware because they might
# need to add headers to 304 responses returned by CommonMiddleware.
'csp.middleware.CSPMiddleware',
'corsheaders.middleware.CorsMiddleware',
'olympia.amo.middleware.CommonMiddleware',
'olympia.amo.middleware.NoVarySessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'olympia.amo.middleware.AuthenticationMiddlewareWithoutAPI',
'commonware.log.ThreadRequestMiddleware',
'olympia.search.middleware.ElasticsearchExceptionMiddleware',
'session_csrf.CsrfMiddleware',
# This should come after authentication middleware
'olympia.access.middleware.ACLMiddleware',
'commonware.middleware.ScrubRequestOnException',
)
# Auth
AUTHENTICATION_BACKENDS = (
'olympia.users.backends.AmoUserBackend',
)
AUTH_USER_MODEL = 'users.UserProfile'
# Override this in the site settings.
ROOT_URLCONF = 'olympia.urls'
INSTALLED_APPS = (
'olympia.core',
'olympia.amo', # amo comes first so it always takes precedence.
'olympia.abuse',
'olympia.access',
'olympia.accounts',
'olympia.addons',
'olympia.api',
'olympia.applications',
'olympia.bandwagon',
'olympia.blocklist',
'olympia.browse',
'olympia.compat',
'olympia.devhub',
'olympia.discovery',
'olympia.editors',
'olympia.files',
'olympia.internal_tools',
'olympia.legacy_api',
'olympia.legacy_discovery',
'olympia.lib.es',
'olympia.pages',
'olympia.reviews',
'olympia.search',
'olympia.stats',
'olympia.tags',
'olympia.translations',
'olympia.users',
'olympia.versions',
'olympia.zadmin',
# Third party apps
'product_details',
'moz_header',
'cronjobs',
'csp',
'aesfield',
'django_extensions',
'raven.contrib.django',
'rest_framework',
'waffle',
'jingo_minify',
'puente',
# Django contrib apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
# Has to load after auth
'django_statsd',
)
# These apps are only needed in a testing environment. They are added to
# INSTALLED_APPS by settings_test.py (which is itself loaded by setup.cfg by
# py.test)
TEST_INSTALLED_APPS = (
'olympia.translations.tests.testapp',
)
# Tells the extract script what files to look for l10n in and what function
# handles the extraction. The puente library expects this.
PUENTE = {
'BASE_DIR': ROOT,
# Tells the extract script what files to look for l10n in and what function
# handles the extraction.
'DOMAIN_METHODS': {
'django': [
('src/olympia/**.py', 'python'),
# Make sure we're parsing django-admin templates with the django
# template extractor
(
'src/olympia/zadmin/templates/admin/*.html',
'django_babel.extract.extract_django'
),
('src/olympia/**/templates/**.html', 'jinja2'),
('**/templates/**.lhtml', 'jinja2'),
],
'djangojs': [
# We can't say **.js because that would dive into mochikit
# and timeplot and all the other baggage we're carrying.
# Timeplot, in particular, crashes the extractor with bad
# unicode data.
('static/js/*.js', 'javascript'),
('static/js/amo2009/**.js', 'javascript'),
('static/js/common/**.js', 'javascript'),
('static/js/impala/**.js', 'javascript'),
('static/js/zamboni/**.js', 'javascript'),
],
},
}
# Bundles is a dictionary of two dictionaries, css and js, which list css files
# and js files that can be bundled together by the minify app.
MINIFY_BUNDLES = {
'css': {
'restyle/css': (
'css/restyle/restyle.less',
),
# CSS files common to the entire site.
'zamboni/css': (
'css/legacy/main.css',
'css/legacy/main-mozilla.css',
'css/legacy/jquery-lightbox.css',
'css/legacy/autocomplete.css',
'css/zamboni/zamboni.css',
'moz_header/header.css',
'moz_header/footer.css',
'css/zamboni/tags.css',
'css/zamboni/tabs.css',
'css/impala/formset.less',
'css/impala/suggestions.less',
'css/impala/header.less',
'css/impala/moz-tab.css',
'css/impala/footer.less',
'css/impala/faux-zamboni.less',
'css/zamboni/themes.less',
),
'zamboni/impala': (
'css/impala/base.css',
'css/legacy/jquery-lightbox.css',
'css/impala/site.less',
'css/impala/typography.less',
'moz_header/header.css',
'moz_header/footer.css',
'css/impala/forms.less',
'css/common/invisible-upload.less',
'css/impala/header.less',
'css/impala/footer.less',
'css/impala/moz-tab.css',
'css/impala/hovercards.less',
'css/impala/toplist.less',
'css/impala/carousel.less',
'css/impala/reviews.less',
'css/impala/buttons.less',
'css/impala/promos.less',
'css/impala/addon_details.less',
'css/impala/policy.less',
'css/impala/expando.less',
'css/impala/popups.less',
'css/impala/l10n.less',
'css/impala/contributions.less',
'css/impala/lightbox.less',
'css/impala/prose.less',
'css/impala/abuse.less',
'css/impala/paginator.less',
'css/impala/listing.less',
'css/impala/versions.less',
'css/impala/users.less',
'css/impala/collections.less',
'css/impala/tooltips.less',
'css/impala/search.less',
'css/impala/suggestions.less',
'css/impala/jquery.minicolors.css',
'css/impala/personas.less',
'css/impala/login.less',
'css/impala/dictionaries.less',
'css/impala/apps.less',
'css/impala/formset.less',
'css/impala/tables.less',
'css/impala/compat.less',
'css/impala/fxa-migration.less',
),
'zamboni/stats': (
'css/impala/stats.less',
),
'zamboni/discovery-pane': (
'css/zamboni/discovery-pane.css',
'css/impala/promos.less',
'css/legacy/jquery-lightbox.css',
),
'zamboni/devhub': (
'css/impala/tooltips.less',
'css/zamboni/developers.css',
'css/zamboni/docs.less',
'css/impala/developers.less',
'css/impala/personas.less',
'css/devhub/listing.less',
'css/devhub/popups.less',
'css/devhub/compat.less',
'css/impala/formset.less',
'css/devhub/forms.less',
'css/common/invisible-upload.less',
'css/devhub/submission.less',
'css/devhub/refunds.less',
'css/devhub/buttons.less',
'css/devhub/in-app-config.less',
),
'zamboni/devhub_impala': (
'css/impala/developers.less',
'css/devhub/listing.less',
'css/devhub/popups.less',
'css/devhub/compat.less',
'css/devhub/dashboard.less',
'css/devhub/forms.less',
'css/common/invisible-upload.less',
'css/devhub/submission.less',
'css/devhub/search.less',
'css/devhub/refunds.less',
'css/impala/devhub-api.less',
),
'zamboni/editors': (
'css/zamboni/editors.styl',
'css/zamboni/unlisted.less',
),
'zamboni/themes_review': (
'css/zamboni/developers.css',
'css/zamboni/editors.styl',
'css/zamboni/themes_review.styl',
),
'zamboni/files': (
'css/lib/syntaxhighlighter/shCoreDefault.css',
'css/zamboni/files.css',
),
'zamboni/mobile': (
'css/zamboni/mobile.css',
'css/mobile/typography.less',
'css/mobile/forms.less',
'css/mobile/header.less',
'css/mobile/search.less',
'css/mobile/listing.less',
'css/mobile/footer.less',
'css/impala/fxa-migration.less',
'css/mobile/notifications.less',
),
'zamboni/admin': (
'css/zamboni/admin-django.css',
'css/zamboni/admin-mozilla.css',
'css/zamboni/admin_features.css',
# Datepicker styles and jQuery UI core.
'css/zamboni/jquery-ui/custom-1.7.2.css',
),
},
'js': {
# JS files common to the entire site (pre-impala).
'common': (
'js/lib/raven.min.js',
'js/common/raven-config.js',
'js/lib/underscore.js',
'js/zamboni/browser.js',
'js/amo2009/addons.js',
'js/zamboni/init.js',
'js/impala/capabilities.js',
'js/lib/format.js',
'js/lib/jquery.cookie.js',
'js/zamboni/storage.js',
'js/zamboni/buttons.js',
'js/zamboni/tabs.js',
'js/common/keys.js',
# jQuery UI
'js/lib/jquery-ui/core.js',
'js/lib/jquery-ui/position.js',
'js/lib/jquery-ui/widget.js',
'js/lib/jquery-ui/menu.js',
'js/lib/jquery-ui/mouse.js',
'js/lib/jquery-ui/autocomplete.js',
'js/lib/jquery-ui/datepicker.js',
'js/lib/jquery-ui/sortable.js',
'js/zamboni/helpers.js',
'js/zamboni/global.js',
'js/amo2009/global.js',
'js/common/ratingwidget.js',
'js/lib/jquery-ui/jqModal.js',
'js/zamboni/l10n.js',
'js/zamboni/debouncer.js',
# Homepage
'js/impala/promos.js',
'js/zamboni/homepage.js',
# Add-ons details page
'js/lib/jquery-ui/ui.lightbox.js',
'js/zamboni/contributions.js',
'js/zamboni/addon_details.js',
'js/impala/abuse.js',
'js/zamboni/reviews.js',
# Personas
'js/lib/jquery.hoverIntent.js',
'js/zamboni/personas_core.js',
'js/zamboni/personas.js',
# Unicode: needs to be loaded after collections.js which listens to
# an event fired in this file.
'js/zamboni/unicode.js',
# Collections
'js/zamboni/collections.js',
# Users
'js/zamboni/users.js',
# Hover delay for global header
'moz_header/menu.js',
# Password length and strength
'js/zamboni/password-strength.js',
# Search suggestions
'js/impala/forms.js',
'js/impala/ajaxcache.js',
'js/impala/suggestions.js',
'js/impala/site_suggestions.js',
),
# Impala and Legacy: Things to be loaded at the top of the page
'preload': (
'js/lib/jquery-1.12.0.js',
'js/lib/jquery.browser.js',
'js/impala/preloaded.js',
'js/zamboni/analytics.js',
),
# Impala: Things to be loaded at the bottom
'impala': (
'js/lib/raven.min.js',
'js/common/raven-config.js',
'js/lib/underscore.js',
'js/impala/carousel.js',
'js/zamboni/browser.js',
'js/amo2009/addons.js',
'js/zamboni/init.js',
'js/impala/capabilities.js',
'js/lib/format.js',
'js/lib/jquery.cookie.js',
'js/zamboni/storage.js',
'js/zamboni/buttons.js',
'js/lib/jquery.pjax.js',
'js/impala/footer.js',
'js/common/keys.js',
# jQuery UI
'js/lib/jquery-ui/core.js',
'js/lib/jquery-ui/position.js',
'js/lib/jquery-ui/widget.js',
'js/lib/jquery-ui/mouse.js',
'js/lib/jquery-ui/menu.js',
'js/lib/jquery-ui/autocomplete.js',
'js/lib/jquery-ui/datepicker.js',
'js/lib/jquery-ui/sortable.js',
# Firefox Accounts
'js/lib/uri.js',
'js/common/fxa-login.js',
'js/lib/truncate.js',
'js/zamboni/truncation.js',
'js/impala/ajaxcache.js',
'js/zamboni/helpers.js',
'js/zamboni/global.js',
'js/impala/global.js',
'js/common/ratingwidget.js',
'js/lib/jquery-ui/jqModal.js',
'js/zamboni/l10n.js',
'js/impala/forms.js',
# Homepage
'js/impala/promos.js',
'js/impala/homepage.js',
# Add-ons details page
'js/lib/jquery-ui/ui.lightbox.js',
'js/zamboni/contributions.js',
'js/impala/addon_details.js',
'js/impala/abuse.js',
'js/impala/reviews.js',
# Browse listing pages
'js/impala/listing.js',
# Personas
'js/lib/jquery.hoverIntent.js',
'js/zamboni/personas_core.js',
'js/zamboni/personas.js',
# Persona creation
'js/common/upload-image.js',
'js/lib/jquery.minicolors.js',
'js/impala/persona_creation.js',
# Unicode: needs to be loaded after collections.js which listens to
# an event fired in this file.
'js/zamboni/unicode.js',
# Collections
'js/zamboni/collections.js',
'js/impala/collections.js',
# Users
'js/zamboni/users.js',
'js/impala/users.js',
# Search
'js/impala/serializers.js',
'js/impala/search.js',
'js/impala/suggestions.js',
'js/impala/site_suggestions.js',
# Login
'js/impala/login.js',
),
'zamboni/discovery': (
'js/lib/jquery-1.12.0.js',
'js/lib/jquery.browser.js',
'js/lib/underscore.js',
'js/zamboni/browser.js',
'js/zamboni/init.js',
'js/impala/capabilities.js',
'js/lib/format.js',
'js/impala/carousel.js',
'js/zamboni/analytics.js',
# Add-ons details
'js/lib/jquery.cookie.js',
'js/zamboni/storage.js',
'js/zamboni/buttons.js',
'js/lib/jquery-ui/ui.lightbox.js',
# Personas
'js/lib/jquery.hoverIntent.js',
'js/zamboni/personas_core.js',
'js/zamboni/personas.js',
'js/zamboni/debouncer.js',
'js/lib/truncate.js',
'js/zamboni/truncation.js',
'js/impala/promos.js',
'js/zamboni/discovery_addons.js',
'js/zamboni/discovery_pane.js',
),
'zamboni/discovery-video': (
'js/lib/popcorn-1.0.js',
'js/zamboni/discovery_video.js',
),
'zamboni/devhub': (
'js/lib/truncate.js',
'js/zamboni/truncation.js',
'js/common/upload-base.js',
'js/common/upload-addon.js',
'js/common/upload-image.js',
'js/impala/formset.js',
'js/zamboni/devhub.js',
'js/zamboni/validator.js',
),
'zamboni/editors': (
'js/lib/highcharts.src.js',
'js/zamboni/editors.js',
'js/lib/jquery.hoverIntent.js', # Used by jquery.zoomBox.
'js/lib/jquery.zoomBox.js', # Used by themes_review.
'js/zamboni/themes_review_templates.js',
'js/zamboni/themes_review.js',
),
'zamboni/files': (
'js/lib/diff_match_patch_uncompressed.js',
'js/lib/syntaxhighlighter/xregexp-min.js',
'js/lib/syntaxhighlighter/shCore.js',
'js/lib/syntaxhighlighter/shLegacy.js',
'js/lib/syntaxhighlighter/shBrushAppleScript.js',
'js/lib/syntaxhighlighter/shBrushAS3.js',
'js/lib/syntaxhighlighter/shBrushBash.js',
'js/lib/syntaxhighlighter/shBrushCpp.js',
'js/lib/syntaxhighlighter/shBrushCSharp.js',
'js/lib/syntaxhighlighter/shBrushCss.js',
'js/lib/syntaxhighlighter/shBrushDiff.js',
'js/lib/syntaxhighlighter/shBrushJava.js',
'js/lib/syntaxhighlighter/shBrushJScript.js',
'js/lib/syntaxhighlighter/shBrushPhp.js',
'js/lib/syntaxhighlighter/shBrushPlain.js',
'js/lib/syntaxhighlighter/shBrushPython.js',
'js/lib/syntaxhighlighter/shBrushSass.js',
'js/lib/syntaxhighlighter/shBrushSql.js',
'js/lib/syntaxhighlighter/shBrushVb.js',
'js/lib/syntaxhighlighter/shBrushXml.js',
'js/zamboni/storage.js',
'js/zamboni/files_templates.js',
'js/zamboni/files.js',
),
'zamboni/mobile': (
'js/lib/jquery-1.12.0.js',
'js/lib/jquery.browser.js',
'js/lib/underscore.js',
'js/lib/jqmobile.js',
'js/lib/jquery.cookie.js',
'js/zamboni/browser.js',
'js/zamboni/init.js',
'js/impala/capabilities.js',
'js/zamboni/analytics.js',
'js/lib/format.js',
'js/zamboni/mobile/buttons.js',
'js/lib/truncate.js',
'js/zamboni/truncation.js',
'js/impala/footer.js',
'js/zamboni/personas_core.js',
'js/zamboni/mobile/personas.js',
'js/zamboni/helpers.js',
'js/zamboni/mobile/general.js',
'js/common/ratingwidget.js',
# Firefox Accounts
'js/lib/uri.js',
'js/common/fxa-login.js',
),
'zamboni/stats': (
'js/lib/highcharts.src.js',
'js/impala/stats/csv_keys.js',
'js/impala/stats/helpers.js',
'js/impala/stats/dateutils.js',
'js/impala/stats/manager.js',
'js/impala/stats/controls.js',
'js/impala/stats/overview.js',
'js/impala/stats/topchart.js',
'js/impala/stats/chart.js',
'js/impala/stats/table.js',
'js/impala/stats/stats.js',
),
'zamboni/admin': (
'js/zamboni/admin.js',
'js/zamboni/admin_features.js',
'js/zamboni/admin_validation.js',
),
# This is included when DEBUG is True. Bundle in <head>.
'debug': (
'js/debug/less_setup.js',
'js/lib/less.js',
'js/debug/less_live.js',
),
}
}
# Caching
# Prefix for cache keys (will prevent collisions when running parallel copies)
CACHE_PREFIX = 'amo:%s:' % build_id
KEY_PREFIX = CACHE_PREFIX
FETCH_BY_ID = True
# Number of seconds a count() query should be cached. Keep it short because
# it's not possible to invalidate these queries.
CACHE_COUNT_TIMEOUT = 60
# To enable pylibmc compression (in bytes)
PYLIBMC_MIN_COMPRESS_LEN = 0 # disabled
# External tools.
JAVA_BIN = '/usr/bin/java'
# Add-on download settings.
PRIVATE_MIRROR_URL = '/_privatefiles'
# File paths
ADDON_ICONS_DEFAULT_PATH = os.path.join(ROOT, 'static', 'img', 'addon-icons')
CA_CERT_BUNDLE_PATH = os.path.join(
ROOT, 'src/olympia/amo/certificates/roots.pem')
# URL paths
# paths for images, e.g. mozcdn.com/amo or '/static'
VAMO_URL = 'https://versioncheck.addons.mozilla.org'
NEW_PERSONAS_UPDATE_URL = VAMO_URL + '/%(locale)s/themes/update-check/%(id)d'
# Outgoing URL bouncer
REDIRECT_URL = 'https://outgoing.mozilla.org/v1/'
REDIRECT_SECRET_KEY = ''
# Allow URLs from these servers. Use full domain names.
REDIRECT_URL_WHITELIST = ['addons.mozilla.org']
# Default to short expiration; check "remember me" to override
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
# See: https://github.com/mozilla/addons-server/issues/1789
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_COOKIE_AGE = 2592000
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN # bug 608797
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
# These should have app+locale at the start to avoid redirects
LOGIN_URL = reverse_lazy('users.login')
LOGOUT_URL = reverse_lazy('users.logout')
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
# When logging in with browser ID, a username is created automatically.
# If the generated username already exists, generation is retried (recursively)
# up to this number of times.
MAX_GEN_USERNAME_TRIES = 50
# PayPal Settings
PAYPAL_API_VERSION = '78'
PAYPAL_APP_ID = ''
# URLs for various calls.
PAYPAL_API_URL = 'https://api-3t.paypal.com/nvp'
PAYPAL_CGI_URL = 'https://www.paypal.com/cgi-bin/webscr'
PAYPAL_PAY_URL = 'https://svcs.paypal.com/AdaptivePayments/'
PAYPAL_FLOW_URL = 'https://paypal.com/webapps/adaptivepayment/flow/pay'
PAYPAL_PERMISSIONS_URL = 'https://svcs.paypal.com/Permissions/'
PAYPAL_JS_URL = 'https://www.paypalobjects.com/js/external/dg.js'
# Permissions for the live or sandbox servers
PAYPAL_EMBEDDED_AUTH = {'USER': '', 'PASSWORD': '', 'SIGNATURE': ''}
# The PayPal cert that we'll use for checking.
# When None, the Mozilla CA bundle is used to look it up.
PAYPAL_CERT = None
# Contribution limit, one time and monthly
MAX_CONTRIBUTION = 1000
# Email settings
ADDONS_EMAIL = "Mozilla Add-ons <[email protected]>"
DEFAULT_FROM_EMAIL = ADDONS_EMAIL
# Email goes to the console by default. s/console/smtp/ for regular delivery
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Please use all lowercase for the blacklist.
EMAIL_BLACKLIST = (
'[email protected]',
)
# Please use all lowercase for the QA whitelist.
EMAIL_QA_WHITELIST = ()
# URL for Add-on Validation FAQ.
VALIDATION_FAQ_URL = ('https://wiki.mozilla.org/Add-ons/Reviewers/Guide/'
'AddonReviews#Step_2:_Automatic_validation')
# Celery
BROKER_URL = os.environ.get('BROKER_URL',
'amqp://olympia:olympia@localhost:5672/olympia')
BROKER_CONNECTION_TIMEOUT = 0.1
BROKER_HEARTBEAT = 60 * 15
CELERY_DEFAULT_QUEUE = 'default'
CELERY_RESULT_BACKEND = 'amqp'
CELERY_IGNORE_RESULT = True
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERYD_HIJACK_ROOT_LOGGER = False
CELERY_IMPORTS = (
'olympia.lib.crypto.tasks',
'olympia.lib.es.management.commands.reindex',
)
# We have separate celeryds for processing devhub & images as fast as possible
# Some notes:
# - always add routes here instead of @task(queue=<name>)
# - when adding a queue, be sure to update deploy.py so that it gets restarted
CELERY_ROUTES = {
# Priority.
# If your tasks need to be run as soon as possible, add them here so they
# are routed to the priority queue.
'olympia.addons.tasks.index_addons': {'queue': 'priority'},
'olympia.addons.tasks.unindex_addons': {'queue': 'priority'},
'olympia.addons.tasks.save_theme': {'queue': 'priority'},
'olympia.addons.tasks.save_theme_reupload': {'queue': 'priority'},
'olympia.bandwagon.tasks.index_collections': {'queue': 'priority'},
'olympia.bandwagon.tasks.unindex_collections': {'queue': 'priority'},
'olympia.users.tasks.index_users': {'queue': 'priority'},
'olympia.users.tasks.unindex_users': {'queue': 'priority'},
# Other queues we prioritize below.
# AMO Devhub.
'olympia.devhub.tasks.convert_purified': {'queue': 'devhub'},
'olympia.devhub.tasks.flag_binary': {'queue': 'devhub'},
'olympia.devhub.tasks.get_preview_sizes': {'queue': 'devhub'},
'olympia.devhub.tasks.handle_file_validation_result': {'queue': 'devhub'},
'olympia.devhub.tasks.handle_upload_validation_result': {
'queue': 'devhub'},
'olympia.devhub.tasks.resize_icon': {'queue': 'devhub'},
'olympia.devhub.tasks.resize_preview': {'queue': 'devhub'},
'olympia.devhub.tasks.send_welcome_email': {'queue': 'devhub'},
'olympia.devhub.tasks.submit_file': {'queue': 'devhub'},
'olympia.devhub.tasks.validate_file': {'queue': 'devhub'},
'olympia.devhub.tasks.validate_file_path': {'queue': 'devhub'},
# This is currently used only by validation tasks.
# This puts the chord_unlock task on the devhub queue, which means anything
# that uses chord() or group() must also be running in this queue or must
# be on a worker that listens to the same queue.
'celery.chord_unlock': {'queue': 'devhub'},
'olympia.devhub.tasks.compatibility_check': {'queue': 'devhub'},
# Images.
'olympia.bandwagon.tasks.resize_icon': {'queue': 'images'},
'olympia.users.tasks.resize_photo': {'queue': 'images'},
'olympia.users.tasks.delete_photo': {'queue': 'images'},
'olympia.devhub.tasks.resize_icon': {'queue': 'images'},
'olympia.devhub.tasks.resize_preview': {'queue': 'images'},
# AMO validator.
'olympia.zadmin.tasks.bulk_validate_file': {'queue': 'limited'},
# AMO
'olympia.amo.tasks.delete_anonymous_collections': {'queue': 'amo'},
'olympia.amo.tasks.delete_logs': {'queue': 'amo'},
'olympia.amo.tasks.delete_stale_contributions': {'queue': 'amo'},
'olympia.amo.tasks.flush_front_end_cache_urls': {'queue': 'amo'},
'olympia.amo.tasks.migrate_editor_eventlog': {'queue': 'amo'},
'olympia.amo.tasks.send_email': {'queue': 'amo'},
'olympia.amo.tasks.set_modified_on_object': {'queue': 'amo'},
# Addons
'olympia.addons.tasks.calc_checksum': {'queue': 'addons'},
'olympia.addons.tasks.delete_persona_image': {'queue': 'addons'},
'olympia.addons.tasks.delete_preview_files': {'queue': 'addons'},
'olympia.addons.tasks.update_incompatible_appversions': {
'queue': 'addons'},
'olympia.addons.tasks.version_changed': {'queue': 'addons'},
# API
'olympia.api.tasks.process_results': {'queue': 'api'},
'olympia.api.tasks.process_webhook': {'queue': 'api'},
# Crons
'olympia.addons.cron._update_addon_average_daily_users': {'queue': 'cron'},
'olympia.addons.cron._update_addon_download_totals': {'queue': 'cron'},
'olympia.addons.cron._update_addons_current_version': {'queue': 'cron'},
'olympia.addons.cron._update_appsupport': {'queue': 'cron'},
'olympia.addons.cron._update_daily_theme_user_counts': {'queue': 'cron'},
'olympia.bandwagon.cron._drop_collection_recs': {'queue': 'cron'},
'olympia.bandwagon.cron._update_collections_subscribers': {
'queue': 'cron'},
'olympia.bandwagon.cron._update_collections_votes': {'queue': 'cron'},
# Bandwagon
'olympia.bandwagon.tasks.collection_meta': {'queue': 'bandwagon'},
'olympia.bandwagon.tasks.collection_votes': {'queue': 'bandwagon'},
'olympia.bandwagon.tasks.collection_watchers': {'queue': 'bandwagon'},
'olympia.bandwagon.tasks.delete_icon': {'queue': 'bandwagon'},
'olympia.bandwagon.tasks.resize_icon': {'queue': 'bandwagon'},
# Editors
'olympia.editors.tasks.add_commentlog': {'queue': 'editors'},
'olympia.editors.tasks.add_versionlog': {'queue': 'editors'},
'olympia.editors.tasks.approve_rereview': {'queue': 'editors'},
'olympia.editors.tasks.reject_rereview': {'queue': 'editors'},
'olympia.editors.tasks.send_mail': {'queue': 'editors'},
# Files
'olympia.files.tasks.extract_file': {'queue': 'files'},
'olympia.files.tasks.fix_let_scope_bustage_in_addons': {'queue': 'files'},
# Crypto
'olympia.lib.crypto.tasks.resign_files': {'queue': 'crypto'},
'olympia.lib.crypto.tasks.sign_addons': {'queue': 'crypto'},
'olympia.lib.crypto.tasks.unsign_addons': {'queue': 'crypto'},
# Search
'olympia.lib.es.management.commands.reindex.create_new_index': {
'queue': 'search'},
'olympia.lib.es.management.commands.reindex.delete_indexes': {
'queue': 'search'},
'olympia.lib.es.management.commands.reindex.flag_database': {
'queue': 'search'},
'olympia.lib.es.management.commands.reindex.index_data': {
'queue': 'search'},
'olympia.lib.es.management.commands.reindex.unflag_database': {
'queue': 'search'},
'olympia.lib.es.management.commands.reindex.update_aliases': {
'queue': 'search'},
# Reviews
'olympia.reviews.models.check_spam': {'queue': 'reviews'},
'olympia.reviews.tasks.addon_bayesian_rating': {'queue': 'reviews'},
'olympia.reviews.tasks.addon_grouped_rating': {'queue': 'reviews'},
'olympia.reviews.tasks.addon_review_aggregates': {'queue': 'reviews'},
'olympia.reviews.tasks.update_denorm': {'queue': 'reviews'},
# Stats
'olympia.stats.tasks.addon_total_contributions': {'queue': 'stats'},
'olympia.stats.tasks.index_collection_counts': {'queue': 'stats'},
'olympia.stats.tasks.index_download_counts': {'queue': 'stats'},
'olympia.stats.tasks.index_theme_user_counts': {'queue': 'stats'},
'olympia.stats.tasks.index_update_counts': {'queue': 'stats'},
'olympia.stats.tasks.update_addons_collections_downloads': {
'queue': 'stats'},
'olympia.stats.tasks.update_collections_total': {'queue': 'stats'},
'olympia.stats.tasks.update_global_totals': {'queue': 'stats'},
'olympia.stats.tasks.update_google_analytics': {'queue': 'stats'},
# Tags
'olympia.tags.tasks.clean_tag': {'queue': 'tags'},
'olympia.tags.tasks.update_all_tag_stats': {'queue': 'tags'},
'olympia.tags.tasks.update_tag_stat': {'queue': 'tags'},
# Users
'olympia.users.tasks.delete_photo': {'queue': 'users'},
'olympia.users.tasks.resize_photo': {'queue': 'users'},
'olympia.users.tasks.update_user_ratings_task': {'queue': 'users'},
# Zadmin
'olympia.zadmin.tasks.add_validation_jobs': {'queue': 'zadmin'},
'olympia.zadmin.tasks.admin_email': {'queue': 'zadmin'},
'olympia.zadmin.tasks.celery_error': {'queue': 'zadmin'},
'olympia.zadmin.tasks.fetch_langpack': {'queue': 'zadmin'},
'olympia.zadmin.tasks.fetch_langpacks': {'queue': 'zadmin'},
'olympia.zadmin.tasks.notify_compatibility': {'queue': 'zadmin'},
'olympia.zadmin.tasks.notify_compatibility_chunk': {'queue': 'zadmin'},
'olympia.zadmin.tasks.tally_validation_results': {'queue': 'zadmin'},
'olympia.zadmin.tasks.update_maxversions': {'queue': 'zadmin'},
}
# This is just a place to store these values; apply them in your
# task decorator, for example:
# @task(time_limit=CELERY_TIME_LIMITS['lib...']['hard'])
# Otherwise your task will use the default settings.
CELERY_TIME_LIMITS = {
# The reindex management command can take up to 3 hours to run.
'olympia.lib.es.management.commands.reindex': {
'soft': 10800, 'hard': 14400},
}
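# A minimal illustrative sketch (the task name below is hypothetical); the
# reindex limits above would be applied on the task itself like so:
# @task(time_limit=CELERY_TIME_LIMITS[
#           'olympia.lib.es.management.commands.reindex']['hard'],
#       soft_time_limit=CELERY_TIME_LIMITS[
#           'olympia.lib.es.management.commands.reindex']['soft'])
# def reindex_everything(...):
#     ...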
# When testing, we always want tasks to raise exceptions. Good for sanity.
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
# Time in seconds before celery.exceptions.SoftTimeLimitExceeded is raised.
# The task can catch that and recover but should exit ASAP. Note that there is
# a separate, shorter timeout for validation tasks.
CELERYD_TASK_SOFT_TIME_LIMIT = 60 * 30
# Fixture Magic
CUSTOM_DUMPS = {
'addon': { # ./manage.py custom_dump addon id
'primary': 'addons.addon', # This is our reference model.
'dependents': [ # These are items we wish to dump.
# Magic turns this into current_version.files.all()[0].
'current_version.files.all.0',
'current_version.apps.all.0',
'addonuser_set.all.0',
],
'order': ('translations.translation',
'files.platform', 'addons.addon',
'versions.license', 'versions.version', 'files.file'),
'excludes': {
'addons.addon': ('_current_version',),
}
}
}
# Hera (http://github.com/clouserw/hera)
HERA = [{'USERNAME': '',
'PASSWORD': '',
'LOCATION': ''}]
# Logging
LOG_LEVEL = logging.DEBUG
HAS_SYSLOG = True # syslog is used if HAS_SYSLOG and NOT DEBUG.
SYSLOG_TAG = "http_app_addons"
SYSLOG_TAG2 = "http_app_addons2"
# See PEP 391 and log_settings.py for formatting help. Each section of
# LOGGING will get merged into the corresponding section of
# log_settings.py. Handlers and log levels are set up automatically based
# on LOG_LEVEL and DEBUG unless you set them here. Messages will not
# propagate through a logger unless propagate: True is set.
LOGGING_CONFIG = None
LOGGING = {
'loggers': {
'amo.validator': {'level': logging.WARNING},
'amqplib': {'handlers': ['null']},
'caching.invalidation': {'handlers': ['null']},
'caching': {'level': logging.ERROR},
'elasticsearch': {'handlers': ['null']},
'rdflib': {'handlers': ['null']},
'suds': {'handlers': ['null']},
'z.task': {'level': logging.INFO},
'z.es': {'level': logging.INFO},
'z.heka': {'level': logging.INFO},
's.client': {'level': logging.INFO},
},
}
HEKA_CONF = {
'logger': 'olympia',
'plugins': {
'cef': ('heka_cef.cef_plugin:config_plugin', {
'syslog_facility': 'LOCAL4',
'syslog_ident': 'http_app_addons_marketplace',
'syslog_priority': 'ALERT'}),
# Sentry accepts messages over UDP; you'll need to
# configure this URL so that logstash can relay the message
# properly.
'raven': ('heka_raven.raven_plugin:config_plugin',
{'dsn': 'udp://username:[email protected]:9000/2'})},
'stream': {
'class': 'heka.streams.UdpStream',
'host': '127.0.0.1',
'port': 5565}}
HEKA = client_from_dict_config(HEKA_CONF)
USE_HEKA_FOR_CEF = False
USE_HEKA_FOR_TASTYPIE = False
CEF_PRODUCT = "amo"
# CSP Settings
PROD_CDN_HOST = 'https://addons.cdn.mozilla.net'
ANALYTICS_HOST = 'https://ssl.google-analytics.com'
CSP_REPORT_URI = '/__cspreport__'
CSP_REPORT_ONLY = False
CSP_EXCLUDE_URL_PREFIXES = ()
# NOTE: CSP_DEFAULT_SRC MUST be set; otherwise any directive that is not
# explicitly set will default to being open to anything.
CSP_DEFAULT_SRC = (
"'self'",
)
CSP_CONNECT_SRC = (
"'self'",
'https://sentry.prod.mozaws.net',
)
CSP_FONT_SRC = (
"'self'",
PROD_CDN_HOST,
)
CSP_FRAME_SRC = (
"'self'",
'https://ic.paypal.com',
'https://paypal.com',
'https://www.google.com/recaptcha/',
'https://www.paypal.com',
)
CSP_IMG_SRC = (
"'self'",
'data:', # Used in inlined mobile css.
'blob:', # Needed for image uploads.
'https://www.paypal.com',
ANALYTICS_HOST,
PROD_CDN_HOST,
'https://static.addons.mozilla.net', # CDN origin server.
'https://sentry.prod.mozaws.net',
)
CSP_MEDIA_SRC = (
'https://videos.cdn.mozilla.net',
)
CSP_OBJECT_SRC = ("'none'",)
# https://addons.mozilla.org is needed for about:addons because
# the discovery pane's origin is https://services.addons.mozilla.org
# and as a result 'self' doesn't match requests to addons.mozilla.org.
CSP_SCRIPT_SRC = (
"'self'",
'https://addons.mozilla.org',
'https://www.paypalobjects.com',
'https://www.google.com/recaptcha/',
'https://www.gstatic.com/recaptcha/',
ANALYTICS_HOST,
PROD_CDN_HOST,
)
CSP_STYLE_SRC = (
"'self'",
"'unsafe-inline'",
PROD_CDN_HOST,
)
# Should robots.txt deny everything or disallow a calculated list of URLs we
# don't want to be crawled? Default is True (allow everything); this is
# toggled to False on -dev and stage.
# Also see http://www.google.com/support/webmasters/bin/answer.py?answer=93710
ENGAGE_ROBOTS = True
# Read-only mode setup.
READ_ONLY = False
# Turn on read-only mode in local_settings.py by putting this line
# at the VERY BOTTOM: read_only_mode(globals())
def read_only_mode(env):
env['READ_ONLY'] = True
# Replace the default (master) db with a slave connection.
if not env.get('SLAVE_DATABASES'):
raise Exception("We need at least one slave database.")
slave = env['SLAVE_DATABASES'][0]
env['DATABASES']['default'] = env['DATABASES'][slave]
# No sessions without the database, so disable auth.
env['AUTHENTICATION_BACKENDS'] = ('olympia.users.backends.NoAuthForYou',)
# Add in the read-only middleware before csrf middleware.
extra = 'olympia.amo.middleware.ReadOnlyMiddleware'
before = 'session_csrf.CsrfMiddleware'
m = list(env['MIDDLEWARE_CLASSES'])
m.insert(m.index(before), extra)
env['MIDDLEWARE_CLASSES'] = tuple(m)
# Uploaded file limits
MAX_ICON_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_IMAGE_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_VIDEO_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_PHOTO_UPLOAD_SIZE = MAX_ICON_UPLOAD_SIZE
MAX_PERSONA_UPLOAD_SIZE = 300 * 1024
MAX_REVIEW_ATTACHMENT_UPLOAD_SIZE = 5 * 1024 * 1024
# RECAPTCHA: overload the following key settings in local_settings.py
# with your keys.
NOBOT_RECAPTCHA_PUBLIC_KEY = ''
NOBOT_RECAPTCHA_PRIVATE_KEY = ''
# Send Django signals asynchronously on a background thread.
ASYNC_SIGNALS = True
# For persona pagination performance, we hardcode the number of
# available pages when the filter is up-and-coming.
PERSONA_DEFAULT_PAGES = 10
REDIS_LOCATION = os.environ.get(
'REDIS_LOCATION',
'redis://localhost:6379/0?socket_timeout=0.5')
def get_redis_settings(uri):
import urlparse
urlparse.uses_netloc.append('redis')
result = urlparse.urlparse(uri)
options = dict(urlparse.parse_qsl(result.query))
if 'socket_timeout' in options:
options['socket_timeout'] = float(options['socket_timeout'])
return {
'HOST': result.hostname,
'PORT': result.port,
'PASSWORD': result.password,
'DB': int((result.path or '0').lstrip('/')),
'OPTIONS': options
}
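# For illustration, with the default REDIS_LOCATION above this returns:
# {'HOST': 'localhost', 'PORT': 6379, 'PASSWORD': None, 'DB': 0,
#  'OPTIONS': {'socket_timeout': 0.5}}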
# This is used for `django-cache-machine`
REDIS_BACKEND = REDIS_LOCATION
REDIS_BACKENDS = {
'master': get_redis_settings(REDIS_LOCATION)
}
# Full path or executable path (relative to $PATH) of the spidermonkey js
# binary. It must be a version compatible with amo-validator.
SPIDERMONKEY = None
# Number of seconds before celery tasks will abort addon validation:
VALIDATOR_TIMEOUT = 110
# Max number of warnings/errors to show from validator. Set to None for no
# limit.
VALIDATOR_MESSAGE_LIMIT = 500
# Feature flags
UNLINK_SITE_STATS = True
# Set to True if we're allowed to use X-SENDFILE.
XSENDFILE = True
XSENDFILE_HEADER = 'X-SENDFILE'
MOBILE_COOKIE = 'mamo'
# If the user's Firefox has a version number greater than this, we consider
# it a beta.
MIN_BETA_VERSION = '3.7'
DEFAULT_SUGGESTED_CONTRIBUTION = 5
# Path to `ps`.
PS_BIN = '/bin/ps'
BLOCKLIST_COOKIE = 'BLOCKLIST_v1'
# The maximum file size that is shown inside the file viewer.
FILE_VIEWER_SIZE_LIMIT = 1048576
# The maximum file size that you can have inside a zip file.
FILE_UNZIP_SIZE_LIMIT = 104857600
# How long to delay tasks relying on file system to cope with NFS lag.
NFS_LAG_DELAY = 3
# A whitelist of domains that the authentication script will redirect to upon
# successfully logging in or out.
VALID_LOGIN_REDIRECTS = {
'builder': 'https://builder.addons.mozilla.org',
'builderstage': 'https://builder-addons.allizom.org',
'buildertrunk': 'https://builder-addons-dev.allizom.org',
}
# Elasticsearch
ES_HOSTS = [os.environ.get('ELASTICSEARCH_LOCATION', '127.0.0.1:9200')]
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = {
'default': 'addons',
'stats': 'addons_stats',
}
ES_TIMEOUT = 30
ES_DEFAULT_NUM_REPLICAS = 2
ES_DEFAULT_NUM_SHARDS = 5
# Default AMO user id to use for tasks.
TASK_USER_ID = 4757633
# If this is False, tasks and other jobs that send non-critical emails should
# use a fake email backend.
SEND_REAL_EMAIL = False
STATSD_HOST = 'localhost'
STATSD_PORT = 8125
STATSD_PREFIX = 'amo'
# The django statsd client to use, see django-statsd for more.
STATSD_CLIENT = 'django_statsd.clients.normal'
GRAPHITE_HOST = 'localhost'
GRAPHITE_PORT = 2003
GRAPHITE_PREFIX = 'amo'
GRAPHITE_TIMEOUT = 1
# IP addresses of servers we use as proxies.
KNOWN_PROXIES = []
# Blog URL
DEVELOPER_BLOG_URL = 'http://blog.mozilla.com/addons/feed/'
LOGIN_RATELIMIT_USER = 5
LOGIN_RATELIMIT_ALL_USERS = '15/m'
CSRF_FAILURE_VIEW = 'olympia.amo.views.csrf_failure'
# Testing responsiveness without rate limits.
CELERY_DISABLE_RATE_LIMITS = True
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'olympia.amo.utils.LocalFileStorage'
# This is the signing server for signing fully reviewed files.
SIGNING_SERVER = ''
# This is the signing server for signing preliminary reviewed files.
PRELIMINARY_SIGNING_SERVER = ''
# And how long we'll give the server to respond.
SIGNING_SERVER_TIMEOUT = 10
# Hotfix addons (don't sign those, they're already signed by Mozilla).
HOTFIX_ADDON_GUIDS = ['[email protected]',
'[email protected]']
# Minimum Firefox version for default-to-compatible add-ons to be signed.
MIN_D2C_VERSION = '4'
# Minimum Firefox version for non-default-to-compatible add-ons to be signed.
MIN_NOT_D2C_VERSION = '37'
# True when the Django app is running from the test suite.
IN_TEST_SUITE = False
# The configuration for the client that speaks to solitude.
# A tuple of the solitude hosts.
SOLITUDE_HOSTS = ('',)
# The oAuth key and secret that solitude needs.
SOLITUDE_KEY = ''
SOLITUDE_SECRET = ''
# The timeout we'll give solitude.
SOLITUDE_TIMEOUT = 10
# The OAuth keys to connect to the solitude host specified above.
SOLITUDE_OAUTH = {'key': '', 'secret': ''}
# Temporary flag to work with navigator.mozPay() on devices that don't
# support it natively.
SIMULATE_NAV_PAY = False
# When the dev. agreement gets updated and you need users to re-accept it,
# change this date. You won't want to do this for minor format changes.
# The tuple is passed through to datetime.date, so please use a valid date
# tuple. If the value is None, it is simply not used at all.
DEV_AGREEMENT_LAST_UPDATED = None
# If you want to allow self-reviews for add-ons/apps, then enable this.
# In production we do not want to allow this.
ALLOW_SELF_REVIEWS = False
# Modify the user-agents we check for in django-mobility
# (Android has since changed its user agent).
MOBILE_USER_AGENTS = ('mozilla.+mobile|android|fennec|iemobile|'
'iphone|opera (?:mini|mobi)')
# Credentials for accessing Google Analytics stats.
GOOGLE_ANALYTICS_CREDENTIALS = {}
# Which domain to access GA stats for. If not set, defaults to DOMAIN.
GOOGLE_ANALYTICS_DOMAIN = None
# Used for general web API access.
GOOGLE_API_CREDENTIALS = ''
# Google translate settings.
GOOGLE_TRANSLATE_API_URL = 'https://www.googleapis.com/language/translate/v2'
GOOGLE_TRANSLATE_REDIRECT_URL = (
'https://translate.google.com/#auto/{lang}/{text}')
# Language pack fetcher settings
LANGPACK_OWNER_EMAIL = '[email protected]'
LANGPACK_DOWNLOAD_BASE = 'https://ftp.mozilla.org/pub/mozilla.org/'
LANGPACK_PATH_DEFAULT = '%s/releases/%s/win32/xpi/'
# E.g. https://ftp.mozilla.org/pub/mozilla.org/firefox/releases/23.0/SHA512SUMS
LANGPACK_MANIFEST_PATH = '../../SHA512SUMS'
LANGPACK_MAX_SIZE = 5 * 1024 * 1024 # 5MB should be more than enough
# Basket subscription url for newsletter signups
BASKET_URL = 'https://basket.mozilla.com'
# This saves us when we upgrade jingo-minify (jsocol/jingo-minify@916b054c).
JINGO_MINIFY_USE_STATIC = True
# Whitelist IP addresses of the allowed clients that can post email
# through the API.
WHITELISTED_CLIENTS_EMAIL_API = []
# Allow URL style format override. eg. "?format=json"
URL_FORMAT_OVERRIDE = 'format'
# Add-on used to collect stats (warning: technical debt around!)
ADDON_COLLECTOR_ID = 11950
# Connection to the hive server.
HIVE_CONNECTION = {
'host': 'peach-gw.peach.metrics.scl3.mozilla.com',
'port': 10000,
'user': 'amo_prod',
'password': '',
'auth_mechanism': 'PLAIN',
}
# Static
STATIC_ROOT = path('site-static')
STATIC_URL = '/static/'
JINGO_MINIFY_ROOT = path('static')
STATICFILES_DIRS = (
path('static'),
JINGO_MINIFY_ROOT
)
NETAPP_STORAGE = TMP_PATH
GUARDED_ADDONS_PATH = ROOT + u'/guarded-addons'
# These are key files that must be present on disk to encrypt/decrypt certain
# database fields.
AES_KEYS = {
#'api_key:secret': os.path.join(ROOT, 'path', 'to', 'file.key'),
}
# Time in seconds for how long a JWT auth token created by developers with
# their API key can live. When developers are creating auth tokens they cannot
# set the expiration any longer than this.
MAX_APIKEY_JWT_AUTH_TOKEN_LIFETIME = 60
# django-rest-framework-jwt settings:
JWT_AUTH = {
# Use HMAC using SHA-256 hash algorithm. It should be the default, but we
# want to make sure it does not change behind our backs.
# See https://github.com/jpadilla/pyjwt/blob/master/docs/algorithms.rst
'JWT_ALGORITHM': 'HS256',
# This adds some padding to timestamp validation in case client/server
# clocks are off.
'JWT_LEEWAY': 5,
# Expiration for non-apikey jwt tokens. Since this will be used by our
# frontend clients we want a longer expiration than normal, matching the
# session cookie expiration.
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=SESSION_COOKIE_AGE),
# We don't allow refreshes, instead we simply have a long duration.
'JWT_ALLOW_REFRESH': False,
# Prefix for non-apikey jwt tokens. Should be different from 'JWT', which we
# already use for API key tokens.
'JWT_AUTH_HEADER_PREFIX': 'Bearer',
}
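# For reference: with SESSION_COOKIE_AGE = 2592000 seconds (30 days) above,
# non-apikey JWT tokens issued here therefore expire after 30 days as well.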
REST_FRAMEWORK = {
# Set this because the default is to also include:
# 'rest_framework.renderers.BrowsableAPIRenderer'
# Which it will try to use if the client accepts text/html.
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'olympia.api.authentication.JSONWebTokenAuthentication',
),
# Enable pagination
'PAGE_SIZE': 25,
}
# This is the DSN to the local Sentry service. It might be overridden in
# site-specific settings files as well.
SENTRY_DSN = os.environ.get('SENTRY_DSN')
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_TEST_TYPES = (dtypes.int64, dtypes.float32,
dtypes.complex64, dtypes.complex128)
# TODO(virimia): Add a benchmark for gather_v2, with batch_dims and axis set.
def _to_str_elements(values):
"""Converts the inner list elements to strings."""
if isinstance(values, list):
return [_to_str_elements(value) for value in values]
else:
return str(values).encode("utf-8")
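# For example (illustrative): _to_str_elements([[1, 2], [3]]) returns
# [[b"1", b"2"], [b"3"]], i.e. every leaf value becomes a UTF-8 byte string.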
class GatherTest(test.TestCase, parameterized.TestCase):
def _buildParams(self, data, dtype):
data = data.astype(dtype.as_numpy_dtype)
# For complex types, add an index-dependent imaginary component so we can
# tell we got the right value.
if dtype.is_complex:
return data + 10j * data
return data
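# For example (illustrative): complex64 data [0, 1, 2] becomes
# [0+0j, 1+10j, 2+20j], so each value carries a distinguishing imaginary part.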
def testScalar1D(self):
with self.cached_session():
data = np.array([0, 1, 2, 3, 7, 5])
for dtype in _TEST_TYPES:
for indices in 4, [1, 2, 2, 4, 5]:
with self.subTest(dtype=dtype, indices=indices):
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.gather(params, indices_tf)
gather_val = self.evaluate(gather_t)
np_val = params_np[indices]
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
def testScalar2D(self):
with self.session():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
for dtype in _TEST_TYPES:
for axis in range(data.ndim):
with self.subTest(dtype=dtype, axis=axis):
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices = constant_op.constant(2)
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np.take(params_np, 2, axis=axis), gather_val)
expected_shape = data.shape[:axis] + data.shape[axis + 1:]
self.assertEqual(expected_shape, gather_t.get_shape())
def testSimpleTwoD32(self):
with self.session():
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
for dtype in _TEST_TYPES:
for axis in range(data.ndim):
with self.subTest(dtype=dtype, axis=axis):
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
# The indices must be in bounds for any axis.
indices = constant_op.constant([0, 1, 0, 2])
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np.take(params_np, [0, 1, 0, 2], axis=axis),
gather_val)
expected_shape = data.shape[:axis] + (4,) + data.shape[axis + 1:]
self.assertEqual(expected_shape, gather_t.get_shape())
def testHigherRank(self):
with ops.Graph().as_default():
# We check that scalar and empty indices shapes work as well
shape = (2, 1, 3, 2)
for indices_shape in (), (0,), (2, 0), (2, 3):
for dtype in _TEST_TYPES:
for axis in range(len(shape)):
params = self._buildParams(np.random.randn(*shape), dtype)
indices = np.random.randint(shape[axis], size=indices_shape)
with self.subTest(
indices_shape=indices_shape,
dtype=dtype,
axis=axis,
indices=indices):
tf_params = constant_op.constant(params)
tf_indices = constant_op.constant(indices)
# Check that both positive and negative indices for axis work.
tf_axis = constant_op.constant(axis)
tf_negative_axis = constant_op.constant(-len(shape) + axis)
gather = array_ops.gather(tf_params, tf_indices, axis=tf_axis)
gather_negative_axis = array_ops.gather(
tf_params, tf_indices, axis=tf_negative_axis)
gather_value, gather_negative_axis_value = self.evaluate(
[gather, gather_negative_axis])
gather_np = np.take(params, indices, axis)
self.assertAllEqual(gather_np, gather_value)
self.assertAllEqual(gather_np, gather_negative_axis_value)
expected_shape = (params.shape[:axis] + indices.shape +
params.shape[axis + 1:])
self.assertEqual(expected_shape, gather.shape)
self.assertEqual(expected_shape, gather_negative_axis.shape)
# Test gradients
gather_grad = np.random.randn(
*gather.get_shape().as_list()).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
gather_grad -= 1j * gather_grad
params_grad, indices_grad, axis_grad = gradients_impl.gradients(
gather, [tf_params, tf_indices, tf_axis], gather_grad)
self.assertIsNone(indices_grad)
self.assertIsNone(axis_grad)
if dtype.is_integer:
self.assertIsNone(params_grad)
continue
# For axis 0, we are able to create an efficient IndexedSlices for
# the gradient.
if axis == 0:
self.assertEqual(
type(params_grad), indexed_slices.IndexedSlices)
params_grad = ops.convert_to_tensor(params_grad)
correct_params_grad = np.zeros(shape).astype(dtype.as_numpy_dtype)
outer_dims = axis
inner_dims = len(shape) - axis - 1
gather_grad = gather_grad.reshape(
shape[:axis] + (indices.size,) + shape[axis + 1:])
for source_index, dest_index in enumerate(indices.flat):
dest_slice = ((slice(None),) * outer_dims + (dest_index,) +
(slice(None),) * inner_dims)
source_slice = ((slice(None),) * outer_dims + (source_index,) +
(slice(None),) * inner_dims)
correct_params_grad[dest_slice] += gather_grad[source_slice]
self.assertAllClose(
correct_params_grad,
self.evaluate(params_grad),
atol=2e-6,
rtol=2e-6)
def testHigherRankGradientTape(self):
# We check that scalar and empty indices shapes work as well
shape = (2, 1, 3, 2)
for indices_shape in (), (0,), (2, 0), (2, 3):
for dtype in _TEST_TYPES:
for axis in range(len(shape)):
params = self._buildParams(np.random.randn(*shape), dtype)
indices = np.random.randint(shape[axis], size=indices_shape)
with self.subTest(
indices_shape=indices_shape,
dtype=dtype,
axis=axis,
indices=indices):
with backprop.GradientTape() as tape:
tf_params = constant_op.constant(params)
tf_indices = constant_op.constant(indices)
# Check that both positive and negative indices for axis work.
tf_axis = constant_op.constant(axis)
tape.watch(tf_params)
tape.watch(tf_indices)
tape.watch(tf_axis)
tf_negative_axis = constant_op.constant(-len(shape) + axis)
gather = array_ops.gather(tf_params, tf_indices, axis=tf_axis)
gather_negative_axis = array_ops.gather(
tf_params, tf_indices, axis=tf_negative_axis)
gather_value, gather_negative_axis_value = self.evaluate(
[gather, gather_negative_axis])
gather_np = np.take(params, indices, axis)
self.assertAllEqual(gather_np, gather_value)
self.assertAllEqual(gather_np, gather_negative_axis_value)
expected_shape = (
params.shape[:axis] + indices.shape + params.shape[axis + 1:])
self.assertEqual(expected_shape, gather.shape)
self.assertEqual(expected_shape, gather_negative_axis.shape)
# Test gradients
gather_grad = np.random.randn(
*gather.get_shape().as_list()).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
gather_grad -= 1j * gather_grad
params_grad, indices_grad, axis_grad = tape.gradient(
gather, [tf_params, tf_indices, tf_axis], gather_grad)
self.assertIsNone(indices_grad)
self.assertIsNone(axis_grad)
if dtype.is_integer:
self.assertIsNone(params_grad)
continue
# For axis 0, we are able to create an efficient IndexedSlices for
# the gradient.
if axis == 0:
self.assertEqual(type(params_grad), indexed_slices.IndexedSlices)
params_grad = ops.convert_to_tensor(params_grad)
correct_params_grad = np.zeros(shape).astype(dtype.as_numpy_dtype)
outer_dims = axis
inner_dims = len(shape) - axis - 1
gather_grad = gather_grad.reshape(shape[:axis] + (indices.size,) +
shape[axis + 1:])
for source_index, dest_index in enumerate(indices.flat):
dest_slice = ((slice(None),) * outer_dims + (dest_index,) +
(slice(None),) * inner_dims)
source_slice = ((slice(None),) * outer_dims + (source_index,) +
(slice(None),) * inner_dims)
correct_params_grad[dest_slice] += gather_grad[source_slice]
self.assertAllClose(
correct_params_grad,
self.evaluate(params_grad),
atol=2e-6,
rtol=2e-6)
def testString(self):
params = np.array([[b"asdf", b"zxcv"], [b"qwer", b"uiop"]])
self.assertAllEqual([b"qwer", b"uiop"], array_ops.gather(params, 1, axis=0))
self.assertAllEqual([b"asdf", b"qwer"], array_ops.gather(params, 0, axis=1))
def testUInt32AndUInt64(self):
for unsigned_type in (dtypes.uint32, dtypes.uint64):
with self.subTest(unsigned_type=unsigned_type):
params = self._buildParams(
np.array([[1, 2, 3], [7, 8, 9]]), unsigned_type)
with self.cached_session():
self.assertAllEqual([7, 8, 9], array_ops.gather(params, 1, axis=0))
self.assertAllEqual([1, 7], array_ops.gather(params, 0, axis=1))
def testUnknownIndices(self):
# This test is purely a test for placeholder inputs, which are only
# applicable in graph mode.
with ops.Graph().as_default():
params = constant_op.constant([[0, 1, 2]])
indices = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices)
self.assertEqual(None, gather_t.get_shape())
def testUnknownAxis(self):
# This test is purely a test for placeholder inputs, which are only
# applicable in graph mode.
with ops.Graph().as_default():
params = constant_op.constant([[0, 1, 2]])
indices = constant_op.constant([[0, 0], [0, 0]])
axis = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices, axis=axis)
# Rank 2 params with rank 2 indices results in a rank 3 shape.
self.assertEqual([None, None, None], gather_t.shape.as_list())
# If indices is also unknown the result rank is unknown.
indices = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices, axis=axis)
self.assertEqual(None, gather_t.shape)
def testBadIndicesType(self):
with self.assertRaisesRegex(
(TypeError, errors.InvalidArgumentError),
"float.* not in.* list of allowed values: int16, int32, int64"):
self.evaluate(array_ops.gather([0], 0.))
@test_util.disable_xla(
"Assertion inside an op is not supported in XLA. Instead XLA clamps the "
"index to be in bounds and returns the indexed value there (Don't rely "
"on this behavior).")
def testBadIndicesCPU(self):
with test_util.force_cpu():
params = [[0, 1, 2], [3, 4, 5]]
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
self.evaluate(array_ops.gather(params, [[7]], axis=0))
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
self.evaluate(array_ops.gather(params, [[7]], axis=1))
def _disabledTestBadIndicesGPU(self):
# TODO: disabled due to different behavior on GPU and CPU.
# On GPU the bad indices do not raise an error but fetch 0 values.
if not test.is_gpu_available():
return
with self.session():
params = [[0, 1, 2], [3, 4, 5]]
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
array_ops.gather(params, [[7]], axis=0).eval()
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
array_ops.gather(params, [[7]], axis=1).eval()
def testBadAxis(self):
@def_function.function(autograph=False, jit_compile=False)
def gather(x, indices, axis):
return array_ops.gather(x, indices, axis=axis)
@def_function.function(
autograph=False,
jit_compile=False,
input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
] * 3)
def gather_shape_inf_disabled(x, indices, axis):
return array_ops.gather(x, indices, axis=axis)
@def_function.function(
autograph=False,
jit_compile=True,
input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
] * 3)
def xla_gather(x, indices, axis):
return array_ops.gather(x, indices, axis=axis)
params = [0, 1, 2]
indices = 0
functions = [("array_ops.gather", array_ops.gather), ("gather", gather),
("gather_shape_inf_disabled", gather_shape_inf_disabled),
("xla_gather", xla_gather)]
for bad_axis in (1, 2, -2):
for fn_name, fn in functions:
# Shape inference can validate axis for known params rank.
with self.subTest(bad_axis=bad_axis, msg=fn_name, fn=fn):
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Shape must be at least rank .* but is rank 1"):
fn(params, indices, axis=bad_axis)
def testEmptySlices(self):
for dtype in _TEST_TYPES:
for itype in np.int32, np.int64:
# Leading axis gather.
with self.subTest(dtype=dtype, itype=itype):
params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
indices = np.array([3, 4], dtype=itype)
gather = array_ops.gather(params, indices, axis=0)
self.assertAllEqual(gather, np.zeros((2, 0, 0)))
# Middle axis gather.
params = np.zeros((0, 7, 0), dtype=dtype.as_numpy_dtype)
gather = array_ops.gather(params, indices, axis=1)
self.assertAllEqual(gather, np.zeros((0, 2, 0)))
# Trailing axis gather.
params = np.zeros((0, 0, 7), dtype=dtype.as_numpy_dtype)
gather = array_ops.gather(params, indices, axis=2)
self.assertAllEqual(gather, np.zeros((0, 0, 2)))
@parameterized.parameters([
# batch_dims=0 (equivalent to tf.gather)
dict( # 2D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[2, 1], [0, 3]],
expected=[[8, 7], [6, 9]]),
dict( # 3D indices
batch_dims=0,
params=[6, 7, 8, 9],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[9, 7], [8, 6]], [[6, 9], [8, 8]]]),
dict( # 4D indices
batch_dims=0,
params=[8, 9],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[8, 9], [9, 8]], [[8, 8], [9, 9]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
# batch_dims=indices.shape.ndims - 1
# (equivalent to tf.compat.v1.batch_gather)
dict( # 2D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=2,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
dict( # 2D indices (1 batch dim)
batch_dims=-1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[2, 1], [0, 3]],
expected=[[12, 11], [20, 23]]),
dict( # 3D indices (2 batch dims)
batch_dims=-1,
params=[[[100, 101], [110, 111]], [[200, 201], [210, 211]]],
indices=[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
expected=[[[100, 101], [111, 110]], [[200, 200], [211, 211]]]),
# batch_dims=indices.shape.ndims
dict( # 1D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[2, 1],
expected=[12, 21]),
dict( # 2D indices (2 batch dims)
batch_dims=2,
params=[[[100, 101, 102, 103], [110, 111, 112, 113]],
[[200, 201, 202, 203], [210, 211, 212, 213]]],
indices=[[2, 1], [0, 3]],
expected=[[102, 111], [200, 213]]),
# 0 < batch_dims < indices.shape.ndims - 1
dict( # 3D indices (1 batch dim)
batch_dims=1,
params=[[10, 11, 12, 13], [20, 21, 22, 23]],
indices=[[[3, 1], [2, 0]], [[0, 3], [2, 2]]],
expected=[[[13, 11], [12, 10]], [[20, 23], [22, 22]]]),
dict( # 4D indices (1 batch dim)
batch_dims=1,
params=[[6, 7], [8, 9]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[6, 7], [7, 6]], [[6, 6], [7, 7]]],
[[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
dict( # 4D indices (2 batch dims)
batch_dims=2,
params=[[[2, 3], [4, 5]], [[6, 7], [8, 9]]],
indices=[[[[0, 1], [1, 0]], [[0, 0], [1, 1]]],
[[[1, 1], [0, 0]], [[0, 1], [1, 0]]]],
expected=[[[[2, 3], [3, 2]], [[4, 4], [5, 5]]],
[[[7, 7], [6, 6]], [[8, 9], [9, 8]]]]),
# axis > 0
dict( # 3D indices, batch_dims=1, axis=2
# params.shape = [I1, J1, J2] = [2, 2, 3]
# indices.shape = [I1, K1, K2] = [2, 1, 5]
# result.shape = [I1, J1, K1, K2] = [2, 2, 1, 5]
batch_dims=1,
axis=2,
params=[[[10, 11, 12], [13, 14, 15]], [[20, 21, 22], [23, 24, 25]]],
indices=[[[0, 1, 2, 1, 0]], [[0, 1, 2, 1, 0]]],
expected=[[[[10, 11, 12, 11, 10]], [[13, 14, 15, 14, 13]]],
[[[20, 21, 22, 21, 20]], [[23, 24, 25, 24, 23]]]]),
dict( # 1D indices, batch_dims=None, axis=1
batch_dims=None,
axis=1,
params=[[10, 11, 12], [13, 14, 15]],
indices=[1, 0],
expected=[[11, 10], [14, 13]]),
dict( # 3D indices, batch_dims=-3, axis=1
batch_dims=-3,
axis=1,
params=[[0, 1, 2], [3, 4, 5]],
indices=[[[0, 1], [1, 0]]],
expected=[[[[0, 1], [1, 0]]], [[[3, 4], [4, 3]]]]),
])
@test_util.run_in_graph_and_eager_modes
def testBatchDims(self, params, indices, batch_dims, expected=None,
axis=None):
result = array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)
self.assertAllEqual(expected, result)
# Test gradients
f64_params = math_ops.cast(params, dtypes.float64)
def gather(params):
return array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)
theoretical, numerical = gradient_checker_v2.compute_gradient(
gather, [f64_params])
self.assertAllClose(theoretical, numerical)
# Test gradients when input shapes are unknown
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float64),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
])
def gather_unknown_shapes(params, indices):
return array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)
if batch_dims is None or batch_dims >= 0:
theoretical, numerical = gradient_checker_v2.compute_gradient(
lambda p: gather_unknown_shapes(p, indices), [f64_params])
self.assertAllClose(theoretical, numerical)
else:
with self.assertRaisesRegex(
ValueError,
"Currently, it is unsupported to take the gradient of tf.gather"):
gradient_checker_v2.compute_gradient(
lambda p: gather_unknown_shapes(p, indices), [f64_params])
# Test the gradients shape.
with backprop.GradientTape() as tape:
zeros = array_ops.zeros_like(params, dtype=dtypes.float32)
tape.watch(zeros)
values = zeros * 2 + zeros
result = array_ops.gather(
values, indices, axis=axis, batch_dims=batch_dims)
gradients = tape.gradient(result, zeros)
self.assertAllEqual(array_ops.shape(params), array_ops.shape(gradients))
# Run the same test for strings.
params = _to_str_elements(params)
expected = _to_str_elements(expected)
result = array_ops.gather(
params, indices, axis=axis, batch_dims=batch_dims)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=2,
output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
# = params.shape[:2] + indices.shape[2:] + params.shape[3:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=3,
output_shape=[2, 3, 4, 8, 9, 10, 6, 7]
# = params.shape[:3] + indices.shape[2:] + params.shape[4:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=4,
output_shape=[2, 3, 4, 5, 8, 9, 10, 7]
# = params.shape[:4] + indices.shape[2:] + params.shape[5:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=5,
output_shape=[2, 3, 4, 5, 6, 8, 9, 10]
# = params.shape[:5] + indices.shape[2:] + params.shape[6:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=-4,
output_shape=[2, 3, 8, 9, 10, 5, 6, 7]
# = params.shape[:2] + indices.shape[2:] + params.shape[3:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=-3,
output_shape=[2, 3, 4, 8, 9, 10, 6, 7]
# = params.shape[:3] + indices.shape[2:] + params.shape[4:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=-2,
output_shape=[2, 3, 4, 5, 8, 9, 10, 7]
# = params.shape[:4] + indices.shape[2:] + params.shape[5:]
),
dict(
params_shape=[2, 3, 4, 5, 6, 7],
indices_shape=[2, 3, 8, 9, 10],
batch_dims=2,
axis=-1,
output_shape=[2, 3, 4, 5, 6, 8, 9, 10]
# = params.shape[:5] + indices.shape[2:] + params.shape[6:]
),
])
@test_util.run_in_graph_and_eager_modes
def testBatchDimsMatchesPythonBatching(self, params_shape, indices_shape,
batch_dims, axis, output_shape):
"""Checks that batch_dims matches multiple calls to tf.gather()."""
# Generate a `params` tensor with the indicated shape.
params_size = np.prod(params_shape)
params = np.reshape(np.arange(params_size), params_shape)
# Generate an `indices` tensor with the indicated shape, where each index
# is within the appropriate range.
indices_size = np.prod(indices_shape)
indices = np.reshape(np.arange(indices_size), indices_shape)
indices = indices % params_shape[axis]
# Perform repeated (batched) gather operations with numpy, to find the
# expected result.
expected = self._batchNumpyGather(params, indices, axis, batch_dims)
# On Windows, we get an exception if we pass in the transformed numpy
# arrays ("Failed to convert numpy ndarray to a Tensor (Unsupported
# feed type)."); so convert them back to lists before calling tf.gather.
params = params.tolist()
indices = indices.tolist()
result = array_ops.gather(params, indices, axis=axis, batch_dims=batch_dims)
self.assertAllEqual(output_shape, result.shape.as_list())
self.assertAllEqual(expected, result)
# Run the same test for strings.
params = _to_str_elements(params)
expected = _to_str_elements(expected.tolist())
result = array_ops.gather(
params, indices, axis=axis, batch_dims=batch_dims)
self.assertAllEqual(output_shape, result.shape.as_list())
self.assertAllEqual(expected, result)
def _batchNumpyGather(self, params, indices, axis, batch_dims):
"""Performs a batch gather by making recursive calls to np.take().
This is used by testBatchDims() to construct the expected value.
Args:
params: A numpy array
indices: A numpy array
axis: An integer
batch_dims: An integer
Returns:
A numpy array
"""
if batch_dims == 0:
return np.take(params, indices, axis=axis)
self.assertEqual(params.shape[0], indices.shape[0])
if axis > 0:
axis -= 1
return np.stack([
self._batchNumpyGather(params[i], indices[i], axis, batch_dims - 1)
for i in range(params.shape[0])
])
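# Illustrative note: with batch_dims=1 and axis=1 the recursion above reduces
# to np.stack([np.take(params[i], indices[i], axis=0)
#              for i in range(params.shape[0])]),
# i.e. one independent np.take per leading batch entry.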
@test_util.run_v1_only("RefVariable is not supported in v2")
def testGatherRefVariable(self):
with self.cached_session():
v = variables.RefVariable(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
self.evaluate(variables.global_variables_initializer())
gather = array_ops.gather(v, [0, 2])
if not context.executing_eagerly(): # .op doesn't make sense in Eager
self.assertEqual("GatherV2", gather.op.name)
self.assertAllEqual([[1, 2], [5, 6]], gather)
@test_util.run_in_graph_and_eager_modes
def testGatherResourceVariable(self):
with self.cached_session():
v = resource_variable_ops.ResourceVariable(
constant_op.constant([[1, 2], [3, 4], [5, 6]]))
self.evaluate(variables.global_variables_initializer())
gather = array_ops.gather(v, [0, 2])
if not context.executing_eagerly(): # .op doesn't make sense in Eager
self.assertEqual("ResourceGather", gather.op.inputs[0].op.type)
self.assertAllEqual([[1, 2], [5, 6]], gather)
if __name__ == "__main__":
test.main()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import os
import plistlib
import shutil
import tempfile
import xml.parsers.expat
from telemetry.core import os_version
from telemetry import decorators
from telemetry.internal.platform import power_monitor
import py_utils
# TODO: rename this class (seems like this is used by mac)
class PowerMetricsPowerMonitor(power_monitor.PowerMonitor):
def __init__(self, backend):
super(PowerMetricsPowerMonitor, self).__init__()
self._powermetrics_process = None
self._backend = backend
self._output_filename = None
self._output_directory = None
@property
def binary_path(self):
return '/usr/bin/powermetrics'
def StartMonitoringPower(self, browser):
self._CheckStart()
# Empirically powermetrics creates an empty output file immediately upon
# starting. We detect file creation as a signal that measurement has
# started. In order to avoid various race conditions in tempfile creation
# we create a temp directory and have powermetrics create its output
# there rather than, say, creating a tempfile, deleting it, and reusing its
# name.
self._output_directory = tempfile.mkdtemp()
self._output_filename = os.path.join(self._output_directory,
'powermetrics.output')
args = ['-f', 'plist',
'-u', self._output_filename,
'-i0',
'--show-usage-summary']
self._powermetrics_process = self._backend.LaunchApplication(
self.binary_path, args, elevate_privilege=True)
# Block until output file is written to ensure this function call is
# synchronous in respect to powermetrics starting.
def _OutputFileExists():
return os.path.isfile(self._output_filename)
py_utils.WaitFor(_OutputFileExists, 1)
@decorators.Cache
def CanMonitorPower(self):
mavericks_or_later = (
self._backend.GetOSVersionName() >= os_version.MAVERICKS)
binary_path = self.binary_path
return mavericks_or_later and self._backend.CanLaunchApplication(
binary_path)
@staticmethod
def _ParsePlistString(plist_string):
"""Wrapper to parse a plist from a string and catch any errors.
Sometimes powermetrics will exit in the middle of writing its output;
empirically it seems that it always writes at least one sample in its
entirety, so we can safely ignore any errors in its output.
Returns:
Parser output on successful parse, None on parse error.
"""
try:
return plistlib.readPlistFromString(plist_string)
except xml.parsers.expat.ExpatError:
return None
@staticmethod
def ParsePowerMetricsOutput(powermetrics_output):
"""Parse output of powermetrics command line utility.
Returns:
Dictionary in the format returned by StopMonitoringPower(), or an empty
dict if |powermetrics_output| is empty (crbug.com/353250).
"""
if len(powermetrics_output) == 0:
logging.warning('powermetrics produced zero length output')
return {}
# Container to collect samples for running averages.
# out_path - list containing the key path in the output dictionary.
# src_path - list containing the key path to get the data from in
# powermetrics' output.
def ConstructMetric(out_path, src_path):
RunningAverage = collections.namedtuple('RunningAverage', [
'out_path', 'src_path', 'samples'])
return RunningAverage(out_path, src_path, [])
# List of RunningAverage objects specifying metrics we want to aggregate.
metrics = [
ConstructMetric(
['platform_info', 'average_frequency_hz'],
['processor', 'freq_hz']),
ConstructMetric(
['platform_info', 'idle_percent'],
['processor', 'packages', 0, 'c_state_ratio'])]
def DataWithMetricKeyPath(metric, powermetrics_output):
"""Retrieve the sample from powermetrics' output for a given metric.
Args:
metric: The RunningAverage object we want to collect a new sample for.
powermetrics_output: Dictionary containing powermetrics output.
Returns:
The sample corresponding to |metric|'s keypath."""
# Get actual data corresponding to key path.
out_data = powermetrics_output
for k in metric.src_path:
out_data = out_data[k]
assert type(out_data) in [int, float], (
'Was expecting a number: %s (%s)' % (type(out_data), out_data))
return float(out_data)
sample_durations = []
total_energy_consumption_mwh = 0
# powermetrics outputs multiple plists separated by null terminators.
raw_plists = powermetrics_output.split('\0')
raw_plists = [x for x in raw_plists if len(x) > 0]
assert len(raw_plists) == 1
# -------- Examine contents of first plist for system specs. --------
plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plists[0])
if not plist:
logging.warning('powermetrics produced invalid output, output length: '
'%d', len(powermetrics_output))
return {}
# Powermetrics doesn't record power usage when running on a VM.
hw_model = plist.get('hw_model')
if hw_model and hw_model.startswith('VMware'):
return {}
if 'GPU' in plist:
metrics.extend([
ConstructMetric(
['component_utilization', 'gpu', 'average_frequency_hz'],
['GPU', 0, 'freq_hz']),
ConstructMetric(
['component_utilization', 'gpu', 'idle_percent'],
['GPU', 0, 'c_state_ratio'])])
# There's no way of knowing ahead of time how many cpus and packages the
# current system has. Iterate over cores and cpus - construct metrics for
# each one.
if 'processor' in plist:
core_dict = plist['processor']['packages'][0]['cores']
num_cores = len(core_dict)
cpu_num = 0
for core_idx in xrange(num_cores):
num_cpus = len(core_dict[core_idx]['cpus'])
base_src_path = ['processor', 'packages', 0, 'cores', core_idx]
for cpu_idx in xrange(num_cpus):
base_out_path = ['component_utilization', 'cpu%d' % cpu_num]
# C State ratio is per-package, component CPUs of that package may
# have different frequencies.
metrics.append(ConstructMetric(
base_out_path + ['average_frequency_hz'],
base_src_path + ['cpus', cpu_idx, 'freq_hz']))
metrics.append(ConstructMetric(
base_out_path + ['idle_percent'],
base_src_path + ['c_state_ratio']))
cpu_num += 1
# -------- Parse Data Out of Plists --------
plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plists[0])
if not plist:
logging.error('Error parsing plist.')
return {}
# Duration of this sample.
sample_duration_ms = int(plist['elapsed_ns']) / 10 ** 6
sample_durations.append(sample_duration_ms)
if 'processor' not in plist:
logging.error("'processor' field not found in plist.")
return {}
processor = plist['processor']
total_energy_consumption_mwh = (
(float(processor.get('package_joules', 0)) / 3600.) * 10 ** 3)
for m in metrics:
try:
m.samples.append(DataWithMetricKeyPath(m, plist))
except KeyError:
# Old CPUs don't have c-states, so if data is missing, just ignore it.
logging.info('Field missing from powermetrics output: %s', m.src_path)
continue
# -------- Collect and Process Data --------
out_dict = {}
out_dict['identifier'] = 'powermetrics'
out_dict['energy_consumption_mwh'] = total_energy_consumption_mwh
def StoreMetricAverage(metric, sample_durations, out):
"""Calculate average value of samples in a metric and store in output
path as specified by metric.
Args:
metric: A RunningAverage object containing samples to average.
sample_durations: A list which parallels the samples list containing
the time slice for each sample.
out: The output dict; the average is stored in the location specified by
metric.out_path.
"""
if len(metric.samples) == 0:
return
assert len(metric.samples) == len(sample_durations)
avg = 0
for i in xrange(len(metric.samples)):
avg += metric.samples[i] * sample_durations[i]
avg /= sum(sample_durations)
# Store data in output, creating empty dictionaries as we go.
for k in metric.out_path[:-1]:
if k not in out:
out[k] = {}
out = out[k]
out[metric.out_path[-1]] = avg
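# A worked example of the weighted average above (illustrative numbers, not
# from powermetrics): samples [2.0e9, 4.0e9] Hz over durations [100, 300] ms
# give (2.0e9*100 + 4.0e9*300) / (100 + 300) = 3.5e9 Hz, so longer samples
# carry proportionally more weight.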
for m in metrics:
StoreMetricAverage(m, sample_durations, out_dict)
if 'tasks' not in plist:
logging.error("'tasks' field not found in plist.")
return {}
# The following CPU metrics are already time-normalized, and segmented by
# process. Sum the metrics across all Chrome processes.
cputime = 0
energy_impact = 0
browser_process_count = 0
idle_wakeups = 0
for task in plist['tasks']:
if 'Chrome' in task['name'] or 'Chromium' in task['name']:
if 'Helper' not in task['name']:
browser_process_count += 1
cputime += float(task['cputime_ms_per_s'])
energy_impact += float(task.get('energy_impact', 0))
idle_wakeups += float(task['idle_wakeups_per_s'])
if browser_process_count == 0:
logging.warning('No Chrome or Chromium browser process found with '
'powermetrics. Chrome CPU metrics will not be emitted.')
return {}
elif browser_process_count >= 2:
logging.warning('powermetrics found more than one Chrome or Chromium '
'browser. Chrome CPU metrics will not be emitted.')
# During Telemetry unit tests, there may be multiple Chrome browsers
# present. Don't add cpu metrics, but don't return {} either.
else: # browser_process_count == 1:
chrome_dict = {}
chrome_dict['cputime_ms_per_s'] = cputime
chrome_dict['energy_impact'] = energy_impact
chrome_dict['idle_wakeups_per_s'] = idle_wakeups
out_dict['component_utilization']['chrome'] = chrome_dict
return out_dict
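# For reference, a successful parse returns a dictionary shaped roughly like
# the sketch below (keys taken from the out_path values used above; the exact
# cpu/gpu entries depend on the hardware powermetrics reports on):
#
#   {'identifier': 'powermetrics',
#    'energy_consumption_mwh': <float>,
#    'platform_info': {'average_frequency_hz': ..., 'idle_percent': ...},
#    'component_utilization': {
#        'gpu': {...}, 'cpu0': {...}, ...,
#        'chrome': {'cputime_ms_per_s': ..., 'energy_impact': ...,
#                   'idle_wakeups_per_s': ...}}}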
def _KillPowerMetricsProcess(self):
"""Kill a running powermetrics process."""
try:
if self._powermetrics_process.poll() is None:
self._powermetrics_process.terminate()
except OSError as e:
logging.warning(
'Error when trying to terminate powermetrics process: %s', repr(e))
if self._powermetrics_process.poll() is None:
# terminate() can fail when powermetrics does not have the setuid bit set.
self._backend.LaunchApplication(
'/usr/bin/pkill',
['-SIGTERM', os.path.basename(self.binary_path)],
elevate_privilege=True)
def StopMonitoringPower(self):
self._CheckStop()
# Tell powermetrics to take an immediate sample.
try:
self._KillPowerMetricsProcess()
(power_stdout, power_stderr) = self._powermetrics_process.communicate()
returncode = self._powermetrics_process.returncode
assert returncode in [0, -15], (
"""powermetrics error
return code=%d
stdout=(%s)
stderr=(%s)""" % (returncode, power_stdout, power_stderr))
with open(self._output_filename, 'rb') as output_file:
powermetrics_output = output_file.read()
return PowerMetricsPowerMonitor.ParsePowerMetricsOutput(
powermetrics_output)
except Exception as e:
logging.warning(
'Error when trying to collect power monitoring data: %s', repr(e))
return PowerMetricsPowerMonitor.ParsePowerMetricsOutput('')
finally:
shutil.rmtree(self._output_directory)
self._output_directory = None
self._output_filename = None
self._powermetrics_process = None
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""*Experimental* support for running Keras models on the TPU.
To use, wrap your model with the `keras_support.tpu_model` function.
Example usage:
```
image = tf.keras.layers.Input(shape=(28, 28, 3), name='image')
c1 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3))(image)
flattened = tf.keras.layers.Flatten()(c1)
logits = tf.keras.layers.Dense(10, activation='softmax')(flattened)
model = tf.keras.Model(inputs=[image], outputs=[logits])
strategy = keras_support.TPUDistributionStrategy(num_cores_per_host=8)
model = keras_support.tpu_model(model,
strategy=strategy,
tpu_name_or_address=tpu_name)
# Only TF optimizers are currently supported.
model.compile(optimizer=tf.train.AdamOptimizer(), ...)
# `images` and `labels` should be Numpy arrays. Support for tensor input
# (e.g. datasets) is planned.
model.fit(images, labels)
```
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import re
import sys
import time
import numpy as np
from tensorflow.contrib.cluster_resolver.python.training import tpu_cluster_resolver
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.tpu.proto import compilation_result_pb2 as tpu_compilation_result
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers as keras_optimizers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.layers import embeddings
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
TPUDistributionStrategy = tpu_strategy.TPUStrategy # pylint: disable=invalid-name
class TPUEmbedding(embeddings.Embedding):
"""TPU compatible embedding layer.
The default Keras layer is not TPU compatible. This layer is a drop-in
replacement: it has the same behavior and will work on CPU and GPU devices.
"""
def build(self, input_shape):
if input_shape[0] is None:
raise ValueError(
'TPUEmbeddings must have a fixed input_length or input shape.')
return super(TPUEmbedding, self).build(input_shape)
def call(self, inputs):
if K.dtype(inputs) != 'int32':
inputs = math_ops.cast(inputs, 'int32')
inputs = array_ops.one_hot(inputs, self.input_dim)
return math_ops.tensordot(inputs, self.embeddings, 1)
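# A minimal usage sketch (illustrative, not part of the original module): the
# layer is used exactly like `tf.keras.layers.Embedding`, but the input shape
# must be fully specified so that `build()` above does not reject it, e.g.
#
#   tokens = tf.keras.layers.Input(shape=(100,), dtype='int32')
#   embedded = TPUEmbedding(input_dim=10000, output_dim=64,
#                           input_length=100)(tokens)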
class KerasCrossShardOptimizer(keras_optimizers.Optimizer):
"""An optimizer that averages gradients across TPU shards."""
def __init__(self, opt, name='KerasCrossShardOptimizer'):
"""Construct a new cross-shard optimizer.
Args:
opt: An existing `Optimizer` to encapsulate.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "KerasCrossShardOptimizer".
Raises:
ValueError: If reduction is not a valid cross-shard reduction.
"""
super(KerasCrossShardOptimizer, self).__init__()
self._name = name
self._opt = opt
def get_updates(self, loss, params):
logging.info('Get updates: %s', loss)
self._opt.get_gradients = self.get_gradients
return self._opt.get_updates(loss, params)
def get_gradients(self, loss, params):
num_shards = tpu_function.get_tpu_context().number_of_shards
grads = super(KerasCrossShardOptimizer, self).get_gradients(loss, params)
return [tpu_ops.cross_replica_sum(grad) / num_shards for grad in grads]
def set_weights(self, weights):
self._opt.set_weights(weights)
def get_weights(self):
return self._opt.get_weights()
@property
def lr(self):
return self._opt.lr
class TPUModelOp(
collections.namedtuple('TPUModelOp', [
'compile_op', 'execute_op', 'infeed_tensors', 'infeed_op', 'outfeed_op'
])):
pass
def _valid_name(tensor_name):
"""Return a valid tensor name (strips '/', ':', etc)."""
return re.sub('[^a-zA-Z0-9_-]+', '', tensor_name)
def _replicated_optimizer(opt):
"""Wrap the optimizer `opt` with CrossShardOptimizer if applicable."""
if tpu_function.get_tpu_context().number_of_shards == 1:
return opt
if isinstance(opt, keras_optimizers.TFOptimizer):
return tpu_optimizer.CrossShardOptimizer(opt.optimizer)
else:
return KerasCrossShardOptimizer(opt)
class TPURewriteContext(object):
"""Prepare the environment for a Keras model during `tpu.rewrite`.
This overrides the default placeholder behaviour to instead refer to a preset
input mapping. Placeholders are unsupported in TPU compiled code, and must
be replaced with explicit inputs or values from the infeed queue.
Instead of explicitly threading inputs all the way through the Keras codebase,
we override the behavior of the placeholder while compiling and inject the
Tensors from the infeed in place of the placeholder.
Similarly, as we compile a new sub-graph for each unique shape and execution
mode, we need to override the behavior of an embedded `name_scope` call in
the base Keras layer code. This allows us to re-use the same weights across
many compiles and share a single session/graph.
"""
def __init__(self, input_map):
self._input_map = input_map
self._default_placeholder = None
self._default_name_scope = None
def __enter__(self):
def _placeholder(dtype, shape=None, name=None): # pylint: disable=unused-argument
logging.info('Remapping placeholder for %s', name)
if name in self._input_map:
return self._input_map[name]
else:
logging.info('Default: %s', name)
return self._default_placeholder(dtype, shape, name)
def _name_scope(name, default_name=None, values=None):
caller_frame = sys._getframe().f_back
caller_obj = caller_frame.f_locals.get('self')
if (caller_obj is not None and
isinstance(caller_obj, base_layer.Layer) and name is not None):
return variable_scope.variable_scope(
name, default_name, values, reuse=variable_scope.AUTO_REUSE)
return self._default_name_scope(name, default_name, values)
self._default_placeholder = array_ops.placeholder
self._default_name_scope = ops.name_scope
self._default_make_variable = base_layer.make_variable
self._default_random_normal = random_ops.random_normal
self._default_qr = gen_linalg_ops.qr
array_ops.placeholder = _placeholder
# Replace random_ops.random_normal with a dummy function because
# `random_normal` isn't yet implemented on the TPU. Because these
# initialized values are overwritten by the CPU values, this is okay.
def random_normal(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
seed=None,
name=None):
del mean
del stddev
del seed
return array_ops.zeros(shape, dtype=dtype, name=name)
random_ops.random_normal = random_normal
# Replace gen_linalg_ops.qr because QR decomposition is not yet implemented.
# TODO(saeta): Remove qr override once we confirm the qr implementation is
# ok.
# pylint: disable=redefined-builtin
def qr(input, full_matrices=False, name=None):
"""Dummy implementation of qr decomposition."""
del full_matrices # TODO(saeta): Properly handle the full matrix case.
input_shape = input.shape
if len(input_shape) < 2:
raise ValueError('Invalid shape passed to qr: %s' % input_shape)
p = min(input_shape[-1], input_shape[-2])
if len(input_shape) == 2:
q = array_ops.zeros((p, p), name=name)
r = array_ops.zeros(input_shape, name=name)
return (r, q)
elif len(input_shape) == 3:
n = input_shape[0]
q = array_ops.zeros((n, p, p), name=name)
r = array_ops.zeros(input_shape, name=name)
return (r, q)
else:
raise ValueError('Invalid shape passed to qr: %s' % input_shape)
gen_linalg_ops.qr = qr
ops.name_scope = _name_scope
base_layer.make_variable = variable_scope.get_variable
logging.info('Overriding default placeholder.')
return
def __exit__(self, exc_type, exc_val, exc_tb):
array_ops.placeholder = self._default_placeholder
ops.name_scope = self._default_name_scope
base_layer.make_variable = self._default_make_variable
random_ops.random_normal = self._default_random_normal
gen_linalg_ops.qr = self._default_qr
class SizedInfeed(collections.namedtuple('SizedInfeed',
['sharded_infeed_tensors',
'infeed_ops'])):
"""Represents an instantiation of the infeed ops for a concrete input shape.
sharded_infeed_tensors: A data structure of Tensors used to represent the
placeholder tensors that must be fed when using feed_dicts.
infeed_ops: the set of ops that will be run to drive infeed for a single step.
"""
pass
class TPUInfeedInstance(object):
"""TPUInfeedInstance represents the logic to manage feeding in a single step.
See the comments on the `TPUInfeedManager` for a description for how infeed
is managed.
"""
@abc.abstractmethod
def make_input_specs(self, input_tensors):
"""Constructs the infeed_specs for the given Infeed instance.
Args:
input_tensors: The inputs to the model.
Returns:
A list of `tf.TensorSpec` objects describing the infeed inputs.
"""
pass
def make_feed_dict(self, tpu_model_op):
"""Constructs a feed_dict for this instance, given the tpu_model_op.
Args:
tpu_model_op: A `TPUModelOp` representing the TPU Model for this
instance's input spec.
Returns:
A dictionary to use as the feed_dict of a `session.run` call.
"""
pass
class TPUInfeedManager(object):
"""TPUInfeedManager manages the data infeeding of data to a TPU computation.
Because there are multiple data sources (e.g. in-memory NumPy arrays,
`tf.data.Dataset`s), we abstract the different logic behind a single
interface: the `TPUInfeedManager`.
(1) A `TPUFunction` is called with a set of inputs. Based on the inputs,
`TPUFunction` retrieves the corresponding `TPUInfeedManager` (or constructs a
new one if required).
(2) The `TPUFunction` calls `make_infeed_instance` on the `TPUInfeedManager`
which returns a `TPUInfeedInstance`.
(3) The `TPUFunction` checks in the shape cache for a pre-compiled instance of
the model based on the returned `input_specs` from `TPUInfeedInstance`.
(4) [Optional.] If the model has not already been instantiated for the given
input spec, the `TPUFunction` compiles the model for the input spec (using the
`TPUInfeedManager`).
(5) The `TPUInfeedInstance` constructs the session.run's feed_dict given the
compiled model instance corresponding to its shape.
"""
@abc.abstractmethod
def make_infeed_instance(self, inputs):
"""Given a single step's input, construct a `TPUInfeedInstance`.
Args:
inputs: The inputs to a given step.
Returns:
A subclass of `TPUInfeedInstance`.
"""
pass
@abc.abstractmethod
def build_infeed_from_input_specs(self, input_specs, execution_mode):
"""For a given input specification (size, type), construct the infeed ops.
This is called only once for a given input specification and builds the
graph ops. It does not have a pointer to the actual infeed data.
Args:
input_specs: TODO(saeta): Document me!
execution_mode: TODO(saeta): Document me!
Returns:
A `SizedInfeed` instance.
"""
pass
class TPUNumpyInfeedManager(TPUInfeedManager):
"""TPU Infeed manager for Numpy inputs."""
class NumpyInfeedInstance(TPUInfeedInstance):
"""Infeed instance for Numpy inputs."""
def __init__(self, sharded_inputs):
self._sharded_inputs = sharded_inputs
def make_input_specs(self, input_tensors):
# Compute an input specification (used to generate infeed enqueue and
# dequeue operations). We use the shape from our input array and the
# dtype from our model. A user may pass in a float64 for a float32
# input: for model compatibility we still must generate a float32 infeed.
input_specs = []
# We use the shape and dtype from the first shard to compute the input
# metadata (`input_specs`); all replicas have the same type and shape.
for tensor, ary in zip(input_tensors, self._sharded_inputs[0]):
input_specs.append(
tensor_spec.TensorSpec(ary.shape, tensor.dtype,
_valid_name(tensor.name)))
return input_specs
def make_feed_dict(self, tpu_model_op):
infeed_dict = {}
for infeed_tensors, inputs in zip(tpu_model_op.infeed_tensors,
self._sharded_inputs):
for tensor, value in zip(infeed_tensors, inputs):
infeed_dict[tensor] = value
return infeed_dict
def __init__(self, distribution_strategy):
self._strategy = distribution_strategy
def _split_tensors(self, inputs):
"""Split input data across shards.
Each input is sliced along the batch axis.
Args:
inputs: List of Numpy arrays to run on the TPU.
Returns:
List of lists containing the input to feed to each TPU shard.
"""
if self._strategy.num_towers == 1:
return [inputs]
batch_size = inputs[0].shape[0]
assert batch_size % self._strategy.num_towers == 0, (
'batch_size must be divisible by strategy.num_towers (%s vs %s)' %
(batch_size, self._strategy.num_towers))
shard_size = batch_size // self._strategy.num_towers
input_list = []
for index in range(self._strategy.num_towers):
shard_inputs = [
x[index * shard_size:(index + 1) * shard_size] for x in inputs
]
input_list.append(shard_inputs)
return input_list
def make_infeed_instance(self, inputs):
sharded_inputs = self._split_tensors(inputs)
return self.NumpyInfeedInstance(sharded_inputs)
def build_infeed_from_input_specs(self, input_specs, execution_mode):
infeed_op = []
shard_infeed_tensors = []
for shard_id in range(self._strategy.num_towers):
with ops.device('/device:CPU:0'):
infeed_tensors = []
with ops.device('/device:TPU:%d' % shard_id):
for spec in input_specs:
# Construct placeholders for each of the inputs.
infeed_tensors.append(
array_ops.placeholder(
dtype=spec.dtype,
shape=spec.shape,
name='infeed-enqueue-%s-%d' % (spec.name, shard_id)))
shard_infeed_tensors.append(infeed_tensors)
infeed_op.append(
tpu_ops.infeed_enqueue_tuple(
infeed_tensors, [spec.shape for spec in input_specs],
name='infeed-enqueue-%s-%d' % (execution_mode, shard_id),
device_ordinal=shard_id))
return SizedInfeed(infeed_ops=infeed_op,
sharded_infeed_tensors=shard_infeed_tensors)
class TPUDatasetInfeedManager(TPUInfeedManager):
"""Manages infeed for a `tf.data.Dataset` into a TPU computation.
"""
class DatasetInfeedInstance(TPUInfeedInstance):
"""An instance of the TPU infeed."""
def __init__(self, input_specs):
self._input_specs = input_specs
def make_input_specs(self, input_tensors):
# TODO(saeta): Do error checking here!
return self._input_specs
def make_feed_dict(self, tpu_model_op):
# TODO(saeta): Verify tpu_model_op is as expected!
return {}
def __init__(self, dataset, distribution_strategy, tpu_session):
"""Constructs a TPUDatasetInfeedManager.
Must be called within a `KerasTPUModel.tpu_session` context!
Args:
dataset: A `tf.data.Dataset` to infeed.
distribution_strategy: The `TPUDistributionStrategy` used to configure the
Keras TPU model.
tpu_session: The `tf.Session` object used for running the TPU model.
"""
self._verify_dataset_shape(dataset)
self._dataset = dataset
self._strategy = distribution_strategy
dummy_x_shape = dataset.output_shapes[0].as_list()
dummy_x_shape[0] *= distribution_strategy.num_towers
dummy_y_shape = dataset.output_shapes[1].as_list()
dummy_y_shape[0] *= distribution_strategy.num_towers
self._iterator = dataset.make_initializable_iterator()
tpu_session.run(self._iterator.initializer)
self._get_next_ops = []
ctrl_deps = []
for i in range(distribution_strategy.num_towers):
with ops.control_dependencies(ctrl_deps): # Ensure deterministic
# TODO(saeta): Ensure correct placement!
get_next_op = self._iterator.get_next()
self._get_next_ops.append(get_next_op)
ctrl_deps.extend(get_next_op)
# Use dummy numpy inputs for the rest of Keras' shape checking. We
# intercept them when building the model.
self._dummy_x = np.zeros(dummy_x_shape,
dtype=dataset.output_types[0].as_numpy_dtype)
self._dummy_y = np.zeros(dummy_y_shape,
dtype=dataset.output_types[1].as_numpy_dtype)
input_specs = []
if isinstance(self._iterator.output_shapes, tuple):
assert isinstance(self._iterator.output_types, tuple)
assert len(self._iterator.output_shapes) == len(
self._iterator.output_types)
for i in range(len(self._iterator.output_shapes)):
spec = tensor_spec.TensorSpec(self._iterator.output_shapes[i],
self._iterator.output_types[i])
input_specs.append(spec)
elif isinstance(self._iterator.output_shapes, tensor_shape.TensorShape):
spec = tensor_spec.TensorSpec(self._iterator.output_shapes,
self._iterator.output_types)
input_specs.append(spec)
self._infeed_instance = self.DatasetInfeedInstance(input_specs)
def _verify_dataset_shape(self, dataset):
"""Verifies a dataset is of an appropriate shape for TPUs."""
if not isinstance(dataset, dataset_ops.Dataset):
raise ValueError('The function passed as the `x` parameter did not '
'return a `tf.data.Dataset`.')
if not isinstance(dataset.output_classes, tuple):
raise ValueError('The dataset must return a tuple of tf.Tensors, '
'instead it returns: %s' % dataset.output_classes)
if len(dataset.output_classes) != 2:
raise ValueError(
'The dataset must return a 2-element tuple, got '
'%s output classes instead.' % (dataset.output_classes,))
for i, cls in enumerate(dataset.output_classes):
if cls != ops.Tensor:
raise ValueError('The dataset returned a non-Tensor type (%s) at '
'index %d.' % (cls, i))
for i, shape in enumerate(dataset.output_shapes):
if not shape:
raise ValueError('The dataset returns a scalar tensor in '
'tuple index %d. Did you forget to batch? '
'(Output shapes: %s).' % (i,
dataset.output_shapes))
for j, dim in enumerate(shape):
if dim.value is None:
if j == 0:
hint = (' Hint: did you use `ds.batch(BATCH_SIZE, '
'drop_remainder=True)`?')
else:
hint = ''
raise ValueError(
'The Keras-TPU integration for `tf.data` '
'currently requires static shapes. The provided '
'dataset only has a partially defined shape. '
'(Dimension %d of output tensor %d is not statically known '
'for output shapes: %s.%s)' % (j, i, dataset.output_shapes, hint))
@property
def dummy_x(self):
return self._dummy_x
@property
def dummy_y(self):
return self._dummy_y
def make_infeed_instance(self, inputs):
# TODO(saeta): Verify inputs is as expected.
return self._infeed_instance
def build_infeed_from_input_specs(self, input_specs, execution_mode):
shard_infeed_tensors = self._get_next_ops
assert len(shard_infeed_tensors) == self._strategy.num_towers
infeed_ops = []
for shard_id in range(self._strategy.num_towers):
with ops.device('/device:CPU:0'):
infeed_ops.append(
tpu_ops.infeed_enqueue_tuple(
shard_infeed_tensors[shard_id],
[spec.shape for spec in input_specs],
name='infeed-enqueue-%s-%d' % (execution_mode, shard_id),
device_ordinal=shard_id))
return SizedInfeed(infeed_ops=infeed_ops,
sharded_infeed_tensors=shard_infeed_tensors)
class TPUFunction(object):
"""K.function compatible interface for invoking a TPU compiled function.
Recompilation is triggered on-demand for each set of new inputs shapes: the
results are cached for future execution. We expect most computations will
be dominated by a standard batch-size, followed by a straggler batch for
the end of training or evaluation.
All `inputs` and `outputs` will be loaded via the infeed and outfeed queues
instead of being injected as `feed_dict` items or fetches.
"""
def __init__(self, model, execution_mode, strategy):
self.model = model
self.execution_mode = execution_mode
self._strategy = strategy
self._compilation_cache = {}
self._cloned_model = None
# Copy optimizer configuration. This is done prior to `_specialize_model`
# as the configuration may require evaluating variables in the CPU session.
self._optimizer_config = None
if not isinstance(self.model.optimizer, keras_optimizers.TFOptimizer):
self._optimizer_config = self.model.optimizer.get_config()
def _specialize_model(self, input_specs, infeed_manager):
"""Specialize `self.model` (a Keras model) for the given input shapes."""
# Re-create our input and output layers inside our subgraph. They will be
# attached to the true computation when we clone our model in `tpu_fn`.
K.set_learning_phase(self.execution_mode == model_fn_lib.ModeKeys.TRAIN)
# functools.partial and callable objects are not supported by tpu.rewrite
def _model_fn():
"""Compute fit/eval/predict for the TPU."""
is_training = self.execution_mode == model_fn_lib.ModeKeys.TRAIN
is_test = self.execution_mode == model_fn_lib.ModeKeys.EVAL
is_predict = self.execution_mode == model_fn_lib.ModeKeys.PREDICT
# During train/eval, we infeed our features as well as labels.
if is_training or is_test:
infeed_layers = self.model._input_layers + self.model._output_layers
else:
infeed_layers = self.model._input_layers
# Generate our infeed operation to read features & labels.
infeed_tensors = tpu_ops.infeed_dequeue_tuple(
dtypes=[spec.dtype for spec in input_specs],
shapes=[spec.shape for spec in input_specs],
name='infeed-%s' % self.execution_mode)
assert len(infeed_tensors) == len(infeed_layers), (
'Infeed inputs did not match model: %s vs %s' % (infeed_layers,
infeed_tensors))
tpu_targets = []
tpu_input_map = {}
# Sort infeed outputs into inputs and labels for calling our Keras model.
for tensor, layer in zip(infeed_tensors, infeed_layers):
if layer in self.model._input_layers:
tpu_input_map[layer.name] = tensor
if layer in self.model._output_layers:
tpu_targets.append(tensor)
# Clone our CPU model, running within the TPU device context.
with TPURewriteContext(tpu_input_map):
# TODO(power): Replicate variables.
with ops.device('/device:TPU:0'):
self._cloned_model = models.clone_model(self.model)
# Create a copy of the optimizer for this graph.
if isinstance(self.model.optimizer, keras_optimizers.TFOptimizer):
cloned_optimizer = keras_optimizers.TFOptimizer(
self.model.optimizer.optimizer)
else:
logging.info('Cloning %s %s', self.model.optimizer.__class__.__name__,
self._optimizer_config)
cloned_optimizer = self.model.optimizer.__class__.from_config(
self._optimizer_config)
if is_training or is_test:
self._cloned_model.compile(
optimizer=_replicated_optimizer(cloned_optimizer),
loss=self.model.loss,
loss_weights=self.model.loss_weights,
metrics=self.model.metrics,
weighted_metrics=self.model.weighted_metrics,
target_tensors=tpu_targets,
)
# Compute our outfeed depending on the execution mode
if is_training:
self._cloned_model._make_train_function()
self._outfeed_spec = [
tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
for tensor in self._cloned_model.train_function.outputs
]
return [
self._cloned_model.train_function.updates_op,
tpu_ops.outfeed_enqueue_tuple(
self._cloned_model.train_function.outputs,
name='outfeed-enqueue-train')
]
elif is_test:
self._cloned_model._make_test_function()
self._outfeed_spec = [
tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
for tensor in self._cloned_model.test_function.outputs
]
return [
tpu_ops.outfeed_enqueue_tuple(
self._cloned_model.test_function.outputs,
name='outfeed-enqueue-test')
]
elif is_predict:
self._cloned_model._make_predict_function()
self._outfeed_spec = [
tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
for tensor in self._cloned_model.predict_function.outputs
]
return [
tpu_ops.outfeed_enqueue_tuple(
self._cloned_model.predict_function.outputs,
name='outfeed-enqueue-predict',
)
]
else:
assert False, 'Unexpected execution mode: %s' % self.execution_mode
# Capture outfeed metadata computed during the rewrite.
self._outfeed_spec = None
# Generate our TPU operations using `tpu.split_compile_and_replicate`.
# `compile_op` can be used to test that the TPU model compiles before execution.
# `execute_op` replicates `_model_fn` `num_replicas` times, with each shard
# running on a different logical core.
compile_op, execute_op = tpu.split_compile_and_replicate(
_model_fn, inputs=[[]] * self._strategy.num_towers)
# Generate CPU side operations to enqueue features/labels and dequeue
# outputs from the model call.
sized_infeed = infeed_manager.build_infeed_from_input_specs(
input_specs, self.execution_mode)
# Build output ops.
outfeed_op = []
for shard_id in range(self._strategy.num_towers):
with ops.device('/device:CPU:0'):
outfeed_op.extend(
tpu_ops.outfeed_dequeue_tuple(
dtypes=[spec.dtype for spec in self._outfeed_spec],
shapes=[spec.shape for spec in self._outfeed_spec],
name='outfeed-dequeue-%s-%d' % (self.execution_mode, shard_id),
device_ordinal=shard_id))
return TPUModelOp(
compile_op,
execute_op,
infeed_tensors=sized_infeed.sharded_infeed_tensors,
infeed_op=sized_infeed.infeed_ops,
outfeed_op=outfeed_op)
def _test_model_compiles(self, tpu_model_ops):
"""Verifies that the given TPUModelOp can be compiled via XLA."""
logging.info('Started compiling')
start_time = time.clock()
result = K.get_session().run(tpu_model_ops.compile_op)
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
raise RuntimeError('Compilation failed: {}'.format(
proto.status_error_message))
end_time = time.clock()
logging.info('Finished compiling. Time elapsed: %s secs',
end_time - start_time)
def __call__(self, inputs):
assert isinstance(inputs, list)
infeed_manager = None
for x, mgr in self.model._numpy_to_infeed_manager_list:
if inputs[0] is x:
infeed_manager = mgr
break
if infeed_manager is None:
infeed_manager = TPUNumpyInfeedManager(self.model._strategy)
# Strip sample weight from inputs
if (self.execution_mode == model_fn_lib.ModeKeys.TRAIN or
self.execution_mode == model_fn_lib.ModeKeys.EVAL):
input_tensors = self.model._feed_inputs + self.model._feed_targets
inputs = inputs[:len(input_tensors)]
else:
input_tensors = self.model._feed_inputs
infeed_instance = infeed_manager.make_infeed_instance(inputs)
del inputs  # To avoid accidental usage.
input_specs = infeed_instance.make_input_specs(input_tensors)
# XLA requires every operation in the graph has a fixed shape. To
# handle varying batch sizes we recompile a new sub-graph for each
# unique input shape.
shape_key = tuple([tuple(spec.shape.as_list()) for spec in input_specs])
if shape_key not in self._compilation_cache:
with self.model.tpu_session():
logging.info('New input shapes; (re-)compiling: mode=%s, %s',
self.execution_mode, input_specs)
new_tpu_model_ops = self._specialize_model(input_specs,
infeed_manager)
self._compilation_cache[shape_key] = new_tpu_model_ops
self._test_model_compiles(new_tpu_model_ops)
# Initialize our TPU weights on the first compile.
self.model._initialize_weights(self._cloned_model)
tpu_model_ops = self._compilation_cache[shape_key]
infeed_dict = infeed_instance.make_feed_dict(tpu_model_ops)
with self.model.tpu_session() as session:
_, _, outfeed_outputs = session.run([
tpu_model_ops.infeed_op, tpu_model_ops.execute_op,
tpu_model_ops.outfeed_op
], infeed_dict)
# TODO(xiejw): Decide how to reduce outputs, or just discard all but first.
if self.execution_mode == model_fn_lib.ModeKeys.PREDICT:
outputs = [[] for _ in self._outfeed_spec]
outputs_per_replica = len(self._outfeed_spec)
for i in range(self._strategy.num_towers):
output_group = outfeed_outputs[i * outputs_per_replica:(i + 1) *
outputs_per_replica]
for j in range(outputs_per_replica):
outputs[j].append(output_group[j])
return [np.concatenate(group) for group in outputs]
else:
return outfeed_outputs[:len(outfeed_outputs) // self._strategy.num_towers]
class KerasTPUModel(models.Model):
"""TPU compatible Keras model wrapper."""
def __init__(self, cpu_model, tpu_name_or_address, strategy):
super(models.Model, self).__init__( # pylint: disable=bad-super-call
inputs=cpu_model.inputs,
outputs=cpu_model.outputs,
name=cpu_model.name,
)
# Create a mapping from numpy arrays to infeed managers.
# Note: uses a list of tuples instead of a map because numpy arrays are
# not hashable.
self._numpy_to_infeed_manager_list = []
self.predict_function = None
self.test_function = None
self.train_function = None
self._strategy = strategy
self._tpu_name_or_address = tpu_name_or_address
self._cpu_model = cpu_model
self._tpu_model = None
self._tpu_weights_initialized = False
self._graph = ops.Graph()
self._cluster_resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu_name_or_address)
master = self._cluster_resolver.master()
cluster_spec = self._cluster_resolver.cluster_spec()
self._session = tf_session.Session(
graph=self._graph,
target=master,
config=config_pb2.ConfigProto(isolate_session_state=True))
# TODO(saeta): Confirm the lines below work in ClusterSpec propagation env.
if cluster_spec:
self._session.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
with self._graph.as_default():
self._session.run(tpu.initialize_system())
# If the input CPU model has already been compiled, compile our TPU model
# immediately.
if self._cpu_model.optimizer:
self.compile(
self._cpu_model.optimizer,
self._cpu_model.loss,
self._cpu_model.metrics,
self._cpu_model.loss_weights,
self._cpu_model.sample_weight_mode,
self._cpu_model.weighted_metrics,
self._cpu_model.target_tensors,
)
def get_config(self):
return {
'cpu_model': self._cpu_model,
'tpu_name_or_address': self._tpu_name_or_address,
'strategy': self._strategy,
}
def compile(self,
optimizer,
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
**kwargs):
if sample_weight_mode:
raise ValueError('sample_weight_mode not supported for TPU execution.')
if weighted_metrics:
raise ValueError('weighted_metrics not supported for TPU execution.')
if target_tensors:
raise ValueError('target_tensors is not supported for TPU execution.')
super(KerasTPUModel, self).compile(optimizer, loss, metrics, loss_weights,
sample_weight_mode, weighted_metrics,
target_tensors, **kwargs)
if not self._cpu_model.optimizer:
self._cpu_model.compile(optimizer, loss, metrics, loss_weights,
sample_weight_mode, weighted_metrics,
target_tensors, **kwargs)
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
**kwargs):
assert not self._numpy_to_infeed_manager_list # Ensure empty.
infeed_managers = [] # Managers to clean up at the end of the fit call.
if isinstance(x, dataset_ops.Dataset):
# TODO(b/111413240): Support taking a tf.data.Dataset directly.
raise ValueError(
'Taking a Dataset directly is not yet supported. Please '
'wrap your dataset construction code in a function and '
'pass that to fit instead. For examples, see: '
'https://github.com/tensorflow/tpu/tree/master/models/experimental'
'/keras')
if callable(x):
with self.tpu_session() as sess:
dataset = x()
if steps_per_epoch is None:
raise ValueError('When using tf.data as input to a model, you '
'should specify the steps_per_epoch argument.')
if y is not None:
raise ValueError('When using tf.data as input to a model, y must be '
'None')
infeed_manager = TPUDatasetInfeedManager(dataset, self._strategy, sess)
# Use dummy numpy inputs for the rest of Keras' shape checking. We
# intercept them when building the model.
x = infeed_manager.dummy_x
y = infeed_manager.dummy_y
infeed_managers.append((x, infeed_manager))
if isinstance(validation_data, dataset_ops.Dataset):
# TODO(b/111413240): Support taking a tf.data.Dataset directly.
raise ValueError(
'Taking a Dataset directly is not yet supported. Please '
'wrap your dataset construction code in a function and '
'pass that to fit instead. For examples, see: '
'https://github.com/tensorflow/tpu/tree/master/models/experimental'
'/keras')
if callable(validation_data):
with self.tpu_session() as sess:
dataset = validation_data()
if validation_steps is None:
raise ValueError('When using tf.data as validation for a model, you '
'should specify the validation_steps argument.')
infeed_manager = TPUDatasetInfeedManager(dataset, self._strategy, sess)
# Use dummy numpy inputs for the rest of Keras' shape checking. We
# intercept them when building the model.
val_x = infeed_manager.dummy_x
val_y = infeed_manager.dummy_y
infeed_managers.append((val_x, infeed_manager))
validation_data = (val_x, val_y)
self._numpy_to_infeed_manager_list = infeed_managers
try:
return super(KerasTPUModel, self).fit(
x,
y,
batch_size,
epochs,
verbose,
callbacks,
validation_split,
validation_data,
shuffle,
class_weight,
sample_weight,
initial_epoch,
steps_per_epoch,
validation_steps,
**kwargs)
finally:
self._numpy_to_infeed_manager_list = []
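# A hedged usage sketch for the dataset path handled in `fit` above (the
# dataset-building function and its tensors are illustrative, not defined in
# this module): pass a zero-argument callable as `x`, leave `y` unset, and
# supply `steps_per_epoch` explicitly, e.g.
#
#   def make_training_dataset():
#     ds = tf.data.Dataset.from_tensor_slices((images, labels))
#     return ds.repeat().batch(128, drop_remainder=True)
#
#   tpu_model.fit(make_training_dataset, epochs=10, steps_per_epoch=100)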
def _make_train_function(self):
if not self.train_function:
self.train_function = TPUFunction(
self, model_fn_lib.ModeKeys.TRAIN, strategy=self._strategy)
return self.train_function
def _make_test_function(self):
if not self.test_function:
self.test_function = TPUFunction(
self, model_fn_lib.ModeKeys.EVAL, strategy=self._strategy)
return self.test_function
def _make_predict_function(self):
if not self.predict_function:
self.predict_function = TPUFunction(
self, model_fn_lib.ModeKeys.PREDICT, strategy=self._strategy)
return self.predict_function
def _initialize_weights(self, cloned_model):
"""Initialize TPU weights.
This is called on the first compile of the TPU model (first call to
fit/predict/evaluate).
Args:
cloned_model: `keras.Model`, TPU model to initialize.
"""
if self._tpu_weights_initialized:
return
self._tpu_model = cloned_model
self._tpu_weights_initialized = True
weights = self._cpu_model.get_weights()
with self.tpu_session():
logging.info('Setting weights on TPU model.')
cloned_model.set_weights(weights)
def sync_to_cpu(self):
"""Copy weights from the CPU, returning a synchronized CPU model."""
if self._tpu_weights_initialized:
with self.tpu_session():
logging.info('Copying TPU weights to the CPU')
tpu_weights = self._tpu_model.get_weights()
self._cpu_model.set_weights(tpu_weights)
return self._cpu_model
def get_weights(self):
return self.sync_to_cpu().get_weights()
def save_weights(self, *args, **kw):
return self.sync_to_cpu().save_weights(*args, **kw)
def save(self, *args, **kw):
return self.sync_to_cpu().save(*args, **kw)
def set_weights(self, weights):
# We may not have a TPU model available if we haven't run fit/predict, so
# we can't directly set the TPU weights here.
# Instead, reset CPU model weights and force TPU re-initialization at the
# next call.
self._cpu_model.set_weights(weights)
self._tpu_weights_initialized = False
@contextlib.contextmanager
def tpu_session(self):
"""Yields a TPU session and sets it as the default Keras session."""
with self._graph.as_default():
default_session = K.get_session()
# N.B. We have to call `K.set_session()` AND set our session as the
# TF default. `K.get_session()` surprisingly does not return the value
# supplied by K.set_session otherwise.
K.set_session(self._session)
with self._session.as_default():
yield self._session
K.set_session(default_session)
def shutdown(self):
# TODO(b/111364423): Actually shut down the system.
logging.info('Skipping shutting down TPU system.')
# with self.tpu_session() as session:
# session.run(tpu.shutdown_system())
self._session.close()
def _validate_shapes(model):
"""Validate that all layers in `model` have constant shape."""
for layer in model.layers:
if isinstance(layer.input_shape, tuple):
input_shapes = [layer.input_shape]
else:
input_shapes = layer.input_shape
if isinstance(layer.output_shape, tuple):
output_shapes = [layer.output_shape]
else:
output_shapes = layer.output_shape
for shape in input_shapes + output_shapes:
for dim in shape[1:]:
if dim is None:
raise ValueError(
"""
Layer %(layer)s has a variable shape in a non-batch dimension. TPU models must
have constant shapes for all operations.
You may have to specify `input_length` for RNN/TimeDistributed layers.
Layer: %(layer)s
Input shape: %(input_shape)s
Output shape: %(output_shape)s
""" % {
'layer': layer,
'input_shape': layer.input_shape,
'output_shape': layer.output_shape
})
@experimental
def tpu_model(model, tpu_name_or_address=None, strategy=None):
"""Copy `model` along with weights to the TPU. Returns a TPU model.
Usage:
```
a = Input(shape=(32,))
b = Dense(32)(a)
model = Model(inputs=a, outputs=b)
# If `num_cores_per_host` is greater than one, batch parallelism will be used
# to run on multiple TPU cores.
strategy = keras_support.TPUDistributionStrategy(num_cores_per_host=8)
model = keras_support.tpu_model(model, strategy=strategy)
model.compile(
optimizer=tf.train.GradientDescentOptimizer(learning_rate=1.0),
...)
model.shutdown()
```
Args:
model: A Keras `Model` instance to be copied to the TPU.
tpu_name_or_address: A string that is either the name of the Cloud TPU,
the grpc address of the Cloud TPU, or (Googlers only) the BNS name of the
Cloud TPU. If tpu_name_or_address is None, the TPUClusterResolver will
examine the environment to determine a potential Cloud TPU to use.
strategy: `TPUDistributionStrategy`. The strategy to use for replicating
model across multiple TPU cores.
Returns:
A new `KerasTPUModel` instance.
"""
# Force initialization of the CPU model.
model.get_weights()
model.reset_states()
_validate_shapes(model)
# TODO(xiejw): Validate TPU model. TPUModel only?
# TODO(xiejw): Validate replicas. Full or 1. Shall we allow subset?
# TODO(xiejw): Adds reduction option.
if strategy is None:
strategy = TPUDistributionStrategy(num_cores_per_host=1)
return KerasTPUModel(
cpu_model=model,
tpu_name_or_address=tpu_name_or_address,
strategy=strategy)
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ScaNN index evaluator custom component."""
import os
import time
from typing import Any, Dict, List, Optional, Text, Union
import logging
import json
import tfx
from tfx.types import standard_artifacts
from tfx.types.component_spec import ChannelParameter
from tfx.types.component_spec import ExecutionParameter
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.types import artifact_utils
from tfx.utils import io_utils
from typing import Optional
from tfx import types
import tensorflow as tf
import numpy as np
import tensorflow_data_validation as tfdv
from tensorflow_transform.tf_metadata import schema_utils
try:
from . import item_matcher
from . import scann_indexer
except ImportError:
import item_matcher
import scann_indexer
QUERIES_SAMPLE_RATIO = 0.01
MAX_NUM_QUERIES = 10000
NUM_NEIGBHOURS = 20
class IndexEvaluatorSpec(tfx.types.ComponentSpec):
INPUTS = {
'examples': ChannelParameter(type=standard_artifacts.Examples),
'schema': ChannelParameter(type=standard_artifacts.Schema),
'model': ChannelParameter(type=standard_artifacts.Model),
}
OUTPUTS = {
'evaluation': ChannelParameter(type=standard_artifacts.ModelEvaluation),
'blessing': ChannelParameter(type=standard_artifacts.ModelBlessing),
}
PARAMETERS = {
'min_recall': ExecutionParameter(type=float),
'max_latency': ExecutionParameter(type=float),
}
class ScaNNIndexEvaluatorExecutor(base_executor.BaseExecutor):
def Do(self,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
if 'examples' not in input_dict:
raise ValueError('Examples is missing from input dict.')
if 'model' not in input_dict:
raise ValueError('Model is missing from input dict.')
if 'evaluation' not in output_dict:
raise ValueError('Evaluation is missing from output dict.')
if 'blessing' not in output_dict:
raise ValueError('Blessing is missing from output dict.')
valid = True
self._log_startup(input_dict, output_dict, exec_properties)
embedding_files_pattern = io_utils.all_files_pattern(
artifact_utils.get_split_uri(input_dict['examples'], 'train'))
schema_file_path = artifact_utils.get_single_instance(
input_dict['schema']).uri + '/schema.pbtxt'
vocabulary, embeddings = scann_indexer.load_embeddings(
embedding_files_pattern, schema_file_path)
num_embeddings = embeddings.shape[0]
logging.info(f'{num_embeddings} embeddings are loaded.')
num_queries = int(min(num_embeddings * QUERIES_SAMPLE_RATIO, MAX_NUM_QUERIES))
logging.info(f'Sampling {num_queries} query embeddings for evaluation...')
query_embedding_indices = np.random.choice(num_embeddings, num_queries)
query_embeddings = np.take(embeddings, query_embedding_indices, axis=0)
# Load Exact matcher
exact_matcher = item_matcher.ExactMatcher(embeddings, vocabulary)
exact_matches = []
logging.info(f'Computing exact matches for the queries...')
for query in query_embeddings:
exact_matches.append(exact_matcher.match(query, NUM_NEIGBHOURS))
logging.info(f'Exact matches are computed.')
del num_embeddings, exact_matcher
# Load ScaNN index matcher
index_artifact = artifact_utils.get_single_instance(input_dict['model'])
ann_matcher = item_matcher.ScaNNMatcher(index_artifact.uri + '/serving_model_dir')
scann_matches = []
logging.info(f'Computing ScaNN matches for the queries...')
start_time = time.time()
for query in query_embeddings:
scann_matches.append(ann_matcher.match(query, NUM_NEIGBHOURS))
end_time = time.time()
logging.info(f'ScaNN matches are computed.')
# Compute average latency
elapsed_time = end_time - start_time
current_latency = elapsed_time / num_queries
# Compute recall
current_recall = 0
for exact, approx in zip(exact_matches, scann_matches):
current_recall += len(set(exact).intersection(set(approx))) / NUM_NEIGBHOURS
current_recall /= num_queries
metrics = {
'recall': current_recall,
'latency': current_latency
}
min_recall = exec_properties['min_recall']
max_latency = exec_properties['max_latency']
logging.info(f'Average latency per query achieved: {current_latency}. Maximum latency allowed: {max_latency}')
logging.info(f'Recall achieved: {current_recall}. Minimum recall allowed: {min_recall}')
# Validate index latency and recall
valid = (current_latency <= max_latency) and (current_recall >= min_recall)
logging.info(f'Model is valid: {valid}')
# Output the evaluation artifact.
evaluation = artifact_utils.get_single_instance(output_dict['evaluation'])
evaluation.set_string_custom_property('index_model_uri', index_artifact.uri)
evaluation.set_int_custom_property('index_model_id', index_artifact.id)
io_utils.write_string_file(
os.path.join(evaluation.uri, 'metrics'), json.dumps(metrics))
# Output the blessing artifact.
blessing = artifact_utils.get_single_instance(output_dict['blessing'])
blessing.set_string_custom_property('index_model_uri', index_artifact.uri)
blessing.set_int_custom_property('index_model_id', index_artifact.id)
if valid:
io_utils.write_string_file(os.path.join(blessing.uri, 'BLESSED'), '')
blessing.set_int_custom_property('blessed', 1)
else:
io_utils.write_string_file(os.path.join(blessing.uri, 'NOT_BLESSED'), '')
blessing.set_int_custom_property('blessed', 0)
class IndexEvaluator(base_component.BaseComponent):
SPEC_CLASS = IndexEvaluatorSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(ScaNNIndexEvaluatorExecutor)
def __init__(self,
examples: types.channel,
schema: types.channel,
model: types.channel,
min_recall: float,
max_latency: float,
evaluation: Optional[types.Channel] = None,
blessing: Optional[types.Channel] = None,
instance_name=None):
blessing = blessing or types.Channel(
type=standard_artifacts.ModelBlessing,
artifacts=[standard_artifacts.ModelBlessing()])
evaluation = evaluation or types.Channel(
type=standard_artifacts.ModelEvaluation,
artifacts=[standard_artifacts.ModelEvaluation()])
spec = IndexEvaluatorSpec(
examples=examples,
schema=schema,
model=model,
evaluation=evaluation,
blessing=blessing,
min_recall=min_recall,
max_latency=max_latency
)
super().__init__(spec=spec, instance_name=instance_name)
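# A rough wiring sketch (upstream component names such as `embeddings_gen` and
# `index_builder` are hypothetical, not defined in this module):
#
#   index_evaluator = IndexEvaluator(
#       examples=embeddings_gen.outputs['examples'],
#       schema=schema_gen.outputs['schema'],
#       model=index_builder.outputs['model'],
#       min_recall=0.95,
#       max_latency=0.01)
#
# Downstream components can then gate deployment on
# index_evaluator.outputs['blessing'].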
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from numpy import pi
import logging
import warnings
from .kernel import PseudoSpectralKernel, tendency_forward_euler, tendency_ab2, tendency_ab3
try:
import mkl
np.use_fastnumpy = True
except ImportError:
pass
try:
import pyfftw
pyfftw.interfaces.cache.enable()
except ImportError:
pass
class Model(PseudoSpectralKernel):
"""A generic pseudo-spectral inversion model.
Attributes
----------
nx, ny : int
Number of real space grid points in the `x`, `y` directions (cython)
nk, nl : int
Number of spectal space grid points in the `k`, `l` directions (cython)
nz : int
Number of vertical levels (cython)
kk, ll : real array
Zonal and meridional wavenumbers (`nk`) (cython)
a : real array
inversion matrix (`nz`, `nz`, `nl`, `nk`) (cython)
q : real array
Potential vorticity in real space (`nz`, `ny`, `nx`) (cython)
qh : complex array
Potential vorticity in spectral space (`nz`, `nl`, `nk`) (cython)
ph : complex array
Streamfunction in spectral space (`nz`, `nl`, `nk`) (cython)
u, v : real array
Zonal and meridional velocity anomalies in real space (`nz`, `ny`, `nx`) (cython)
Ubg : real array
Background zonal velocity (`nz`) (cython)
Qy : real array
Background potential vorticity gradient (`nz`) (cython)
ufull, vfull : real arrays
Zonal and meridional full velocities in real space (`nz`, `ny`, `nx`) (cython)
uh, vh : complex arrays
Velocity anomaly components in spectral space (`nz`, `nl`, `nk`) (cython)
rek : float
Linear drag in lower layer (cython)
t : float
Model time (cython)
tc : int
Model timestep (cython)
dt : float
Numerical timestep (cython)
L, W : float
Domain length in x and y directions
filterfac : float
Amplitude of the spectral spherical filter
twrite : int
Interval for cfl writeout (units: number of timesteps)
tmax : float
Total time of integration (units: model time)
tavestart : float
Start time for averaging (units: model time)
tsnapstart : float
Start time for snapshot writeout (units: model time)
taveint : float
Time interval for accumulation of diagnostic averages.
(units: model time)
tsnapint : float
Time interval for snapshots (units: model time)
ntd : int
Number of threads to use. Should not exceed the number of cores on
your machine.
pmodes : real array
Vertical pressure modes (unitless)
radii : real array
Deformation radii (units: model length)
"""
def __init__(
self,
# grid size parameters
nz=1,
nx=64, # grid resolution
ny=None,
L=1e6, # domain size is L [m]
W=None,
# timestepping parameters
dt=7200., # numerical timestep
twrite=1000., # interval for cfl and ke writeout (in timesteps)
tmax=1576800000., # total time of integration
tavestart=315360000., # start time for averaging
taveint=86400., # time interval used for summation in long-term average, in seconds
useAB2=False, # use second order Adams Bashforth timestepping instead of 3rd
# friction parameters
rek=5.787e-7, # linear drag in lower layer
filterfac=23.6, # the factor for use in the exponential filter
# constants
f = None, # coriolis parameter (not necessary for two-layer model
# if deformation radius is provided)
g= 9.81, # acceleration due to gravity
# diagnostics parameters
diagnostics_list='all', # which diagnostics to output
# fft parameters
# removed because fftw is now mandatory
#use_fftw = False, # fftw flag
#teststyle = False, # use fftw with "estimate" planner to get reproducibility
ntd = 1, # number of threads to use in fftw computations
log_level = 1, # logger level: from 0 for quiet (no log) to 4 for verbose
# logger (see https://docs.python.org/2/library/logging.html)
logfile = None, # logfile; None prints to screen
):
"""
.. note:: All of the test cases use ``nx==ny``. Expect bugs if you choose
these parameters to be different.
.. note:: All time intervals will be rounded to nearest `dt` interval.
Parameters
----------
nx : int
Number of grid points in the x direction.
ny : int
Number of grid points in the y direction (default: nx).
L : number
Domain length in x direction. Units: meters.
W :
Domain width in y direction. Units: meters (default: L).
rek : number
linear drag in lower layer. Units: seconds :sup:`-1`.
filterfac : number
amplitude of the spectral spherical filter (originally 18.4, later
changed to 23.6).
dt : number
Numerical timestep. Units: seconds.
twrite : int
Interval for cfl writeout. Units: number of timesteps.
tmax : number
Total time of integration. Units: seconds.
tavestart : number
Start time for averaging. Units: seconds.
tsnapstart : number
Start time for snapshot writeout. Units: seconds.
taveint : number
Time interval for accumulation of diagnostic averages.
Units: seconds. (For performance purposes, averaging does not have to
occur every timestep)
tsnapint : number
Time interval for snapshots. Units: seconds.
ntd : int
Number of threads to use. Should not exceed the number of cores on
your machine.
"""
if ny is None:
ny = nx
if W is None:
W = L
# TODO: be more clear about what attributes are cython and what
# attributes are python
PseudoSpectralKernel.__init__(self, nz, ny, nx, ntd)
self.L = L
self.W = W
# timestepping
self.dt = dt
self.twrite = twrite
self.tmax = tmax
self.tavestart = tavestart
self.taveint = taveint
self.logfile = logfile
self.log_level = log_level
self.useAB2 = useAB2
self.ntd = ntd
# friction
self.rek = rek
self.filterfac = filterfac
# constants
self.g = g
if f:
self.f = f
self.f2 = f**2
# TODO: make this less complicated!
# Really we just need to initialize the grid here. It's not necessary
# to have all these silly methods. Maybe we need "hooks" instead.
self._initialize_logger()
self._initialize_grid()
self._initialize_background()
self._initialize_forcing()
self._initialize_filter()
self._initialize_time()
self._initialize_inversion_matrix()
self._initialize_diagnostics(diagnostics_list)
def run_with_snapshots(self, tsnapstart=0., tsnapint=432000.):
"""Run the model forward, yielding to user code at specified intervals.
Parameters
----------
tsnapstart : number
The model time at which to begin yielding. Units: seconds.
tsnapint : number
The interval at which to yield. Units: seconds.
"""
tsnapints = np.ceil(tsnapint/self.dt)
while(self.t < self.tmax):
self._step_forward()
if self.t>=tsnapstart and (self.tc%tsnapints)==0:
yield self.t
return
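# A brief usage sketch (the model instance `m` and the callback are
# illustrative): iterating over the generator hands control back to user code
# every `tsnapint` model seconds,
#
#   for snapshot_time in m.run_with_snapshots(tsnapstart=0., tsnapint=1000*m.dt):
#       process_snapshot(m.q, snapshot_time)   # user-supplied, hypothetical
#
# whereas `run()` below integrates straight to `tmax`.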
def run(self):
"""Run the model forward without stopping until the end."""
while(self.t < self.tmax):
self._step_forward()
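# Usage sketch (illustrative only, not part of the original module). It assumes a
# concrete Model subclass -- here called ``QGModel`` -- is provided elsewhere in the
# package; the constructor arguments mirror the parameters documented in __init__:
#
#   m = QGModel(nx=64, dt=8000., tmax=40 * 86400., twrite=1000, tavestart=20 * 86400.)
#   for t in m.run_with_snapshots(tsnapstart=0., tsnapint=86400.):
#       print('snapshot at model time %g s' % t)   # user code runs between snapshots
#   # or, to integrate straight to tmax without intermediate access:
#   # m.run()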
def vertical_modes(self):
""" Calculate standard vertical modes. Simply
the eigenvectors of the stretching matrix S """
evals,evecs = np.linalg.eig(-self.S)
asort = evals.argsort()
# deformation wavenumbers and radii
self.kdi2 = evals[asort]
self.radii = np.zeros_like(self.kdi2)
self.radii[0] = np.sqrt(self.g*self.H)/np.abs(self.f) # barotropic def. radius
self.radii[1:] = 1./np.sqrt(self.kdi2[1:])
# eigenstructure
self.pmodes = evecs[:,asort]
# normalize to have unit L2-norm
Ai = (self.H / (self.Hi[:,np.newaxis]*(self.pmodes**2)).sum(axis=0))**0.5
self.pmodes = Ai[np.newaxis,:]*self.pmodes
def modal_projection(self,p,forward=True):
""" Performs a field p into modal amplitudes pn
using the basis [pmodes]. The inverse
transform calculates p from pn"""
if forward:
pt = np.linalg.solve(self.pmodes[np.newaxis,np.newaxis],p.T).T
else:
pt = np.einsum("ik,k...->i...",self.pmodes,p)
return pt
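# Sketch of the forward/inverse modal transform (illustrative only; assumes
# vertical_modes() has been called so that self.pmodes exists, and that a
# physical-space field such as the streamfunction m.p is available from the kernel):
#
#   m.vertical_modes()
#   pn = m.modal_projection(m.p)                    # field -> modal amplitudes
#   p_back = m.modal_projection(pn, forward=False)  # modal amplitudes -> field
#   # up to round-off, p_back should reproduce m.p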
def stability_analysis(self,bottom_friction=False):
""" Performs the baroclinic linear instability analysis given
given the base state velocity :math: `(U, V)` and
the stretching matrix :math: `S`:
.. math:: A \Phi = \omega B \Phi,
where
.. math:: A = B (U k + V l) + I (k Q_y - l Q_x) +
1j \delta_{N N} r_{ek} I \kappa^2
where :math:`\delta_{N N} = [0,0,\dots,0,1] ,`
and
.. math:: B = S - I \kappa^2 .
The eigenstructure is
.. math:: \Phi
and the eigenvalue is
.. math:: \omega
The growth rate is Im\ :math:`\{\omega\}`.
Parameters
----------
bottom_friction : bool, optional
Whether to include linear bottom drag in the linear stability
calculation (default is False, i.e. :math:`r_{ek} = 0`).
Returns
-------
omega: complex array
The eigenvalues with the largest imaginary part (units: inverse model time)
phi: complex array
The eigenvectors associated with :math:`\omega` (unitless)
"""
omega = np.zeros_like(self.wv)+0.j
phi = np.zeros_like(self.qh)
I = np.eye(self.nz)
L2 = self.S[:,:,np.newaxis,np.newaxis] - self.wv2*I[:,:,np.newaxis,np.newaxis]
Q = I[:,:,np.newaxis,np.newaxis]*(self.ikQy - self.ilQx).imag
Uk =(self.Ubg*I)[:,:,np.newaxis,np.newaxis]*self.k
Vl =(self.Vbg*I)[:,:,np.newaxis,np.newaxis]*self.l
L3 = np.einsum('ij...,jk...->ik...',L2,Uk+Vl) + 0j
if bottom_friction:
L3[-1,-1,:,:] += 1j*self.rek*self.wv2
L4 = self.a.T
M = np.einsum('...ij,...jk->...ik',L4,(L3+Q).T)
evals,evecs = np.linalg.eig(M)
evals, evecs = evals.T, evecs.T
# sorting things this way proved much faster
# than using numpy's argsort()!
imax = evals.imag.argmax(axis=0)
for i in range(self.nl):
for j in range(self.nk):
omega[i,j] = evals[imax[i,j],i,j]
phi[:,i,j] = evecs[imax[i,j],:,i,j]
return omega, phi
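# Sketch of how the eigenvalue output is typically used (illustrative only):
#
#   omega, phi = m.stability_analysis(bottom_friction=False)
#   growth_rate = omega.imag                      # Im{omega}, one value per (l, k)
#   i, j = np.unravel_index(growth_rate.argmax(), growth_rate.shape)
#   fastest_mode = phi[:, i, j]                   # vertical structure of that mode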
### PRIVATE METHODS - not meant to be called by user ###
def _step_forward(self):
# the basic steps are:
self._invert()
# find streamfunction from pv
self._do_advection()
# use streamfunction to calculate advection tendency
self._do_friction()
# apply friction
self._do_external_forcing()
# apply external forcing
self._calc_diagnostics()
# do what has to be done with diagnostics
self._forward_timestep()
# apply tendencies to step the model forward
# (filter gets called here)
self._print_status()
def _initialize_time(self):
"""Set up timestep stuff"""
#self.t=0 # actual time
#self.tc=0 # timestep number
self.taveints = np.ceil(self.taveint/self.dt)
### initialization routines, only called once at the beginning ###
# TODO: clean up and simplify this whole routine
def _initialize_grid(self):
"""Set up spatial and spectral grids and related constants"""
self.x,self.y = np.meshgrid(
np.arange(0.5,self.nx,1.)/self.nx*self.L,
np.arange(0.5,self.ny,1.)/self.ny*self.W )
# Notice: at xi=1 U=beta*rd^2 = c for xi>1 => U>c
# wavenumber one (equals to dkx/dky)
self.dk = 2.*pi/self.L
self.dl = 2.*pi/self.W
# wavenumber grids
# set in kernel
#self.nl = self.ny
#self.nk = int(self.nx/2+1)
self.ll = self.dl*np.append( np.arange(0.,self.nx/2),
np.arange(-self.nx/2,0.) )
self.kk = self.dk*np.arange(0.,self.nk)
self.k, self.l = np.meshgrid(self.kk, self.ll)
self.ik = 1j*self.k
self.il = 1j*self.l
# physical grid spacing
self.dx = self.L / self.nx
self.dy = self.W / self.ny
# constant for spectral normalizations
self.M = self.nx*self.ny
# isotropic wavenumber^2 grid
# the inversion is not defined at kappa = 0
self.wv2 = self.k**2 + self.l**2
self.wv = np.sqrt( self.wv2 )
iwv2 = self.wv2 != 0.
self.wv2i = np.zeros_like(self.wv2)
self.wv2i[iwv2] = self.wv2[iwv2]**-1
def _initialize_background(self):
raise NotImplementedError(
'needs to be implemented by Model subclass')
def _initialize_inversion_matrix(self):
raise NotImplementedError(
'needs to be implemented by Model subclass')
def _initialize_forcing(self):
raise NotImplementedError(
'needs to be implemented by Model subclass')
def _initialize_filter(self):
"""Set up frictional filter."""
# this defines the spectral filter (following Arbic and Flierl, 2003)
cphi=0.65*pi
wvx=np.sqrt((self.k*self.dx)**2.+(self.l*self.dy)**2.)
filtr = np.exp(-self.filterfac*(wvx-cphi)**4.)
filtr[wvx<=cphi] = 1.
self.filtr = filtr
def _filter(self, q):
return self.filtr * q
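# Standalone illustration of the exponential filter built in _initialize_filter
# (Arbic & Flierl, 2003). A sketch with made-up grid numbers, independent of the class:
#
#   import numpy as np
#   nx, filterfac = 64, 23.6
#   kdx = np.arange(nx // 2 + 1) * (2. * np.pi / nx)   # wavenumber times grid spacing
#   cphi = 0.65 * np.pi
#   filtr = np.exp(-filterfac * (kdx - cphi) ** 4)
#   filtr[kdx <= cphi] = 1.                            # no damping below the cutoff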
def _do_external_forcing(self):
pass
# logger
def _initialize_logger(self):
self.logger = logging.getLogger(__name__)
if not (self.logfile is None):
fhandler = logging.FileHandler(filename=self.logfile, mode='w')
else:
fhandler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s: %(message)s')
fhandler.setFormatter(formatter)
if not self.logger.handlers:
self.logger.addHandler(fhandler)
self.logger.setLevel(self.log_level*10)
# this prevents the logger from propagating into the ipython notebook log
self.logger.propagate = False
self.logger.info(' Logger initialized')
# compute advection in grid space (returns qdot in fourier space)
# *** don't remove! needed for diagnostics (but not forward model) ***
def _advect(self, q, u, v):
"""Given real inputs q, u, v, returns the advective tendency for
q in spectral space."""
uq = u*q
vq = v*q
# this is a hack, since fft now requires input to have shape (nz,ny,nx)
# it does an extra unnecessary fft
is_2d = (uq.ndim==2)
if is_2d:
uq = np.tile(uq[np.newaxis,:,:], (self.nz,1,1))
vq = np.tile(vq[np.newaxis,:,:], (self.nz,1,1))
tend = self.ik*self.fft(uq) + self.il*self.fft(vq)
if is_2d:
return tend[0]
else:
return tend
# def _filter(self, q):
# """Apply filter to field q."""
# return q
def _print_status(self):
"""Output some basic stats."""
if (self.log_level) and ((self.tc % self.twrite)==0):
self.ke = self._calc_ke()
self.cfl = self._calc_cfl()
#print 't=%16d, tc=%10d: cfl=%5.6f, ke=%9.9f' % (
# self.t, self.tc, cfl, ke)
self.logger.info('Step: %i, Time: %3.2e, KE: %3.2e, CFL: %4.3f'
, self.tc,self.t,self.ke,self.cfl )
assert self.cfl<1., self.logger.error('CFL condition violated')
def _calc_diagnostics(self):
# here is where we calculate diagnostics
if (self.t>=self.dt) and (self.t>=self.tavestart) and (self.tc%self.taveints==0):
self._increment_diagnostics()
# def _forward_timestep(self):
# """Step forward based on tendencies"""
#
# #self.dqhdt = self.dqhdt_adv + self.dqhdt_forc
#
# # Note that Adams-Bashforth is not self-starting
# if self.tc==0:
# # forward Euler at the first step
# qtend = tendency_forward_euler(self.dt, self.dqhdt)
# elif (self.tc==1) or (self.useAB2):
# # AB2 at step 2
# qtend = tendency_ab2(self.dt, self.dqhdt, self.dqhdt_p)
# else:
# # AB3 from step 3 on
# qtend = tendency_ab3(self.dt, self.dqhdt,
# self.dqhdt_p, self.dqhdt_pp)
#
# # add tendency and filter
# self.set_qh(self._filter(self.qh + qtend))
#
# # remember previous tendencies
# self.dqhdt_pp[:] = self.dqhdt_p.copy()
# self.dqhdt_p[:] = self.dqhdt.copy()
# #self.dqhdt[:] = 0.
#
# # augment timestep and current time
# self.tc += 1
# self.t += self.dt
### All the diagnostic stuff follows. ###
def _calc_cfl(self):
raise NotImplementedError(
'needs to be implemented by Model subclass')
# this is stuff that Cesar added
# if self.tc==0:
# assert self.calc_cfl()<1., " *** time-step too large "
# # initialize ke and time arrays
# self.ke = np.array([self.calc_ke()])
# self.eddy_time = np.array([self.calc_eddy_time()])
# self.time = np.array([0.])
def _calc_ke(self):
raise NotImplementedError(
'needs to be implemented by Model subclass')
def _initialize_diagnostics(self, diagnostics_list):
# Initialization for diagnostics
self.diagnostics = dict()
self._initialize_core_diagnostics()
self._initialize_model_diagnostics()
if diagnostics_list == 'all':
pass # by default, all diagnostics are active
elif diagnostics_list == 'none':
self.set_active_diagnostics([])
else:
self.set_active_diagnostics(diagnostics_list)
def _initialize_core_diagnostics(self):
"""Diagnostics common to all models."""
self.add_diagnostic('Ensspec',
description='enstrophy spectrum',
function= (lambda self: np.abs(self.qh)**2/self.M**2)
)
self.add_diagnostic('KEspec',
description=' kinetic energy spectrum',
function=(lambda self: self.wv2*np.abs(self.ph)**2/self.M**2)
) # factor of 2 to account for the fact that we have only half of
# the Fourier coefficients.
self.add_diagnostic('q',
description='QGPV',
function= (lambda self: self.q)
)
self.add_diagnostic('EKEdiss',
description='total energy dissipation by bottom drag',
function= (lambda self: self.Hi[-1]/self.H*self.rek*(self.v[-1]**2 + self.u[-1]**2).mean())
)
self.add_diagnostic('EKE',
description='mean eddy kinetic energy',
function= (lambda self: 0.5*(self.v**2 + self.u**2).mean(axis=-1).mean(axis=-1))
)
def _calc_derived_fields(self):
"""Should be implemented by subclass."""
pass
def _initialize_model_diagnostics(self):
"""Should be implemented by subclass."""
pass
def _set_active_diagnostics(self, diagnostics_list):
for d in self.diagnostics:
self.diagnostics[d]['active'] = (d in diagnostics_list)
def add_diagnostic(self, diag_name, description=None, units=None, function=None):
# create a new diagnostic dict and add it to the object array
# make sure the function is callable
assert hasattr(function, '__call__')
# make sure the name is valid
assert isinstance(diag_name, str)
# by default, diagnostic is active
self.diagnostics[diag_name] = {
'description': description,
'units': units,
'active': True,
'count': 0,
'function': function, }
def describe_diagnostics(self):
"""Print a human-readable summary of the available diagnostics."""
diag_names = list(self.diagnostics.keys())
diag_names.sort()
print('NAME | DESCRIPTION')
print(80*'-')
for k in diag_names:
d = self.diagnostics[k]
print('{:<10} | {:<54}'.format(
*(k, d['description'])))
def _increment_diagnostics(self):
# compute intermediate quantities needed for some diagnostics
self._calc_derived_fields()
for dname in self.diagnostics:
if self.diagnostics[dname]['active']:
res = self.diagnostics[dname]['function'](self)
if self.diagnostics[dname]['count']==0:
self.diagnostics[dname]['value'] = res
else:
self.diagnostics[dname]['value'] += res
self.diagnostics[dname]['count'] += 1
def get_diagnostic(self, dname):
return (self.diagnostics[dname]['value'] /
self.diagnostics[dname]['count'])
def spec_var(self, ph):
""" compute variance of p from Fourier coefficients ph """
var_dens = 2. * np.abs(ph)**2 / self.M**2
# only half of coefs [0] and [nx/2+1] due to symmetry in real fft2
var_dens[...,0] = var_dens[...,0]/2.
var_dens[...,-1] = var_dens[...,-1]/2.
return var_dens.sum()
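# Sketch of a Parseval-style consistency check for spec_var (illustrative only;
# assumes the kernel's unnormalized real FFT m.fft and a zero-mean real field q of
# shape (nz, ny, nx)):
#
#   qh = m.fft(q)
#   # should hold approximately for a zero-mean field:
#   np.allclose(m.spec_var(qh), q.var(axis=(-2, -1)).sum())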
def set_qh(self, qh):
warnings.warn("Method deprecated. Set model.qh directly instead. ",
DeprecationWarning)
self.qh = qh
def set_q(self, q):
warnings.warn("Method deprecated. Set model.q directly instead. ",
DeprecationWarning)
self.q = q
|
|
import _dk_core as core
from collections import namedtuple
import math
# Light Space Perspective Shadow Maps
# http://www.cg.tuwien.ac.at/research/vr/lispsm/
Plane = namedtuple('Plane', 'n, d')
Vector = namedtuple('Vector', 'x, y, z')
AABB = namedtuple('AABB', 'min, max')
_dot = lambda a, b: a.x * b.x + a.y * b.y + a.z * b.z
_cross = lambda a, b: Vector(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x)
_add = lambda a, b: Vector(a.x + b.x, a.y + b.y, a.z + b.z)
_subtract = lambda a, b: Vector(a.x - b.x, a.y - b.y, a.z - b.z)
_linear = lambda a, b, t: Vector(a.x + b.x * t, a.y + b.y * t, a.z + b.z * t)
_lengthSq = lambda a: a.x * a.x + a.y * a.y + a.z * a.z
_length = lambda a: math.sqrt(_lengthSq(a))
_mulScalar = lambda a, b: Vector(a.x * b, a.y * b, a.z * b)
def _normalize(a):
length = _length(a)
return _mulScalar(a, 1.0 / length) if length > 0.0 else a
_copyVector = lambda a: Vector(a.x, a.y, a.z)
_coreVector = lambda a: core.Vector3(a.x, a.y, a.z)
_epsilon = 0.001
_inf = float('inf')
_fltMin = 1.175494351e-38
_fltMax = 3.402823466e+38
_dblMin = 2.2250738585072014e-308
_dblMax = 1.7976931348623158e+308
_isEqual = lambda a, b: abs(a.x - b.x) < _epsilon and abs(a.y - b.y) < _epsilon and abs(a.z - b.z) < _epsilon
_frustumAabb = AABB(core.Vector3(-1.0, -1.0, -1.0), core.Vector3(1.0, 1.0, 1.0))
def calcFrustumLineObject(invViewProjMatrix):
# get frustum points
frustum = [core.Vector3(-1.0, -1.0, -1.0),
core.Vector3( 1.0, -1.0, -1.0),
core.Vector3( 1.0, 1.0, -1.0),
core.Vector3(-1.0, 1.0, -1.0),
core.Vector3(-1.0, -1.0, 1.0),
core.Vector3( 1.0, -1.0, 1.0),
core.Vector3( 1.0, 1.0, 1.0),
core.Vector3(-1.0, 1.0, 1.0)]
# camera to world-space
for v in frustum:
v.transform(invViewProjMatrix)
obj = [None] * 6
obj[0] = frustum[0:4] # near poly ccw
obj[1] = frustum[4:] # far poly ccw
obj[2] = [frustum[0], frustum[3], frustum[7], frustum[4]] # left poly ccw
obj[3] = [frustum[1], frustum[5], frustum[6], frustum[2]] # right poly ccw
obj[4] = [frustum[4], frustum[5], frustum[1], frustum[0]] # bottom poly ccw
obj[5] = [frustum[6], frustum[7], frustum[3], frustum[2]] # top poly ccw
return obj
def lineObjectFromAABB(aabb):
aabbMin = aabb.min
aabbMax = aabb.max
box = [core.Vector3(aabbMin.x, aabbMin.y, aabbMin.z),
core.Vector3(aabbMax.x, aabbMin.y, aabbMin.z),
core.Vector3(aabbMax.x, aabbMax.y, aabbMin.z),
core.Vector3(aabbMin.x, aabbMax.y, aabbMin.z),
core.Vector3(aabbMin.x, aabbMin.y, aabbMax.z),
core.Vector3(aabbMax.x, aabbMin.y, aabbMax.z),
core.Vector3(aabbMax.x, aabbMax.y, aabbMax.z),
core.Vector3(aabbMin.x, aabbMax.y, aabbMax.z)]
obj = [None] * 6
obj[0] = box[0:4]
obj[1] = box[4:]
obj[2] = [box[0], box[3], box[7], box[4]]
obj[3] = [box[1], box[5], box[6], box[2]]
obj[4] = [box[4], box[5], box[1], box[0]]
obj[5] = [box[6], box[7], box[3], box[2]]
return obj
def intersectPlaneEdge(plane, a, b):
d = _subtract(b, a)
t = _dot(plane.n, d)
if t != 0:
t = (plane.d - _dot(plane.n, a)) / t
if 0.0 <= t <= 1.0:
return _linear(a, d, t)
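# Worked example (illustrative): intersecting the edge from the origin to (0, 0, 1)
# with the plane z = 0.5 (normal (0, 0, 1), d = 0.5) returns the midpoint of the edge;
# edges parallel to the plane (t == 0) or cut outside 0 <= t <= 1 return None:
#
#   p = intersectPlaneEdge(Plane(Vector(0.0, 0.0, 1.0), 0.5),
#                          Vector(0.0, 0.0, 0.0), Vector(0.0, 0.0, 1.0))
#   # p == Vector(x=0.0, y=0.0, z=0.5)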
def clipObjectByPlane(obj, plane):
objOut = [] # line-poly clipped by plane
interObj = [] # intersection lines
for poly in obj:
numVerts = len(poly)
result = []
interPts = []
if numVerts > 2:
outside = [False] * numVerts
for i in range(numVerts):
# both outside -> save none
outside[i] = _dot(plane.n, poly[i]) > plane.d
for i1 in range(numVerts):
i2 = (i1 + 1) % numVerts
if outside[i1] and outside[i2]:
continue
p1 = poly[i1]
p2 = poly[i2]
if outside[i1]:
# outside to inside -> calc intersection save intersection and save i+1
inter = intersectPlaneEdge(plane, p1, p2)
if inter:
result.append(inter)
interPts.append(inter)
result.append(p2)
continue
if outside[i2]:
# inside to outside -> calc intersection save intersection
inter = intersectPlaneEdge(plane, p1, p2)
if inter:
result.append(inter)
interPts.append(inter)
continue
# both inside -> save point i+1
result.append(p2)
if len(result) > 2:
objOut.append(result)
if len(interPts) == 2:
interObj.append(interPts)
# append intersection line-poly clipped by plane
if len(interObj) > 2:
poly = interObj.pop()
while len(interObj) > 0:
lastPt = poly[-1]
nextPt = None
# find lastPt among the remaining intersection poly-vertices
for i in range(len(interObj)-1, -1, -1):
v1, v2 = interObj[i]
if _isEqual(v1, lastPt):
interObj.pop(i)
nextPt = v2
break
if _isEqual(v2, lastPt):
interObj.pop(i)
nextPt = v1
break
if nextPt:
# found next-linked point
poly.append(nextPt)
else:
print('cannot find matching pt: ', lastPt)
# for i in range(len(interObj)):
# poly = interObj[i]
# for v in poly:
# print('inter[{}]: '.format(i), v)
interObj.pop()
assert len(poly) > 3
if len(poly) > 3:
# last point can be deleted, it is same as the first (closes polygon)
poly.pop()
objOut.append(poly)
return objOut
def clipObjectByAABB(obj, aabb):
# create planes from aabb
planes = [Plane(Vector( 0.0, -1.0, 0.0), abs(aabb.min.y)), # bottom
Plane(Vector( 0.0, 1.0, 0.0), abs(aabb.max.y)), # top
Plane(Vector(-1.0, 0.0, 0.0), abs(aabb.min.x)), # left
Plane(Vector( 1.0, 0.0, 0.0), abs(aabb.max.x)), # right
Plane(Vector( 0.0, 0.0, -1.0), abs(aabb.min.z)), # back
Plane(Vector( 0.0, 0.0, 1.0), abs(aabb.max.z))] # front
for plane in planes:
obj = clipObjectByPlane(obj, plane)
return obj
def _clipTest(p, q, u):
if p < 0.0:
r = q / p
if r > u[1]:
return False
elif r > u[0]:
u[0] = r
return True
elif p > 0.0:
r = q / p
if r < u[0]:
return False
elif r < u[1]:
u[1] = r
return True
return q >= 0.0
def intersectionLineAABB(p, d, aabb):
u = [0.0, _dblMax]
pmax = _subtract(aabb.max, p)
pmin = _subtract(p, aabb.min)
intersect = _clipTest(-d.z, pmin.z, u) and _clipTest(d.z, pmax.z, u) and \
_clipTest(-d.y, pmin.y, u) and _clipTest(d.y, pmax.y, u) and \
_clipTest(-d.x, pmin.x, u) and _clipTest(d.x, pmax.x, u)
if intersect:
v = None
if u[0] >= 0.0:
v = _linear(p, d, u[0])
if u[1] >= 0.0:
v = _linear(p, d, u[1])
return v
def includeObjectLightVolume(obj, lightDir, aabb):
numPts = 0
for poly in obj:
numPts += len(poly)
points = [None] * (numPts * 2)
count = 0
ld = Vector(-lightDir.x, -lightDir.y, -lightDir.z)
for poly in obj:
for v in poly:
points[count] = v
count += 1
# for each point add the point on the ray in -lightDir
# intersected with the aabb
pt = intersectionLineAABB(v, ld, aabb)
if pt:
points[count] = pt
count += 1
return points[:count]
def aabbFromPoints(points):
if len(points) > 0:
aabbMax = _coreVector(points[0])
aabbMin = _coreVector(points[0])
for i in range(1, len(points)):
v = points[i]
if v.x < aabbMin.x:
aabbMin.x = v.x
elif v.x > aabbMax.x:
aabbMax.x = v.x
if v.y < aabbMin.y:
aabbMin.y = v.y
elif v.y > aabbMax.y:
aabbMax.y = v.y
if v.z < aabbMin.z:
aabbMin.z = v.z
elif v.z > aabbMax.z:
aabbMax.z = v.z
return AABB(aabbMin, aabbMax)
def calcFocusedLightVolumePoints(invViewProj, lightDir, aabb):
obj = calcFrustumLineObject(invViewProj)
if aabb:
obj = clipObjectByAABB(obj, aabb)
else:
points = [core.Vector3(-1.0, -1.0, -1.0),
core.Vector3(1.0, -1.0, -1.0),
core.Vector3(1.0, 1.0, -1.0),
core.Vector3(-1.0, 1.0, -1.0),
core.Vector3(-1.0, -1.0, 1.0),
core.Vector3(1.0, -1.0, 1.0),
core.Vector3(1.0, 1.0, 1.0),
core.Vector3(-1.0, 1.0, 1.0)]
# camera to world-space
for v in points:
v.transform(invViewProj)
aabb = aabbFromPoints(points)
return includeObjectLightVolume(obj, lightDir, aabb)
def calcBodyVector(points, cameraPos):
x, y, z = 0.0, 0.0, 0.0
for p in points:
x += p.x - cameraPos.x
y += p.y - cameraPos.y
z += p.z - cameraPos.z
return _normalize(Vector(x, y, z))
def calcViewMatrix(pos, dir, up):
lftN = _normalize(_cross(dir, up))
upN = _normalize(_cross(lftN, dir))
dirN = _normalize(dir)
return core.Matrix4(lftN.x, upN.x, -dirN.x, 0.0,
lftN.y, upN.y, -dirN.y, 0.0,
lftN.z, upN.z, -dirN.z, 0.0,
-_dot(lftN, pos), -_dot(upN, pos), _dot(dirN, pos), 1.0)
def calcProjectionMatrixForLHToFitAABB(aabb):
# calculate orthogonal projection matrix to fit AABB for Left-Handed
ax, ay, az = aabb.max.x + aabb.min.x, aabb.max.y + aabb.min.y, aabb.max.z + aabb.min.z
bx, by, bz = aabb.max.x - aabb.min.x, aabb.max.y - aabb.min.y, aabb.max.z - aabb.min.z
sx, tx = (2.0 / bx, -ax / bx) if bx > 0.0 else (1.0, 0.0)
sy, ty = (2.0 / by, -ay / by) if by > 0.0 else (1.0, 0.0)
sz, tz = (2.0 / bz, -az / bz) if bz > 0.0 else (1.0, 0.0)
return core.Matrix4(sx, 0.0, 0.0, 0.0,
0.0, sy, 0.0, 0.0,
0.0, 0.0, sz, 0.0,
tx, ty, tz, 1.0)
def calcProjectionMatrixForRHToFitAABB(aabb):
# calculate orthogonal projection matrix to fit AABB for Right-Handed
ax, ay, az = aabb.max.x + aabb.min.x, aabb.max.y + aabb.min.y, aabb.max.z + aabb.min.z
bx, by, bz = aabb.max.x - aabb.min.x, aabb.max.y - aabb.min.y, aabb.max.z - aabb.min.z
sx, tx = (2.0 / bx, -ax / bx) if bx > 0.0 else (1.0, 0.0)
sy, ty = (2.0 / by, -ay / by) if by > 0.0 else (1.0, 0.0)
sz, tz = (-2.0 / bz, az / bz) if bz > 0.0 else (-1.0, 0.0)
return core.Matrix4(sx, 0.0, 0.0, 0.0,
0.0, sy, 0.0, 0.0,
0.0, 0.0, sz, 0.0,
tx, ty, tz, 1.0)
def uniformSMMatrices(camera, lightDir, sceneAABB, useBodyVector=True):
invViewProj = camera.viewProjectionMatrix()
invViewProj.inverse()
points = calcFocusedLightVolumePoints(invViewProj, lightDir, sceneAABB)
cameraPos = camera.position()
upVector = calcBodyVector(points, cameraPos) if useBodyVector else camera.direction()
# calculate view-matrix
viewMatrix = calcViewMatrix(cameraPos, lightDir, upVector)
# transform the light volume points from world into light space
for i in range(len(points)):
v = _coreVector(points[i])
v.transform(viewMatrix)
points[i] = v
# calculate cubic hull (AABB)
aabb = aabbFromPoints(points) if len(points) > 0 else _frustumAabb
# refit to unit cube
projectionMatrix = calcProjectionMatrixForRHToFitAABB(aabb)
return viewMatrix, projectionMatrix
def lispSMMatrices(camera, lightDir, sceneAABB, useBodyVector=True):
invViewProj = camera.viewProjectionMatrix()
invViewProj.inverse()
points = calcFocusedLightVolumePoints(invViewProj, lightDir, sceneAABB)
cameraPos = camera.position()
viewDir = camera.direction()
dp = _dot(viewDir, lightDir)
sinGamma = math.sqrt(1.0 - dp * dp)
# calc up-vector
if useBodyVector:
newDir = calcBodyVector(points, cameraPos)
left = _cross(lightDir, newDir)
else:
left = _cross(lightDir, viewDir)
upVector = _normalize(_cross(left, lightDir))
# temporary light view
# look from position (cameraPos)
# into direction (lightDir)
# with up vector (upVector)
viewMatrix = calcViewMatrix(cameraPos, lightDir, upVector)
# transform the light volume points from world into light space
numPoints = len(points)
pointsCopy = [None] * numPoints
for i in range(numPoints):
v = points[i]
points[i] = _coreVector(v)
pointsCopy[i] = _coreVector(v)
points[i].transform(viewMatrix)
# calculate cubic hull (AABB)
aabb = aabbFromPoints(points) if numPoints > 0 else _frustumAabb
points = pointsCopy
del pointsCopy
nearPt = core.Vector3(0, 0, -1) # frustum near-center
nearPt.transform(invViewProj) # camera space to world space
nearDist = (nearPt - cameraPos).length() # get camera near distance
# LiSPSM formulas of the paper to get n (and f)
factor = 1.0 / sinGamma
z_n = factor * nearDist # often 1
d = abs(aabb.max.y - aabb.min.y) # perspective transform depth # light space y extents
z_f = z_n + d * sinGamma
n = (z_n + math.sqrt(z_f * z_n)) / sinGamma
f = n + d
# new observer point n-1 behind eye position
# pos = eyePos-up*(n-nearDist)
pos = _linear(cameraPos, upVector, -(n-nearDist))
viewMatrix = calcViewMatrix(pos, lightDir, upVector)
# one possibility for a simple perspective transformation matrix
# with the two parameters n(near) and f(far) in y direction
# [ 1 0 0 0] a = (f+n)/(f-n)
# [ 0 a 0 1] b = -2*f*n/(f-n)
# [ 0 0 1 0]
# [ 0 b 0 0]
lispMatrix = core.Matrix4(1.0, 0.0, 0.0, 0.0,
0.0, (f+n)/(f-n), 0.0, 1.0,
0.0, 0.0, 1.0, 0.0,
0.0, -2.0*f*n/(f-n), 0.0, 0.0)
# temporary setup for transforming the points to post-perspective space
lightProjection = viewMatrix * lispMatrix
# transform the light volume points from world into the distorted light space
for v in points:
v.transform(lightProjection)
# calculate the cubic hull (an AABB)
aabb = aabbFromPoints(points) if numPoints > 0 else _frustumAabb
# refit to unit cube
projectionMatrix = calcProjectionMatrixForRHToFitAABB(aabb)
return viewMatrix, lispMatrix * projectionMatrix
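# Usage sketch (illustrative only). The camera and scene AABB come from the host
# engine (_dk_core); the camera API assumed here (viewProjectionMatrix, position,
# direction) is the same one used inside uniformSMMatrices/lispSMMatrices above:
#
#   lightDir = _normalize(Vector(-0.3, -1.0, -0.2))
#   viewMat, projMat = lispSMMatrices(camera, lightDir, sceneAABB)
#   shadowMatrix = viewMat * projMat     # world space -> warped shadow-map clip space
#   # uniformSMMatrices(camera, lightDir, sceneAABB) is the unwarped alternative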
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django import shortcuts
from django import template
from django.core import urlresolvers
from django.template.defaultfilters import title
from django.utils.http import urlencode
from django.utils.translation import string_concat, ugettext_lazy as _
from horizon.conf import HORIZON_CONFIG
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils.filters import replace_underscores
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security \
.floating_ips.workflows import IPAssociationWorkflow
from .tabs import InstanceDetailTabs, LogTab, ConsoleTab
LOG = logging.getLogger(__name__)
ACTIVE_STATES = ("ACTIVE",)
POWER_STATES = {
0: "NO STATE",
1: "RUNNING",
2: "BLOCKED",
3: "PAUSED",
4: "SHUTDOWN",
5: "SHUTOFF",
6: "CRASHED",
7: "SUSPENDED",
8: "FAILED",
9: "BUILDING",
}
PAUSE = 0
UNPAUSE = 1
SUSPEND = 0
RESUME = 1
def is_deleting(instance):
task_state = getattr(instance, "OS-EXT-STS:task_state", None)
if not task_state:
return False
return task_state.lower() == "deleting"
class TerminateInstance(tables.BatchAction):
name = "terminate"
action_present = _("Terminate")
action_past = _("Scheduled termination of")
data_type_singular = _("Instance")
data_type_plural = _("Instances")
classes = ('btn-danger', 'btn-terminate')
def allowed(self, request, instance=None):
return True
def action(self, request, obj_id):
api.nova.server_delete(request, obj_id)
class RebootInstance(tables.BatchAction):
name = "reboot"
action_present = _("Hard Reboot")
action_past = _("Hard Rebooted")
data_type_singular = _("Instance")
data_type_plural = _("Instances")
classes = ('btn-danger', 'btn-reboot')
def allowed(self, request, instance=None):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
def action(self, request, obj_id):
api.nova.server_reboot(request, obj_id, api.nova.REBOOT_HARD)
class SoftRebootInstance(RebootInstance):
name = "soft_reboot"
action_present = _("Soft Reboot")
action_past = _("Soft Rebooted")
def action(self, request, obj_id):
api.nova.server_reboot(request, obj_id, api.nova.REBOOT_SOFT)
class TogglePause(tables.BatchAction):
name = "pause"
action_present = (_("Pause"), _("Resume"))
action_past = (_("Paused"), _("Resumed"))
data_type_singular = _("Instance")
data_type_plural = _("Instances")
classes = ("btn-pause",)
def allowed(self, request, instance=None):
self.paused = False
if not instance:
return self.paused
self.paused = instance.status == "PAUSED"
if self.paused:
self.current_present_action = UNPAUSE
else:
self.current_present_action = PAUSE
return ((instance.status in ACTIVE_STATES or self.paused)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.paused:
api.nova.server_unpause(request, obj_id)
self.current_past_action = UNPAUSE
else:
api.nova.server_pause(request, obj_id)
self.current_past_action = PAUSE
class ToggleSuspend(tables.BatchAction):
name = "suspend"
action_present = (_("Suspend"), _("Resume"))
action_past = (_("Suspended"), _("Resumed"))
data_type_singular = _("Instance")
data_type_plural = _("Instances")
classes = ("btn-suspend",)
def allowed(self, request, instance=None):
self.suspended = False
if not instance:
return self.suspended
self.suspended = instance.status == "SUSPENDED"
if self.suspended:
self.current_present_action = RESUME
else:
self.current_present_action = SUSPEND
return ((instance.status in ACTIVE_STATES or self.suspended)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.suspended:
api.nova.server_resume(request, obj_id)
self.current_past_action = RESUME
else:
api.nova.server_suspend(request, obj_id)
self.current_past_action = SUSPEND
class LaunchLink(tables.LinkAction):
name = "launch"
verbose_name = _("Launch Instance")
url = "horizon:project:instances:launch"
classes = ("btn-launch", "ajax-modal")
def allowed(self, request, datum):
try:
limits = api.nova.tenant_absolute_limits(request, reserved=True)
instances_available = limits['maxTotalInstances'] \
- limits['totalInstancesUsed']
cores_available = limits['maxTotalCores'] \
- limits['totalCoresUsed']
ram_available = limits['maxTotalRAMSize'] - limits['totalRAMUsed']
if instances_available <= 0 or cores_available <= 0 \
or ram_available <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
else:
self.verbose_name = _("Launch Instance")
classes = [c for c in self.classes if c != "disabled"]
self.classes = classes
except:
LOG.exception("Failed to retrieve quota information")
# If we can't get the quota information, leave it to the
# API to check when launching
return True # The action should always be displayed
class EditInstance(tables.LinkAction):
name = "edit"
verbose_name = _("Edit Instance")
url = "horizon:project:instances:update"
classes = ("ajax-modal", "btn-edit")
def get_link_url(self, project):
return self._get_link_url(project, 'instance_info')
def _get_link_url(self, project, step_slug):
base_url = urlresolvers.reverse(self.url, args=[project.id])
param = urlencode({"step": step_slug})
return "?".join([base_url, param])
def allowed(self, request, instance):
return not is_deleting(instance)
class EditInstanceSecurityGroups(EditInstance):
name = "edit_secgroups"
verbose_name = _("Edit Security Groups")
def get_link_url(self, project):
return self._get_link_url(project, 'update_security_groups')
def allowed(self, request, instance=None):
return (instance.status in ACTIVE_STATES and
not is_deleting(instance) and
request.user.tenant_id == instance.tenant_id)
class CreateSnapshot(tables.LinkAction):
name = "snapshot"
verbose_name = _("Create Snapshot")
url = "horizon:project:images_and_snapshots:snapshots:create"
classes = ("ajax-modal", "btn-camera")
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES and not is_deleting(instance)
class ConsoleLink(tables.LinkAction):
name = "console"
verbose_name = _("Console")
url = "horizon:project:instances:detail"
classes = ("btn-console",)
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES and not is_deleting(instance)
def get_link_url(self, datum):
base_url = super(ConsoleLink, self).get_link_url(datum)
tab_query_string = ConsoleTab(InstanceDetailTabs).get_query_string()
return "?".join([base_url, tab_query_string])
class LogLink(tables.LinkAction):
name = "log"
verbose_name = _("View Log")
url = "horizon:project:instances:detail"
classes = ("btn-log",)
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES and not is_deleting(instance)
def get_link_url(self, datum):
base_url = super(LogLink, self).get_link_url(datum)
tab_query_string = LogTab(InstanceDetailTabs).get_query_string()
return "?".join([base_url, tab_query_string])
class ConfirmResize(tables.Action):
name = "confirm"
verbose_name = _("Confirm Resize/Migrate")
classes = ("btn-confirm", "btn-action-required")
def allowed(self, request, instance):
return instance.status == 'VERIFY_RESIZE'
def single(self, table, request, instance):
api.nova.server_confirm_resize(request, instance)
class RevertResize(tables.Action):
name = "revert"
verbose_name = _("Revert Resize/Migrate")
classes = ("btn-revert", "btn-action-required")
def allowed(self, request, instance):
return instance.status == 'VERIFY_RESIZE'
def single(self, table, request, instance):
api.nova.server_revert_resize(request, instance)
class AssociateIP(tables.LinkAction):
name = "associate"
verbose_name = _("Associate Floating IP")
url = "horizon:project:access_and_security:floating_ips:associate"
classes = ("ajax-modal", "btn-associate")
def allowed(self, request, instance):
fip = api.network.NetworkClient(request).floating_ips
if fip.is_simple_associate_supported():
return False
return not is_deleting(instance)
def get_link_url(self, datum):
base_url = urlresolvers.reverse(self.url)
next = urlresolvers.reverse("horizon:project:instances:index")
params = {"instance_id": self.table.get_object_id(datum),
IPAssociationWorkflow.redirect_param_name: next}
params = urlencode(params)
return "?".join([base_url, params])
class SimpleAssociateIP(tables.Action):
name = "associate-simple"
verbose_name = _("Associate Floating IP")
classes = ("btn-associate-simple",)
def allowed(self, request, instance):
fip = api.network.NetworkClient(request).floating_ips
if not fip.is_simple_associate_supported():
return False
return not is_deleting(instance)
def single(self, table, request, instance_id):
try:
# target_id is port_id for Quantum and instance_id for Nova Network
# (Quantum API wrapper returns a 'portid_fixedip' string)
target_id = api.network.floating_ip_target_get_by_instance(
request, instance_id).split('_')[0]
fip = api.network.tenant_floating_ip_allocate(request)
api.network.floating_ip_associate(request, fip.id, target_id)
messages.success(request,
_("Successfully associated floating IP: %s")
% fip.ip)
except:
exceptions.handle(request,
_("Unable to associate floating IP."))
return shortcuts.redirect("horizon:project:instances:index")
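# Illustration of the comment above (hypothetical values): the Quantum wrapper returns
# a combined 'portid_fixedip' string, and only the port id part is used as target_id:
#
#   'a1b2c3d4_10.0.0.5'.split('_')[0]    # -> 'a1b2c3d4'
#   # with Nova Network the value is just the instance id, which contains no '_'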
class SimpleDisassociateIP(tables.Action):
name = "disassociate"
verbose_name = _("Disassociate Floating IP")
classes = ("btn-danger", "btn-disassociate",)
def allowed(self, request, instance):
if not HORIZON_CONFIG["simple_ip_management"]:
return False
return not is_deleting(instance)
def single(self, table, request, instance_id):
try:
# target_id is port_id for Quantum and instance_id for Nova Network
# (Quantum API wrapper returns a 'portid_fixedip' string)
target_id = api.network.floating_ip_target_get_by_instance(
request, instance_id).split('_')[0]
fips = [fip for fip in api.network.tenant_floating_ip_list(request)
if fip.port_id == target_id]
# Removing multiple floating IPs at once doesn't work, so this pops
# off the first one.
if fips:
fip = fips.pop()
api.network.floating_ip_disassociate(request,
fip.id, target_id)
api.network.tenant_floating_ip_release(request, fip.id)
messages.success(request,
_("Successfully disassociated "
"floating IP: %s") % fip.ip)
else:
messages.info(request, _("No floating IPs to disassociate."))
except:
exceptions.handle(request,
_("Unable to disassociate floating IP."))
return shortcuts.redirect("horizon:project:instances:index")
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, instance_id):
instance = api.nova.server_get(request, instance_id)
instance.full_flavor = api.nova.flavor_get(request,
instance.flavor["id"])
return instance
def get_ips(instance):
template_name = 'project/instances/_instance_ips.html'
context = {"instance": instance}
return template.loader.render_to_string(template_name, context)
def get_size(instance):
if hasattr(instance, "full_flavor"):
size_string = _("%(name)s | %(RAM)s RAM | %(VCPU)s VCPU "
"| %(disk)s Disk")
vals = {'name': instance.full_flavor.name,
'RAM': sizeformat.mbformat(instance.full_flavor.ram),
'VCPU': instance.full_flavor.vcpus,
'disk': sizeformat.diskgbformat(instance.full_flavor.disk)}
return size_string % vals
return _("Not available")
def get_keyname(instance):
if hasattr(instance, "key_name"):
keyname = instance.key_name
return keyname
return _("Not available")
def get_power_state(instance):
return POWER_STATES.get(getattr(instance, "OS-EXT-STS:power_state", 0), '')
STATUS_DISPLAY_CHOICES = (
("resize", "Resize/Migrate"),
("verify_resize", "Confirm or Revert Resize/Migrate"),
("revert_resize", "Revert Resize/Migrate"),
)
TASK_DISPLAY_CHOICES = (
("image_snapshot", "Snapshotting"),
("resize_prep", "Preparing Resize or Migrate"),
("resize_migrating", "Resizing or Migrating"),
("resize_migrated", "Resized or Migrated"),
("resize_finish", "Finishing Resize or Migrate"),
("resize_confirming", "Confirming Resize or Nigrate"),
("resize_reverting", "Reverting Resize or Migrate"),
("unpausing", "Resuming"),
)
class InstancesTable(tables.DataTable):
TASK_STATUS_CHOICES = (
(None, True),
("none", True)
)
STATUS_CHOICES = (
("active", True),
("shutoff", True),
("suspended", True),
("paused", True),
("error", False),
)
name = tables.Column("name",
link=("horizon:project:instances:detail"),
verbose_name=_("Instance Name"))
ip = tables.Column(get_ips, verbose_name=_("IP Address"))
size = tables.Column(get_size,
verbose_name=_("Size"),
attrs={'data-type': 'size'})
keypair = tables.Column(get_keyname, verbose_name=_("Keypair"))
status = tables.Column("status",
filters=(title, replace_underscores),
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
task = tables.Column("OS-EXT-STS:task_state",
verbose_name=_("Task"),
filters=(title, replace_underscores),
status=True,
status_choices=TASK_STATUS_CHOICES,
display_choices=TASK_DISPLAY_CHOICES)
state = tables.Column(get_power_state,
filters=(title, replace_underscores),
verbose_name=_("Power State"))
numprocs = tables.Column("numprocs",
verbose_name=_("Processes"))
class Meta:
name = "instances"
verbose_name = _("Instances")
status_columns = ["status", "task"]
row_class = UpdateRow
table_actions = (LaunchLink, TerminateInstance)
row_actions = (ConfirmResize, RevertResize, CreateSnapshot,
SimpleAssociateIP, AssociateIP,
SimpleDisassociateIP, EditInstance,
EditInstanceSecurityGroups, ConsoleLink, LogLink,
TogglePause, ToggleSuspend, SoftRebootInstance,
RebootInstance, TerminateInstance)
|
|
"""WeGovNow extension OnToMap logger."""
import json
import time
from os import path
from ast import literal_eval
from requests import request
from django.conf import settings
from django.contrib.sites.models import Site
from allauth.socialaccount.models import SocialAccount
from allauth_uwum.provider import UWUMProvider
from geokey.core.signals import get_request
from geokey_wegovnow.base import MAPPINGS
# Default headers for OnToMap
from geokey_wegovnow.conversions import make_cm_url, get_link_title
headers = {'content-type': 'application/json;charset=utf-8'}
def get_cert():
"""Get the UWUM certificate."""
cert = UWUMProvider.settings.get('CERT')
if not path.exists(cert):
raise IOError('UWUM certificate not found')
return cert
def check_mappings():
"""Get OnToMap mappings."""
mappings = get_mappings()
# Send mappings if it does not exist or does not match local mappings
if (mappings.status_code != 200 or
literal_eval(mappings.content).get('mappings') != MAPPINGS):
send_mappings()
def get_mappings():
"""Get OnToMap mappings."""
cert = get_cert()
url = settings.ONTOMAP_URLS['MAPPINGS_URL']
response = request(
'GET',
url=url,
cert=cert)
return response
def send_mappings():
"""Send OnToMap mappings."""
cert = get_cert()
url = settings.ONTOMAP_URLS['MAPPINGS_URL']
data = {
'mappings': MAPPINGS
}
response = request(
'POST',
headers=headers,
url=url,
cert=cert,
data=json.dumps(data))
return response
def get_events():
"""Get OnToMap events."""
cert = get_cert()
url = settings.ONTOMAP_URLS['EVENTS_URL']
response = request(
'GET',
headers=headers,
url=url,
cert=cert)
return response
def make_event(class_name, instance, action):
"""Make OnToMap event."""
domain = Site.objects.get_current().domain
uwum_account = SocialAccount.objects.get(
provider='uwum',
user=get_request().user)
activity_objects = []
visibility_details = []
details = {}
if action == 'removed' or instance.status != 'active':
hidden = True
else:
hidden = False
# ###########################
# ADDITIONS FOR PROJECT
# ###########################
if class_name == 'Project':
external_url = '%s/api/projects/%s/' % (
domain, instance.id)
if instance.isprivate:
hidden = True
activity_objects.append({
'type': 'Feature',
'geometry': None,
'properties': {
'hasType': 'Project',
'name': instance.name,
'external_url': make_cm_url(external_url),
'additionalProperties': {
'description': instance.description
}
}
})
visibility_details.append({
'external_url': make_cm_url(external_url),
'hidden': hidden
})
details['project_id'] = instance.id
# ###########################
# ADDITIONS FOR CATEGORY
# ###########################
if class_name == 'Category':
external_url = '%s/api/projects/%s/categories/%s/' % (
domain, instance.project.id, instance.id)
if instance.project.isprivate:
hidden = True
activity_objects.append({
'type': 'Feature',
'geometry': None,
'properties': {
'hasType': 'Category',
'name': instance.name,
'external_url': make_cm_url(external_url),
'additionalProperties': {
'description': instance.description
}
}
})
visibility_details.append({
'external_url': make_cm_url(external_url),
'hidden': hidden
})
details['project_id'] = instance.project.id
details['category_id'] = instance.id
# ###########################
# ADDITIONS FOR CONTRIBUTION
# ###########################
if class_name == 'Observation':
external_url = '%s/api/projects/%s/contributions/%s/' % (
domain, instance.project.id, instance.id)
if instance.project.isprivate:
hidden = True
geometry = literal_eval(instance.location.geometry.geojson)
additional_properties = literal_eval(json.dumps(instance.properties))
properties = {'hasType': 'Contribution', 'external_url': make_cm_url(external_url),
'additionalProperties': additional_properties,
'name': get_link_title(properties=instance.properties)}
activity_objects.append({
'type': 'Feature',
'geometry': geometry,
'properties': properties
})
visibility_details.append({
'external_url': make_cm_url(external_url),
'hidden': hidden
})
details['project_id'] = instance.project.id
details['category_id'] = instance.category.id
# ###########################
# ADDITIONS FOR COMMENT
# ###########################
if class_name == 'Comment':
contribution = instance.commentto
parent_comment = instance.respondsto or None
external_url = '%s/api/projects/%s/contributions/%s/comments' % (
domain, contribution.project.id, contribution.id)
if contribution.project.isprivate:
hidden = True
activity_objects.append({
'type': 'Feature',
'geometry': None,
'properties': {
'hasType': 'Comment',
'external_url': make_cm_url(external_url),
'additionalProperties': {
'text': instance.text,
'responds_to': (
None if not parent_comment else parent_comment.id
)
}
}
})
visibility_details.append({
'external_url': make_cm_url(external_url),
'hidden': hidden
})
details['project_id'] = contribution.project.id
details['category_id'] = contribution.category.id
details['contribution_id'] = contribution.id
# ###########################
# ADDITIONS FOR MEDIA FILE
# ###########################
if class_name == 'MediaFile':
contribution = instance.contribution
external_url = '%s/api/projects/%s/contributions/%s/media/%s' % (
domain, contribution.project.id, contribution.id, instance.id)
if contribution.project.isprivate:
hidden = True
if hasattr(instance, 'audio'):
url = domain + instance.audio.url
elif hasattr(instance, 'image'):
url = domain + instance.image.url
elif hasattr(instance, 'video'):
url = instance.youtube_link
else:
url = domain
activity_objects.append({
'type': 'Feature',
'geometry': None,
'properties': {
'hasType': 'MediaFile',
'name': instance.name,
'external_url': make_cm_url(external_url),
'additionalProperties': {
'description': instance.description,
'url': url
}
}
})
visibility_details.append({
'external_url': make_cm_url(external_url),
'hidden': hidden
})
details['project_id'] = contribution.project.id
details['category_id'] = contribution.category.id
details['contribution_id'] = contribution.id
# ###########################
# FINAL EVENT OBJECT
# ###########################
event = {
'actor': int(uwum_account.id),
'timestamp': int(round(time.time() * 1000)),
'activity_type': 'object_%s' % action,
'activity_objects': activity_objects,
'visibility_details': visibility_details,
'details': details
}
return event
def send_events(events):
"""Send OnToMap events."""
cert = get_cert()
url = settings.ONTOMAP_URLS['EVENTS_URL']
if events:
# Always make sure mappings are up-to-date before sending event
check_mappings()
data = {
'event_list': events
}
response = request(
'POST',
headers=headers,
url=url,
cert=cert,
data=json.dumps(data))
return response
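# Usage sketch (illustrative only; requires Django settings with ONTOMAP_URLS, a valid
# UWUM certificate, and an authenticated request on the current signal). Here
# ``project`` stands for a geokey Project instance that has just been created:
#
#   event = make_event('Project', project, 'created')
#   response = send_events([event])      # check_mappings() is called internally first
#   # response.status_code == 200 on success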
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import psycopg2
from rmake.lib.ninamori import error as nerror
from rmake.lib.ninamori.connection import ConnectString
from rmake.lib.ninamori.types import Row, SQL
from rmake.lib.twisted_extras import deferred_service
from psycopg2 import extensions
from rmake.lib.dbextensions import register_types
from twisted.internet import defer
from twisted.internet import task
from twisted.python import failure
from txpostgres import txpostgres
log = logging.getLogger(__name__)
class Cursor(txpostgres.Cursor):
def execute(self, statement, args=None):
if isinstance(statement, SQL):
assert not args
statement, args = statement.statement, statement.args
d = txpostgres.Cursor.execute(self, statement, args)
d.addErrback(self._convertErrors)
return d
def query(self, statement, args=None):
d = self.execute(statement, args)
d.addCallback(lambda cu: cu.fetchall())
return d
@staticmethod
def _convertErrors(reason):
reason.trap(psycopg2.DatabaseError)
exc_value = reason.value
if getattr(exc_value, 'pgcode', None):
exc_type = nerror.getExceptionFromCode(reason.value.pgcode)
exc_value = exc_type(*exc_value.args)
exc_value.err_code = reason.value.pgcode
new = failure.Failure(exc_value, exc_type)
new.frames = reason.frames
new.stack = reason.stack
return new
return reason
def fields(self):
desc = self.description
if desc is not None:
return [x[0] for x in desc]
else:
return None
def _row(self, data):
if data is None:
return None
return Row(data, self.fields())
def fetchone(self):
return self._row(self._cursor.fetchone())
def fetchall(self):
return [self._row(x) for x in self._cursor.fetchall()]
class Connection(txpostgres.Connection):
cursorFactory = Cursor
def __init__(self, reactor, pool):
txpostgres.Connection.__init__(self, reactor)
self.pool = pool
def connect(self, path):
params = path.asDict(exclude=('driver',))
params['database'] = params.pop('dbname')
d = txpostgres.Connection.connect(self, **params)
def cb_connected(result):
extensions.register_type(extensions.UNICODE, self._connection)
return register_types(self)
d.addCallback(cb_connected)
return d
class ConnectionPool(deferred_service.Service):
min = 3
connectionFactory = Connection
def __init__(self, path, min=None):
self.path = ConnectString.parse(path)
self.pool_running = False
self.shutdownID = None
self.cleanupTask = None
if min:
self.min = min
self.connections = set()
self.connQueue = defer.DeferredQueue()
from twisted.internet import reactor
self.reactor = reactor
def postStartService(self):
return self.start()
def stopService(self):
deferred_service.Service.stopService(self)
self.close()
# Pool control
def start(self):
if self.pool_running:
return
self.shutdownID = self.reactor.addSystemEventTrigger('during',
'shutdown', self._finalClose)
self.pool_running = True
d = self.rebalance()
def cb_connected(result):
self.cleanupTask = task.LoopingCall(self.rebalance)
self.cleanupTask.start(5, now=False)
return result
d.addCallback(cb_connected)
return d
def close(self):
if self.shutdownID:
self.reactor.removeSystemEventTrigger(self.shutdownID)
self.shutdownID = None
self._finalClose()
def _finalClose(self):
for conn in self.connections:
try:
conn.close()
except psycopg2.InterfaceError:
# Connection already closed
pass
self.shutdownID = None
if self.cleanupTask:
self.cleanupTask.stop()
self.pool_running = False
def rebalance(self):
dfrs = []
for x in range(self.min - len(self.connections)):
dfrs.append(self._startOne())
d = defer.DeferredList(dfrs, fireOnOneErrback=True, consumeErrors=True)
def eb_connect_failed(reason):
# Pull the real error out of the FirstError that the DeferredList slaps on
return reason.value.subFailure
d.addErrback(eb_connect_failed)
return d
def _startOne(self):
conn = self.connectionFactory(self.reactor, self)
log.debug("Connecting asynchronously to %s", self.path.asDSN())
d = conn.connect(self.path)
def cb_connected(dummy):
log.debug("Database is connected")
self._add(conn)
d.addCallback(cb_connected)
return d
def _add(self, conn):
self.connections.add(conn)
self.connQueue.put(conn)
def _remove(self, conn):
self.connections.discard(conn)
if conn in self.connQueue.pending:
self.connQueue.pending.remove(conn)
# Running queries
def runQuery(self, statement, args=None):
"""Execute a query and callback the result."""
return self._runWithConn('runQuery', statement, args)
def runOperation(self, statement, args=None):
"""Execute a statement and callback C{None} when done."""
return self._runWithConn('runOperation', statement, args)
def runInteraction(self, func, *args, **kwargs):
"""Run function in a transaction and callback the result."""
return self._runWithConn('runInteraction', func, *args, **kwargs)
def _runWithConn(self, funcName, *args, **kwargs):
if self.connQueue.pending:
d = defer.succeed(None)
else:
d = self.rebalance()
d.addCallback(lambda _: self.connQueue.get())
def gotConn(conn):
func = getattr(conn, funcName)
d2 = defer.maybeDeferred(func, *args, **kwargs)
def handleConnClosed(reason):
reason.trap(psycopg2.DatabaseError, psycopg2.InterfaceError)
msg = reason.value.pgerror
if msg and ('server closed ' in msg
or 'connection already closed' in msg):
# Connection was closed
self._remove(conn)
log.info("Lost connection to database")
return reason
d2.addErrback(handleConnClosed)
def releaseAndReturn(result):
# Only put the connection back in the queue if it is also still
# in the pool. This keeps it from being requeued if the
# connection was terminated during the operation.
if conn in self.connections:
self.connQueue.put(conn)
return result
d2.addBoth(releaseAndReturn)
return d2
d.addCallback(gotConn)
return d
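# Usage sketch (illustrative only, not part of the original module). The DSN below is
# a placeholder; the pool is driven from a running Twisted reactor:
#
#   from twisted.internet import reactor
#   pool = ConnectionPool('postgres://rmake@localhost/rmake')
#   d = pool.start()
#   d.addCallback(lambda _: pool.runQuery('SELECT 1'))
#   d.addCallback(lambda rows: log.info('result: %r', rows))
#   d.addBoth(lambda _: pool.close())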
|
|
import logging
import time
import traceback
from multiprocessing import Process, Queue, cpu_count
from queue import Empty
from threading import Thread
logger = logging.getLogger(__name__)
OUTPUT_QUEUE_TIMEOUT = 0.1
"""
This is how long an output queue will block while waiting for output to process
"""
PROCESS_JOIN_TIMEOUT = 5
"""
This is how long to wait for processes to join the main thread when it
looks like we are done.
"""
OUTPUT_QUEUE_SIZE = 50
def map(process, items, mappers=None, output_queue_size=OUTPUT_QUEUE_SIZE):
"""
Implements a distributed strategy for CPU-intensive tasks. This
function constructs a set of :mod:`multiprocessing` worker processes (spread
over multiple cores) and uses an internal queue to aggregate outputs. To use
this function, implement a `process()` function that takes one argument --
a serializable job. Anything that this function ``yield``s will be
`yielded` in turn from the :func:`para.map` function.
:Parameters:
process : `func`
A function that takes an item as a parameter and returns a
generator of output values.
items : `iterable` ( `picklable` )
:mod:`pickle`-able items to process. Note that this must fit in
memory.
mappers : int
the number of parallel mappers to spool up
output_queue_size : int
the number of outputs to buffer before blocking mappers
:Example:
>>> import para
>>> files = ["examples/dump.xml", "examples/dump2.xml"]
>>>
>>> def filter_long_lines(path):
... with open(path) as f:
... for line in f:
... if len(line) > 100:
... yield (path, line)
...
>>> for path, line in para.map(filter_long_lines, files):
... print(path, line)
...
"""
items = list(items)
# Special case for a single item
if len(items) == 1:
return _map_single_item(process, items[0])
else:
return _map_many_items(process, items, mappers)
def _map_single_item(process, item):
yield from process(item)
def _map_many_items(process, items, mappers,
output_queue_size=OUTPUT_QUEUE_SIZE):
# Load paths into the queue
item_queue = Queue()
for item in items:
item_queue.put(item)
# How many mappers are we going to have?
mappers = min(max(1, mappers or cpu_count()), len(items))
# Prepare the output queue
output = Queue(output_queue_size or OUTPUT_QUEUE_SIZE)
# Prepare the logs queue
qlogger = QueueLogger()
qlogger.start()
# Prepare the mappers and start them
map_processes = [Mapper(process, item_queue, output, qlogger, name=str(i))
for i in range(mappers)]
for map_process in map_processes:
map_process.start()
# Read from the output queue while there's still a mapper alive or
# something in the queue to read.
while not output.empty() or mappers > 0:
try:
# if we timeout, the loop will check to see if we are done
error, value, mapper_done = \
output.get(timeout=OUTPUT_QUEUE_TIMEOUT)
if mapper_done: # Decrement the number of mappers
mappers -= 1
logger.info("Mapper shutting down. {0} mappers still running"
.format(mappers))
elif error is None:
yield value
else:
raise error
except KeyboardInterrupt:
logger.warning("KeyboardInterrupt detected. Finishing...")
break
except Empty:
# This can happen when mappers aren't adding values to the
# queue fast enough *or* if we're done processing.
# Check if we have any processes still alive
if mappers > 0:
continue # Keep trying
else:
break # Shut it down
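# Note on the output-queue protocol used above (added for clarity, not in the
# original): every item placed on the queue is a 3-tuple (error, value, mapper_done):
#
#   (None, value, False)   -> a regular yielded value, re-yielded to the caller
#   (exc,  None,  False)   -> an exception raised inside process(), re-raised here
#   (None, None,  True)    -> a mapper finished; decrements the live-mapper count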
class Mapper(Process):
def __init__(self, process, item_queue, output, logger, name=None):
"""
Implements a mapper process worker. Instances of this class will
continually try to read from an `item_queue` and execute its
`process()` function until there is nothing left to read from the
`item_queue`.
"""
super().__init__(name="{0}".format(name), daemon=True)
self.process = process
self.item_queue = item_queue
self.output = output
self.logger = logger
self.stats = []
def run(self):
logger.info("{0}: Starting up.".format(self.name))
try:
while True:
# Get an item to process
item = self.item_queue.get(timeout=0.05)
self.logger.info("{0}: Processing {1}"
.format(self.name, str(item)[:50]))
try:
start_time = time.time()
count = 0
# For each value that is yielded, add it to the output
# queue
for value in self.process(item):
self.output.put((None, value, False))
count += 1
self.stats.append((item, count, time.time() - start_time))
except Exception as e:
self.logger.error(
"Mapper {0}: An error occured while processing {1}"
.format(self.name, str(item)[:50])
)
formatted = traceback.format_exc(chain=False)
self.logger.error("{0}: {1}".format(self.name, formatted))
self.output.put((e, None, False))
return # Exits without polluting stderr
except Empty:
self.logger.info("Mapper {0}: No more items to process"
.format(self.name))
self.logger.info("\n" + "\n".join(self.format_stats()))
self.output.put((None, None, True))
def format_stats(self):
for path, outputs, duration in self.stats:
yield "{0}: - Extracted {1} values from {2} in {3} seconds" \
.format(self.name, outputs, path, duration)
class QueueLogger(Thread):
def __init__(self, logger=None):
super().__init__(daemon=True)
self.queue = Queue()
def debug(self, message):
self.queue.put((logging.DEBUG, message))
def info(self, message):
self.queue.put((logging.INFO, message))
def warning(self, message):
self.queue.put((logging.WARNING, message))
def error(self, message):
self.queue.put((logging.ERROR, message))
def run(self):
while True:
try:
level, message = self.queue.get(timeout=OUTPUT_QUEUE_TIMEOUT)
logger.log(level, message)
except Empty:
continue
|
|
#
# Broker peering simulation (part 3) in Python
# Prototypes the full flow of status and tasks
#
# While this example runs in a single process, that is just to make
# it easier to start and stop the example. Each thread has its own
# context and conceptually acts as a separate process.
#
# Author : Min RK
# Contact: benjaminrk(at)gmail(dot)com
#
import random
import sys
import threading
import time
import zmq
NBR_CLIENTS = 10
NBR_WORKERS = 5
def asbytes(obj):
s = str(obj)
if str is not bytes:
# Python 3
s = s.encode('ascii')
return s
def client_task(name, i):
"""Request-reply client using REQ socket"""
ctx = zmq.Context()
client = ctx.socket(zmq.REQ)
client.identity = (u"Client-%s-%s" % (name, i)).encode('ascii')
client.connect("ipc://%s-localfe.ipc" % name)
monitor = ctx.socket(zmq.PUSH)
monitor.connect("ipc://%s-monitor.ipc" % name)
poller = zmq.Poller()
poller.register(client, zmq.POLLIN)
while True:
time.sleep(random.randint(0, 5))
for _ in range(random.randint(0, 15)):
# send request with random hex ID
task_id = u"%04X" % random.randint(0, 10000)
client.send_string(task_id)
# wait max 10 seconds for a reply, then complain
try:
events = dict(poller.poll(10000))
except zmq.ZMQError:
return # interrupted
if events:
reply = client.recv_string()
assert reply == task_id, "expected %s, got %s" % (task_id, reply)
monitor.send_string(reply)
else:
monitor.send_string(u"E: CLIENT EXIT - lost task %s" % task_id)
return
def worker_task(name, i):
"""Worker using REQ socket to do LRU routing"""
ctx = zmq.Context()
worker = ctx.socket(zmq.REQ)
worker.identity = ("Worker-%s-%s" % (name, i)).encode('ascii')
worker.connect("ipc://%s-localbe.ipc" % name)
# Tell broker we're ready for work
worker.send(b"READY")
# Process messages as they arrive
while True:
try:
msg = worker.recv_multipart()
except zmq.ZMQError:
# interrupted
return
# Workers are busy for 0/1 seconds
time.sleep(random.randint(0, 1))
worker.send_multipart(msg)
def main(myself, peers):
print("I: preparing broker at %s..." % myself)
# Prepare our context and sockets
ctx = zmq.Context()
# Bind cloud frontend to endpoint
cloudfe = ctx.socket(zmq.ROUTER)
cloudfe.setsockopt(zmq.IDENTITY, myself)
cloudfe.bind("ipc://%s-cloud.ipc" % myself)
# Bind state backend / publisher to endpoint
statebe = ctx.socket(zmq.PUB)
statebe.bind("ipc://%s-state.ipc" % myself)
# Connect cloud and state backends to all peers
cloudbe = ctx.socket(zmq.ROUTER)
statefe = ctx.socket(zmq.SUB)
statefe.setsockopt(zmq.SUBSCRIBE, b"")
cloudbe.setsockopt(zmq.IDENTITY, myself)
for peer in peers:
print("I: connecting to cloud frontend at %s" % peer)
cloudbe.connect("ipc://%s-cloud.ipc" % peer)
print("I: connecting to state backend at %s" % peer)
statefe.connect("ipc://%s-state.ipc" % peer)
# Prepare local frontend and backend
localfe = ctx.socket(zmq.ROUTER)
localfe.bind("ipc://%s-localfe.ipc" % myself)
localbe = ctx.socket(zmq.ROUTER)
localbe.bind("ipc://%s-localbe.ipc" % myself)
# Prepare monitor socket
monitor = ctx.socket(zmq.PULL)
monitor.bind("ipc://%s-monitor.ipc" % myself)
# Get user to tell us when we can start...
# raw_input("Press Enter when all brokers are started: ")
# create workers and clients threads
for i in range(NBR_WORKERS):
thread = threading.Thread(target=worker_task, args=(myself, i))
thread.daemon = True
thread.start()
for i in range(NBR_CLIENTS):
thread_c = threading.Thread(target=client_task, args=(myself, i))
thread_c.daemon = True
thread_c.start()
# Interesting part
# -------------------------------------------------------------
# Publish-subscribe flow
# - Poll statefe and process capacity updates
# - Each time capacity changes, broadcast new value
# Request-reply flow
# - Poll primary and process local/cloud replies
# - While worker available, route localfe to local or cloud
local_capacity = 0
cloud_capacity = 0
workers = []
# setup backend poller
pollerbe = zmq.Poller()
pollerbe.register(localbe, zmq.POLLIN)
pollerbe.register(cloudbe, zmq.POLLIN)
pollerbe.register(statefe, zmq.POLLIN)
pollerbe.register(monitor, zmq.POLLIN)
while True:
# If we have no workers anyhow, wait indefinitely
try:
events = dict(pollerbe.poll(1000 if local_capacity else None))
except zmq.ZMQError:
break # interrupted
previous = local_capacity
# Handle reply from local worker
msg = None
if localbe in events:
msg = localbe.recv_multipart()
(address, empty), msg = msg[:2], msg[2:]
workers.append(address)
local_capacity += 1
# If it's READY, don't route the message any further
if msg[-1] == b'READY':
msg = None
elif cloudbe in events:
msg = cloudbe.recv_multipart()
(address, empty), msg = msg[:2], msg[2:]
# We don't use peer broker address for anything
if msg is not None:
address = msg[0]
if address in peers:
# Route reply to cloud if it's addressed to a broker
cloudfe.send_multipart(msg)
else:
# Route reply to client if we still need to
localfe.send_multipart(msg)
# Handle capacity updates
if statefe in events:
peer, s = statefe.recv_multipart()
cloud_capacity = int(s)
# handle monitor message
if monitor in events:
print(monitor.recv_string())
# Now route as many clients requests as we can handle
# - If we have local capacity we poll both localfe and cloudfe
# - If we have cloud capacity only, we poll just localfe
# - Route any request locally if we can, else to cloud
while local_capacity + cloud_capacity:
secondary = zmq.Poller()
secondary.register(localfe, zmq.POLLIN)
if local_capacity:
secondary.register(cloudfe, zmq.POLLIN)
events = dict(secondary.poll(0))
# We'll do peer brokers first, to prevent starvation
if cloudfe in events:
msg = cloudfe.recv_multipart()
elif localfe in events:
msg = localfe.recv_multipart()
else:
break # No work, go back to backends
if local_capacity:
msg = [workers.pop(0), b''] + msg
localbe.send_multipart(msg)
local_capacity -= 1
else:
# Route to random broker peer
msg = [random.choice(peers), b''] + msg
cloudbe.send_multipart(msg)
if local_capacity != previous:
statebe.send_multipart([myself, asbytes(local_capacity)])
if __name__ == '__main__':
if len(sys.argv) >= 2:
myself = asbytes(sys.argv[1])
main(myself, peers=[ asbytes(a) for a in sys.argv[2:] ])
else:
print("Usage: peering3.py <me> [<peer_1> [... <peer_N>]]")
sys.exit(1)
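# Hedged launch example (broker names are arbitrary; run each command in its own
# terminal so the three brokers can peer with one another):
#   python peering3.py DC1 DC2 DC3
#   python peering3.py DC2 DC1 DC3
#   python peering3.py DC3 DC1 DC2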
|
|
from dipy.tracking.streamline import set_number_of_points
import dipy.tracking.distances as pf
from dipy.segment.metricspeed import distance_matrix
from dipy.segment.metric import MinimumAverageDirectFlipMetric
import numpy as np
import pandas as pd
from scipy.spatial import distance
def flipTrackslr(streamlines,voxel_size,voxel_dim):
print(streamlines[0][0])
streamlines_flipped = []
for s in streamlines:
new_pts = []
for pt in s:
new_pts.append(np.array([pt[0],voxel_dim[1]*voxel_size[1]-pt[1],pt[2]]))
streamlines_flipped.append(np.array(new_pts))
print(streamlines_flipped[0][0])
return streamlines_flipped
# Needs testing
def applyDeformationToStreamlines(streamlines,deformation,ptsPerStream=20):
streamlines = set_number_of_points(streamlines,ptsPerStream)
pts = [pt for s in streamlines for pt in s]
print(pts[1])
print(deformation[int(pts[1][0]),int(pts[1][1]),int(pts[1][2])])
print(np.array([deformation[int(pts[1][0]),int(pts[1][1]),int(pts[1][2])][0][0]+pts[1][0],
deformation[int(pts[1][0]),int(pts[1][1]),int(pts[1][2])][0][1]+pts[1][1],
deformation[int(pts[1][0]),int(pts[1][1]),int(pts[1][2])][0][2]+pts[1][2]]))
deformed_pts = [np.array([deformation[int(pt[0]),int(pt[1]),int(pt[2])][0][0]+pt[0],
deformation[int(pt[0]),int(pt[1]),int(pt[2])][0][1]+pt[1],
deformation[int(pt[0]),int(pt[1]),int(pt[2])][0][2]+pt[2]]) for pt in pts]
streamlines = []
for i in range(0,len(pts),ptsPerStream):
stream = []
for j in range(ptsPerStream):
stream.append(np.array([deformed_pts[i+j][0],
deformed_pts[i+j][1],
deformed_pts[i+j][2]]))
streamlines.append(np.array(stream))
print(streamlines[0])
return streamlines
def tracks2CSV(out,streamlines,ptsPerStream=20):
""" Takes list of streamlines and outputs a csv so that the points
are in the format needed to apply the ants tranform
    Parameters
----------
out : string
csv output filename
streamlines : list
of ndarrays
ptsPerStream : int
number of points per streamline
"""
streamlines = set_number_of_points(streamlines,ptsPerStream)
pts = [pt for s in streamlines for pt in s]
columns = ['x','y','z','t','label','comment']
df = pd.DataFrame(data={columns[0] : [pt[0] for pt in pts],
columns[1] : [pt[1] for pt in pts],
columns[2] : [pt[2] for pt in pts],
columns[3] :[0 for pt in pts],
columns[4] :[None for pt in pts],
columns[5] :[None for pt in pts]})
df = df[['x','y','z','t','label','comment']]
df.to_csv(out,index=False,na_rep='nan')
def csv2Tracks(csv,ptsPerStream=20):
""" Takes points in ants transform format and converts back to streamlines
so that they can be saved in .trk format
Parameters
----------
csv : string
input csv
ptsPerStream : int
number of points per streamline
Returns
-------
streamlines : list
of ndarrays
"""
df = pd.read_csv(csv)
streamlines = []
for i in range(0,len(df),ptsPerStream):
stream = []
for j in range(ptsPerStream):
stream.append(np.array([df['x'].values[i+j],
df['y'].values[i+j],
df['z'].values[i+j]]))
streamlines.append(np.array(stream))
return streamlines
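# Hedged usage sketch of the CSV round trip above (filenames and the external
# warp step are illustrative assumptions, not part of this module):
"""
    tracks2CSV('bundle_pts.csv', streamlines, ptsPerStream=20)
    # ... warp the points externally (e.g. antsApplyTransformsToPoints) ...
    warped_streamlines = csv2Tracks('bundle_pts_warped.csv', ptsPerStream=20)
"""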
def sliceFibers(streamlines,roi1,roi2):
""" This function slices streamlines and returns only the section that connects
    the two given ROIs. Implemented using a simple state machine. Streamlines should be
    in voxel coordinates (scale them by your voxel dimensions first), and the image
    orientation of the ROIs (e.g. LAS, LPS) must match the streamline orientation;
    flip your ROIs if needed.
Parameters
----------
streamlines: list
of ndarrays
roi1 : 3D numpy array
doesn't have to be binarized
    roi2 : 3D numpy array
doesn't have to be binarized
Returns
-------
trimmedStreamlines : list
of ndarrays
"""
def inROI1(pt,roi1):
return roi1[int(pt[0]),int(pt[1]),int(pt[2])] != 0
def inROI2(pt,roi2):
return roi2[int(pt[0]),int(pt[1]),int(pt[2])] != 0
def inFirstROI(pt,firstROI,roi1,roi2):
if firstROI == 'roi1':
return inROI1(pt,roi1)
else:
return inROI2(pt,roi2)
def inSecondROI(pt,firstROI,roi1,roi2):
if firstROI == 'roi1':
return inROI2(pt,roi2)
else:
return inROI1(pt,roi1)
def slice(s,roi1,roi2):
state0 = True
state1 = False
state2 = False
firstROI = None
ind1 = None
ind2 = None
for i,pt in enumerate(s):
if state0 and not inROI1(pt,roi1) and not inROI2(pt,roi2):
pass
elif state0 and inROI1(pt,roi1):
firstROI = 'roi1'
state0 = False
state1 = True
elif state0 and inROI2(pt,roi2):
firstROI = 'roi2'
state0 = False
state1 = True
elif state1 and inFirstROI(pt,firstROI,roi1,roi2):
pass
elif state1 and not inFirstROI(pt,firstROI,roi1,roi2):
ind1 = i
state1 = False
state2 = True
elif state2 and not inROI1(pt,roi1) and not inROI2(pt,roi2):
pass
elif state2 and inFirstROI(pt,firstROI,roi1,roi2):
state2 = False
state1 = True
elif state2 and inSecondROI(pt,firstROI,roi1,roi2):
ind2 = i
break
        if ind2 is not None:
return s[ind1:ind2]
else:
return None
streamlines = set_number_of_points(streamlines,200)
trimmedStreamlines = [slice(s,roi1,roi2) for s in streamlines]
trimmedStreamlines = [s for s in trimmedStreamlines if s is not None]
return trimmedStreamlines
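# Hedged usage sketch for sliceFibers (the ROI files and prior voxel scaling are
# illustrative assumptions):
"""
    import nibabel as nib
    roi1 = nib.load('roi1.nii.gz').get_fdata()   # same grid/orientation as the tracks
    roi2 = nib.load('roi2.nii.gz').get_fdata()
    # streamlines must already be in voxel coordinates (divide by voxel size first)
    connecting_segments = sliceFibers(streamlines_vox, roi1, roi2)
"""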
def orientFibers(templateBundle,correspondingFibers,numPoints=20):
""" Takes in templateBundle and the list of corresponding fibers in a particular
    subject. It checks whether the order of points in the corresponding fiber matches
    the order of the points in the template fiber: i.e. if p0 in the template is posterior, p0
in subject should also be posterior.
Parameters
----------
templateBundle : list
of streamlines
correspondingFibers : list
of streamlines
Returns
-------
subjectBundle: list of corresponding streamlines oriented in same way as templateBundle
"""
subjectBundle = []
templateBundle = set_number_of_points(templateBundle,numPoints)
correspondingFibers = set_number_of_points(correspondingFibers,numPoints)
for t,s in zip(templateBundle,correspondingFibers):
d1 = np.mean([distance.euclidean(p1,p2) for p1,p2 in zip(t,s)])
d2 = np.mean([distance.euclidean(p1,p2) for p1,p2 in zip(t,np.flip(s,axis=0))])
if d1 <= d2:
subjectBundle.append(s)
else:
subjectBundle.append(np.flip(s,axis=0))
return subjectBundle
def maskTracks(fullset, subset):
""" This function takes in two sets of tracks and returns the
fullset - subset
Parameters
----------
fullset : list
of streamlines
subset : list
of streamlines
Returns
-------
maskedset : list
of streamlines - fullset - subset
"""
def StreamInSet(streamline, subset):
subset = [np.floor(s) for s in subset]
streamline = np.floor(streamline)
for s in subset:
if (np.array_equal(s,streamline)):
return True
return False
maskedset = [fullset[i] for i in range(len(fullset))
if not StreamInSet(fullset[i],subset)]
return maskedset
def meanFiber(bundle,numPoints=20):
""" This function reduces a full bundle of tracks to a single mean fiber.
    We calculate a distance matrix for the streamlines and return the streamline
with the smallest mean distance to every other fiber.
Parameters
----------
bundle : list
of streamlines
numPoints : int
number of points per streamline
Returns
-------
meanTrack : list
of ndarrays [3x1]
"""
bundle = set_number_of_points(bundle,numPoints)
metric = MinimumAverageDirectFlipMetric()
    distanceMatrix = distance_matrix(metric, bundle)
index = np.argmin(np.mean(distanceMatrix,axis=0))
return bundle[index]
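# Hedged usage sketch: pick the representative (medoid) fiber of a bundle.
"""
    representative = meanFiber(bundle, numPoints=20)   # ndarray of shape (20, 3)
"""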
|
|
#!/usr/bin/env python
# =================================================
# Dependencies
# -------------------------------------------------
from __future__ import print_function
import cdms2
import glob
import json
import os
import pkg_resources
import sys
from genutil import StringConstructor
from PMPdriver_lib import AddParserArgument
from PMPdriver_lib import metrics_to_json
from PMPdriver_lib import sort_human
from PMPdriver_lib import find_realm, get_file
from EnsoMetrics.EnsoCollectionsLib import CmipVariables, defCollection, ReferenceObservations
from EnsoMetrics.EnsoComputeMetricsLib import ComputeCollection
# To avoid below error when using multi cores
# OpenBLAS blas_thread_init: pthread_create failed for thread XX of 96: Resource temporarily unavailable
os.environ['OPENBLAS_NUM_THREADS'] = '1'
# =================================================
# Collect user defined options
# -------------------------------------------------
param = AddParserArgument()
# Pre-defined options
mip = param.mip
exp = param.exp
print('mip:', mip)
print('exp:', exp)
# Path to model data as string template
modpath = param.process_templated_argument("modpath")
modpath_lf = param.process_templated_argument("modpath_lf")
# Check given model option
models = param.modnames
# Include all models if conditioned
if ('all' in [m.lower() for m in models]) or (models == 'all'):
model_index_path = param.modpath.split('/')[-1].split('.').index("%(model)")
models = ([p.split('/')[-1].split('.')[model_index_path] for p in glob.glob(modpath(
mip=mip, exp=exp, model='*', realization='*', variable='ts'))])
# remove duplicates
models = sorted(list(dict.fromkeys(models)), key=lambda s: s.lower())
print('models:', models)
# Realizations
realization = param.realization
print('realization: ', realization)
# Metrics Collection
mc_name = param.metricsCollection
dict_mc = defCollection(mc_name)
list_metric = sorted(dict_mc['metrics_list'].keys())
print('mc_name:', mc_name)
# case id
case_id = param.case_id
# Output
outdir_template = param.process_templated_argument("results_dir")
outdir = StringConstructor(str(outdir_template(
output_type='%(output_type)',
mip=mip, exp=exp, metricsCollection=mc_name, case_id=case_id)))
netcdf_path = outdir(output_type='diagnostic_results')
json_name_template = param.process_templated_argument("json_name")
netcdf_name_template = param.process_templated_argument("netcdf_name")
print('outdir:', str(outdir_template(
output_type='%(output_type)',
mip=mip, exp=exp, metricsCollection=mc_name)))
print('netcdf_path:', netcdf_path)
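# Hedged note on the templated paths above (the template below is illustrative,
# not the real configuration): StringConstructor fills %(key) placeholders when
# the constructed object is called with keyword arguments, e.g.:
"""
    example = StringConstructor('%(mip)/%(exp)/%(case_id)/%(output_type)')
    example(mip='cmip6', exp='historical', case_id='v20210101', output_type='metrics_results')
    # -> 'cmip6/historical/v20210101/metrics_results'
"""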
# Switches
debug = param.debug
print('debug:', debug)
# =================================================
# Prepare loop iteration
# -------------------------------------------------
# Environmental setup
try:
egg_pth = pkg_resources.resource_filename(
pkg_resources.Requirement.parse("pcmdi_metrics"), "share/pmp")
except Exception:
egg_pth = os.path.join(sys.prefix, "share", "pmp")
print('egg_pth:', egg_pth)
# Create output directory
for output_type in ['graphics', 'diagnostic_results', 'metrics_results']:
if not os.path.exists(outdir(output_type=output_type)):
os.makedirs(outdir(output_type=output_type))
print(outdir(output_type=output_type))
# list of variables
list_variables = list()
for metric in list_metric:
listvar = dict_mc['metrics_list'][metric]['variables']
for var in listvar:
if var not in list_variables:
list_variables.append(var)
list_variables = sorted(list_variables)
print(list_variables)
# list of observations
list_obs = list()
for metric in list_metric:
dict_var_obs = dict_mc['metrics_list'][metric]['obs_name']
for var in dict_var_obs.keys():
for obs in dict_var_obs[var]:
if obs not in list_obs:
list_obs.append(obs)
list_obs = sorted(list_obs)
#
# finding file and variable name in file for each observations dataset
#
dict_obs = dict()
for obs in list_obs:
# be sure to add your datasets to EnsoCollectionsLib.ReferenceObservations if needed
dict_var = ReferenceObservations(obs)['variable_name_in_file']
dict_obs[obs] = dict()
for var in list_variables:
#
# finding variable name in file
#
try: var_in_file = dict_var[var]['var_name']
except:
print('\033[95m' + str(var) + " is not available for " + str(obs) + " or unscripted" + '\033[0m')
else:
if isinstance(var_in_file, list):
var0 = var_in_file[0]
else:
var0 = var_in_file
try:
# finding file for 'obs', 'var'
file_name = param.reference_data_path[obs].replace('VAR', var0)
file_areacell = None ## temporary for now
try:
file_landmask = param.reference_data_lf_path[obs]
except:
file_landmask = None
try:
areacell_in_file = dict_var['areacell']['var_name']
except:
areacell_in_file = None
try:
landmask_in_file = dict_var['landmask']['var_name']
except:
landmask_in_file = None
# if var_in_file is a list (like for thf) all variables should be read from the same realm
if isinstance(var_in_file, list):
list_files = list()
list_files = [param.reference_data_path[obs].replace('VAR', var1) for var1 in var_in_file]
list_areacell = [file_areacell for var1 in var_in_file]
list_name_area = [areacell_in_file for var1 in var_in_file]
try:
list_landmask = [param.reference_data_lf_path[obs] for var1 in var_in_file]
except:
list_landmask = None
list_name_land = [landmask_in_file for var1 in var_in_file]
else:
list_files = file_name
list_areacell = file_areacell
list_name_area = areacell_in_file
list_landmask = file_landmask
list_name_land = landmask_in_file
dict_obs[obs][var] = {'path + filename': list_files, 'varname': var_in_file,
'path + filename_area': list_areacell, 'areaname': list_name_area,
'path + filename_landmask': list_landmask, 'landmaskname': list_name_land}
except:
                print('\033[95m' + 'Observation dataset ' + str(obs) + " is not given for variable " + str(var) + '\033[0m')
print('PMPdriver: dict_obs readin end')
# =================================================
# Loop for Models
# -------------------------------------------------
# finding file and variable name in file for each observations dataset
dict_metric, dict_dive = dict(), dict()
dict_var = CmipVariables()['variable_name_in_file']
print('models:', models)
for mod in models:
print(' ----- model: ', mod, ' ---------------------')
print('PMPdriver: var loop start for model ', mod)
dict_mod = {mod: {}}
dict_metric[mod], dict_dive[mod] = dict(), dict()
model_path_list = glob.glob(
modpath(mip=mip, exp=exp, realm='atmos', model=mod, realization='*', variable='ts'))
model_path_list = sort_human(model_path_list)
if debug:
print('model_path_list:', model_path_list)
    # Find where run can be extracted from the given filename template for modpath
print('realization:', realization)
run_in_modpath = modpath(mip=mip, exp=exp, realm='atmos', model=mod, realization=realization,
variable='ts').split('/')[-1].split('.').index(realization)
print('run_in_modpath:', run_in_modpath)
# Collect all available runs
runs_list = [model_path.split('/')[-1].split('.')[run_in_modpath] for model_path in model_path_list]
# Adjust realization to be included
    if realization in ["all", "*"]:
pass
elif realization in ["first"]:
runs_list = runs_list[:1]
else:
runs_list = [realization]
if debug:
print('runs_list:', runs_list)
# =================================================
# Loop for Realizations
# -------------------------------------------------
for run in runs_list:
print(' --- run: ', run, ' ---')
mod_run = '_'.join([mod, run])
dict_mod = {mod_run: {}}
if debug:
print('list_variables:', list_variables)
try:
for var in list_variables:
print(' --- var: ', var, ' ---')
# finding variable name in file
var_in_file = dict_var[var]['var_name']
print('var_in_file:', var_in_file)
if isinstance(var_in_file, list):
var0 = var_in_file[0]
else:
var0 = var_in_file
# finding variable type (atmos or ocean)
areacell_in_file, realm = find_realm(var0)
if realm == 'Amon':
realm2 = 'atmos'
elif realm == 'Omon':
realm2 = 'ocean'
else:
realm2 = realm
print('var, areacell_in_file, realm:', var, areacell_in_file, realm)
#
# finding file for 'mod', 'var'
#
file_name = get_file(modpath(mip=mip, realm=realm, exp=exp, model=mod, realization=run, variable=var0))
file_areacell = get_file(modpath_lf(mip=mip, realm=realm2, model=mod, variable=areacell_in_file))
if not os.path.isfile(file_areacell):
file_areacell = None
file_landmask = get_file(modpath_lf(mip=mip, realm=realm2, model=mod, variable=dict_var['landmask']['var_name']))
# -- TEMPORARY --
if mip == 'cmip6':
if mod in ['IPSL-CM6A-LR', 'CNRM-CM6-1']:
file_landmask = '/work/lee1043/ESGF/CMIP6/CMIP/'+mod+'/sftlf_fx_'+mod+'_historical_r1i1p1f1_gr.nc'
elif mod in ['GFDL-ESM4']:
file_landmask = modpath_lf(mip=mip, realm="atmos", model='GFDL-CM4', variable=dict_var['landmask']['var_name'])
if mip == 'cmip5':
if mod == "BNU-ESM":
# Incorrect latitude in original sftlf fixed
file_landmask = "/work/lee1043/ESGF/CMIP5/BNU-ESM/sftlf_fx_BNU-ESM_historical_r0i0p0.nc"
elif mod == "HadCM3":
# Inconsistent lat/lon between sftlf and other variables
file_landmask = None
# Inconsistent grid between areacella and tauu (probably staggering grid system)
file_areacell = None
# -- TEMPORARY END --
"""
try:
areacell_in_file = dict_var['areacell']['var_name']
except:
areacell_in_file = None
"""
try:
landmask_in_file = dict_var['landmask']['var_name']
except:
landmask_in_file = None
if isinstance(var_in_file, list):
list_areacell, list_files, list_landmask, list_name_area, list_name_land = \
list(), list(), list(), list(), list()
for var1 in var_in_file:
areacell_in_file, realm = find_realm(var1)
modpath_tmp = get_file(modpath(mip=mip, exp=exp, realm=realm, model=mod, realization=realization, variable=var1))
#modpath_lf_tmp = get_file(modpath_lf(mip=mip, realm=realm2, model=mod, variable=dict_var['landmask']['var_name']))
if not os.path.isfile(modpath_tmp):
modpath_tmp = None
#if not os.path.isfile(modpath_lf_tmp):
# modpath_lf_tmp = None
file_areacell_tmp = get_file(modpath_lf(mip=mip, realm=realm2, model=mod, variable=areacell_in_file))
print("file_areacell_tmp:", file_areacell_tmp)
if not os.path.isfile(file_areacell_tmp):
file_areacell_tmp = None
list_files.append(modpath_tmp)
list_areacell.append(file_areacell_tmp)
list_name_area.append(areacell_in_file)
#list_landmask.append(modpath_lf_tmp)
list_landmask.append(file_landmask)
list_name_land.append(landmask_in_file)
else:
if not os.path.isfile(file_name):
file_name = None
if file_landmask is not None:
if not os.path.isfile(file_landmask):
file_landmask = None
list_files = file_name
list_areacell = file_areacell
list_name_area = areacell_in_file
list_landmask = file_landmask
list_name_land = landmask_in_file
# Variable from ocean grid
if var in ['ssh']:
list_landmask = None
                # Temporary control of areacello for models with zos on gr instead of gn
if mod in ['BCC-ESM1', 'CESM2', 'CESM2-FV2', 'CESM2-WACCM', 'CESM2-WACCM-FV2',
'GFDL-CM4', 'GFDL-ESM4', 'MRI-ESM2-0', # cmip6
#'BCC-CSM1-1', 'BCC-CSM1-1-M', 'EC-EARTH', 'GFDL-CM3', 'GISS-E2-R',
'BCC-CSM1-1', 'BCC-CSM1-1-M', 'GFDL-CM3', 'GISS-E2-R',
'MRI-CGCM3']: # cmip5
list_areacell = None
dict_mod[mod_run][var] = {
'path + filename': list_files, 'varname': var_in_file,
'path + filename_area': list_areacell, 'areaname': list_name_area,
'path + filename_landmask': list_landmask, 'landmaskname': list_name_land}
print('PMPdriver: var loop end')
# dictionary needed by EnsoMetrics.ComputeMetricsLib.ComputeCollection
dictDatasets = {'model': dict_mod, 'observations': dict_obs}
print('dictDatasets:')
print(json.dumps(dictDatasets, indent=4, sort_keys=True))
# regridding dictionary (only if you want to specify the regridding)
dict_regrid = {}
"""
# Usage of dict_regrid (select option as below):
dict_regrid = {
'regridding': {
'model_orand_obs': 2, 'regridder': 'cdms', 'regridTool': 'esmf', 'regridMethod': 'linear',
'newgrid_name': 'generic 1x1deg'},
}
"""
# Prepare netcdf file setup
json_name = json_name_template(mip=mip, exp=exp, metricsCollection=mc_name, case_id=case_id, model=mod, realization=run)
netcdf_name = netcdf_name_template(mip=mip, exp=exp, metricsCollection=mc_name, case_id=case_id, model=mod, realization=run)
netcdf = os.path.join(netcdf_path, netcdf_name)
if debug:
print('file_name:', file_name)
print('list_files:', list_files)
print('netcdf_name:', netcdf_name)
print('json_name:', json_name)
# Computes the metric collection
print("\n### Compute the metric collection ###\n")
cdms2.setAutoBounds('on')
dict_metric[mod][run], dict_dive[mod][run] = ComputeCollection(mc_name, dictDatasets, mod_run, netcdf=param.nc_out,
netcdf_name=netcdf, debug=debug)
if debug:
print('file_name:', file_name)
print('list_files:', list_files)
print('netcdf_name:', netcdf_name)
print('dict_metric:')
print(json.dumps(dict_metric, indent=4, sort_keys=True))
# OUTPUT METRICS TO JSON FILE (per simulation)
metrics_to_json(mc_name, dict_obs, dict_metric, dict_dive, egg_pth, outdir, json_name, mod=mod, run=run)
except Exception as e:
print('failed for ', mod, run)
print(e)
if not debug:
pass
print('PMPdriver: model loop end')
# =================================================
# OUTPUT METRICS TO JSON FILE (for all simulations)
# -------------------------------------------------
#json_name = json_name_template(mip=mip, exp=exp, metricsCollection=mc_name, model='all', realization='all')
#metrics_to_json(mc_name, dict_obs, dict_metric, dict_dive, egg_pth, outdir, json_name)
|
|
#!/usr/bin/env python3
import re
import struct
import sys
import numpy as np
import matplotlib.pyplot as plt
import EbbUtils as eu
import PlotUtils as pu
def tryint(s):
try:
return int(s)
except:
return s
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return [ tryint(c) for c in re.split('([0-9]+)', s) ]
def sort_nicely(l):
""" Sort the given list in the way that humans expect.
"""
l.sort(key=alphanum_key)
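# Hedged example of the natural sort above (filenames are illustrative):
#   files = ['run_sln10.dat', 'run_sln2.dat', 'run_sln1.dat']
#   sort_nicely(files)   ->   ['run_sln1.dat', 'run_sln2.dat', 'run_sln10.dat']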
#-----------------------------------------------------------
def plotstate(Mesh, U, field, fname, clim1, clim2, color, plotExact, plotError):
V = Mesh['V']
E = Mesh['E']
BE = Mesh['BE']
#f = plt.figure(figsize=(12,6))
F = pu.getField(U, field)
if((plotExact == True) or (plotError == True)):
Exact = F
s = field.lower()
if (s == 'pressure'):
Exact = pu.p_a(V[:,0], V[:,1])
#F = ((pr - F)/pr)*100.
elif (s == 'density'):
Exact = pu.rho_a(V[:,0], V[:,1])
#F = ((rho_a - F)/rho_a)*100.
elif (s == 'xmomentum'):
Exact = pu.rho_a(V[:,0], V[:,1])*pu.u_a(V[:,0], V[:,1])
#F = ((ru_a - F)/ru_a)*100.
elif (s == 'ymomentum'):
Exact = pu.rho_a(V[:,0], V[:,1])*pu.v_a(V[:,0], V[:,1])
#F = ((rv_a - F)/rv_a)*100.
elif (s == 'energy'):
Exact = pu.E_a(V[:,0], V[:,1])
#F = ((E_a - F)/E_a)*100.
elif (s == 'renergy'):
Exact = pu.rE_a(V[:,0], V[:,1])
#F = ((rE_a - F)/rE_a)*100.
elif (s == 'xvelocity'):
Exact = pu.u_a(V[:,0], V[:,1])
#F = ((u_a - F)/u_a)*100.
elif (s == 'yvelocity'):
Exact = pu.v_a(V[:,0], V[:,1])
#F = ((v_a - F)/v_a)*100.
if(plotError == True):
F = np.abs(Exact - F)
#F = ((Exact - F)/Exact)*100.
if(plotExact == True):
F = Exact
clim1 = np.min(F)
clim2 = np.max(F)
if(F.shape[0] == V.shape[0]):
plt.tripcolor(V[:,0], V[:,1], F, triangles=E, shading='gouraud', edgecolors=color, vmin=clim1, vmax=clim2, linewidth=1)
#plt.tripcolor(V[:,0], V[:,1], F, triangles=E, shading='flat', edgecolors=color, vmin=clim1, vmax=clim2, linewidth=1)
else:
plt.tripcolor(V[:,0], V[:,1], triangles=E, facecolors=F, shading='flat', vmin=clim1, vmax=clim2, linewidth=1)
for i in range(len(BE)):
x = [V[BE[i,0],0], V[BE[i,1],0]]
y = [V[BE[i,0],1], V[BE[i,1],1]]
plt.plot(x, y, '-', linewidth=2, color='black')
dosave = (len(fname) != 0)
plt.axis('equal')
#plt.axis([-100, 100,-100, 100])
#plt.axis([-0.5, 1.5, -0.75, 1.5])
#plt.colorbar()
#plt.clim(0, 0.7)
#plt.clim(9, 12)
if(plotError == True):
plt.title(field+' error', fontsize=16)
elif(plotExact == True):
plt.title(field+' exact', fontsize=16)
else:
plt.title(field, fontsize=16)
#f.tight_layout()
#plt.show(block=(not dosave))
#if (dosave):
# plt.savefig(fname)
#plt.close(f)
#-----------------------------------------------------------
if __name__ == "__main__":
if len(sys.argv) < 4:
print('Not enough input arguments')
sys.exit()
state = sys.argv[1]
meshIndicies = [i for i, s in enumerate(sys.argv) if(eu.meshTypeSupported(s))]
slnIndicies = [i for i, s in enumerate(sys.argv) if 'sln' in s]
meshFiles = []
for idx in meshIndicies:
meshFiles.append(sys.argv[idx])
slnFiles = []
for idx in slnIndicies:
slnFiles.append(sys.argv[idx])
sort_nicely(meshFiles)
sort_nicely(slnFiles)
globalMin = float("inf")
globalMax = float("-inf")
combineSln = False
mesh = {}
if((len(slnFiles) != len(meshFiles)) and (len(meshFiles) == 1)):
combineSln = True
mesh = eu.importMesh(meshFiles[0])
elif(len(slnFiles) != len(meshFiles)):
raise Exception("different number of solution files and mesh files! aborting")
slns = []
combinedSln = {}
U = []
edgeVals = []
edges = []
M = 0
uAlloced = False
for slnFile in slnFiles:
sln = eu.importEbbSolution(slnFile)
if((sln['version'] == 1) and combineSln):
raise Exception("Cannot recombine solution files with file version 1")
if(not combineSln):
F = pu.getField(sln['U'], state)
minVal = np.min(F)
globalMin = min(globalMin, minVal)
maxVal = np.max(F)
globalMax = max(globalMax, maxVal)
slns.append(sln)
else:
            raise Exception("Recombining solutions not yet supported")
print("globalMin = "+str(globalMin))
print("globalMax = "+str(globalMax))
globalMin = globalMin - 0.001
globalMax = globalMax + 0.001
colors = ['k', 'b', 'g', 'c', 'y', 'm', 'r']
f = plt.figure(figsize=(12,6))
for idx in range(len(meshFiles)):
if(not combineSln):
mesh = eu.importMesh(meshFiles[idx])
plotstate(mesh, slns[idx]['U'], state, "", globalMin, globalMax, colors[idx%len(colors)], False, False)
plt.hold(True)
plt.colorbar()
plt.show(block=True)
#plt.close(f)
'''
f = plt.figure(figsize=(12,6))
for idx in range(len(meshFiles)):
if(not combineSln):
mesh = eu.importMesh(meshFiles[idx])
plotstate(mesh, slns[idx]['U'], state, "", globalMin, globalMax, colors[idx%len(colors)], True, False)
plt.hold(True)
plt.colorbar()
plt.show(block=False)
#plt.close(f)
f = plt.figure(figsize=(12,6))
for idx in range(len(meshFiles)):
if(not combineSln):
mesh = eu.importMesh(meshFiles[idx])
plotstate(mesh, slns[idx]['U'], state, "", globalMin, globalMax, colors[idx%len(colors)], False, True)
plt.hold(True)
plt.colorbar()
plt.show(block=True)
'''
plt.close(f)
|
|
#!/usr/bin/env python
import csv
import datetime
import glob
import json
import logging
import os
import re
import requests
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import sys
sys.path.insert(0, '../Py-CatDV')
from pycatdv import Catdvlib
__author__ = "Edson Cudjoe"
__copyright__ = "Copyright 2015, Intervideo"
__credits__ = ["Edson Cudjoe"]
__license__ = "MIT"
__version__ = "1.0.1"
__maintainer__ = "Edson Cudjoe"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "4 February 2015"
logging.basicConfig(format='%(process)d-%(levelname)s %(asctime)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S',
filename='logs/ltohistory.log',
filemode='a')
class LTOHistory(Catdvlib):
def __init__(self, server, api, lto_system_ip):
super(LTOHistory, self).__init__(server, api)
self.lto_ip = lto_system_ip
self.HOME = os.getcwd()
def __str__(self):
return super(LTOHistory, self).__str__()
def byte2tb(self, byte):
"""Converts byte data from the LTO file to terabytes"""
try:
f = float(byte)
tb = ((((f / 1024) / 1024) / 1024) / 1024)
return tb
except ValueError:
print("Value could not be converted to float. {}".format(str(byte)))
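    # Hedged example of the conversion above: 1 TiB of reported used_size,
    #   byte2tb(1099511627776)  ->  1.0    (1024 ** 4 bytes)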
def set_lto_date_range(self):
"""
Determines the date range as strings to be entered to the Space
LTO History Manager
"""
today = datetime.date.today()
first = today.replace(day=1)
last_mth = first - datetime.timedelta(days=1)
beginning = last_mth.replace(day=1)
end = last_mth.strftime("%Y/%m/%d")
start = beginning.strftime("%Y/%m/%d")
return start, end
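    # Hedged example: if run on 2015-02-04 this would return
    # ('2015/01/01', '2015/01/31'), i.e. the whole previous month.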
def open_chrome_browser(self):
chrome_profile = webdriver.ChromeOptions()
pref = {'download.default_directory': self.HOME}
chrome_profile.add_experimental_option('prefs', pref)
chrome_driver = '/usr/local/bin/chromedriver'
try:
gc = webdriver.Chrome(chrome_driver,
chrome_options=chrome_profile)
time.sleep(5)
gc.get("http://{}/login/?p=/lto/catalogue/".format(self.lto_ip))
return gc
except Exception as gc_err:
raise gc_err
def open_firefox_browser(self):
profile = webdriver.FirefoxProfile()
profile.set_preference('browser.download.folderList', 2)
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.dir', os.getcwd())
profile.set_preference('browser.helperApps.neverAsk.saveToDisk',
'application/json,text/javascript,text/json,'
'text/x-json')
try:
ffx = webdriver.Firefox(firefox_profile=profile)
time.sleep(5)
ffx.get("http://{}/login/?p=/lto/catalogue/".format(self.lto_ip))
return ffx
except Exception as ffx_err:
raise ffx_err
def open_browser(self):
try:
self.browser = self.open_chrome_browser()
except Exception as e:
print('ERROR: {}'.format(e))
return self.browser
def browser_login(self, usr, pwd):
"""Login to the Space LTO web interface"""
username = self.browser.find_element_by_name("txt_username")
password = self.browser.find_element_by_name("txt_password")
username.send_keys(usr)
password.send_keys(pwd)
btn = self.browser.find_element_by_tag_name("button")
btn.click()
time.sleep(1)
def download_lto_history_file(self, username, password):
"""
Automates getting the tape history file from the Space LTO
web interface
"""
try:
window = self.open_browser()
self.browser_login(usr=username, pwd=password)
# Export to file tab
tabs = window.find_elements_by_class_name("switcher-button")
tabs[3].click()
exp_format = window.find_element_by_id("sel_exporthist_format")
for f in exp_format.find_elements_by_tag_name("option"):
if f.text == "JSON":
f.click()
break
set_from = window.find_element_by_id("txt_exporthist_from")
set_to = window.find_element_by_id("txt_exporthist_to")
dates = self.set_lto_date_range()
# print(dates)
# raw_input('\ncontinue?:')
if window.name == 'chrome':
set_from.clear()
set_from.send_keys(dates[0])
time.sleep(5)
set_to.clear()
# raw_input('entering date {}'.format(dates[1]))
time.sleep(2)
set_to.click()
# Current bug chromedriver number 5058 on selenium GH issues
# chromedriver is unable to press '3' key
set_to.send_keys(dates[1])
# raw_input('correct?')
else:
set_from.send_keys(Keys.COMMAND + "a")
set_from.send_keys(Keys.DELETE)
set_from.send_keys(dates[0])
time.sleep(1)
set_to.send_keys(Keys.COMMAND + "a")
set_to.send_keys(Keys.DELETE)
set_to.send_keys(dates[1])
time.sleep(1)
# click on blank area to close calender
border_click = window.find_element_by_id("browse")
border_click.click()
time.sleep(1)
# dl = raw_input('continue to download')
# download file
export = window.find_element_by_id("btn_exporthist_save")
export.click()
time.sleep(10)
window.quit()
except IndexError:
raise IndexError
def get_lto_info(self):
"""Collects LTO information into a list."""
name_size = None
get_lto = True
while get_lto:
fname = open(os.path.abspath('history.json'), 'r')
assert fname.name.endswith('.json')
if '.json' in fname.name:
jdata = self.get_json(fname)
current = self.json_to_list(jdata)
name_size = self.json_final(current)
get_lto = False
elif '.csv' in fname.name:
                data = csv.reader(fname)
name_size = lto_to_list(data)
get_lto = False
else:
print('\nNo file submitted.')
get_lto = False
if name_size:
return name_size
def get_json(self, submitted):
"""
Reads submitted JSON file and returns a dictionary
"""
# lto = open(submitted_lto_file, 'r')
jfile = json.load(submitted)
return jfile
def json_to_list(self, json_data_from_file):
"""Reads data from JSON dictionary. Returns data into a list"""
json_collect = []
for i in json_data_from_file['tapes']:
json_collect.append((i['name'], i['used_size']))
return json_collect
def json_final(self, current_json_list):
"""
Converts given filesize into TB. returns list of tuples containing
IV barcode numbers plus file size.
"""
final = []
for c in current_json_list:
try:
tb = self.byte2tb(c[1]) # converts GB byte data to TB
a = re.search(r'(IV\d\d\d\d)', c[0])
final.append((str(a.group()), round(tb, 2)))
except AttributeError:
pass
return final
def catdv_login(self, user_instance):
"""Enter CatDV server login details to get access to the API"""
try:
user_instance.get_auth()
user_instance.get_session_key()
user_instance.get_catalog_name()
time.sleep(1)
except Exception as e:
logging.error('Incorrect login')
raise e
def client_name_id(self, user):
"""
Puts client names and id numbers from tuple into a dictionary
"""
clients = {}
try:
for name in user.catalog_names:
clients[name[0]] = name[1]
return clients
except Exception as e:
print(e)
def calculate_written_data(self, lto_data, names_dict, server, api_vers,
key):
"""
Searches the CatDV API based on the IV barcode number.
Collects the group name details from the results.
Calculates how TB has been written based on the amount detailed on
the Space LTO results.
"""
try:
cat_grp_names = {i: [0, 0] for i in names_dict.keys()}
#print('Querying the CatDV Server. Please wait...')
#create api request for 'IV0XXX' barcode
for i in lto_data:
raw_data = requests.get('http://{}/api/{}/clips;'
'jsessionid={}'
'?filter=and((clip.userFields[U7])'
'has({}))&include='
'userFields'.format(server,
api_vers, key,
i[0]))
assert raw_data.status_code == 200
res = json.loads(raw_data.text)
grp_nm = res['data']['items'][0]['groupName']
cat_grp_names[grp_nm][0] += 1
cat_grp_names[grp_nm][1] += i[1]
time.sleep(1)
for ca in cat_grp_names.items():
print('{}TB written over {} tapes for {}'.format(ca[1][1], ca[1][0],
ca[0]))
except Exception as e:
print(e)
return cat_grp_names
def total_sizes(self, client_dict, name_size):
"""Returns total amount archived for each client/catalog group"""
assert client_dict
assert len(name_size) > 0
print(client_dict)
print(name_size)
try:
for item in client_dict.items():
barcodes = self.get_barcodes(item[1])
print('Barcodes: {}'.format(barcodes))
                two = set(self.get_client_items(name_size, barcodes))
print('Two: {}'.format(two))
terabytes = self.get_storage_size(two)
print('T: {}'.format(terabytes))
print('\n{0}TB written for {1}\n'.format(terabytes, item[0]))
except Exception as e:
print(e)
def get_barcodes(self, catalog_id, user):
"""Gets a list of IV barcodes for user-specified client."""
user.iv_barcodes = []
user.get_catalog_clips(catalog_id)
user.collect_iv_numbers()
return user.sort_barcodes()
def get_client_items(self, name_size, clientlist):
"""Separates main list for each client"""
try:
client_mnth = []
for p in sorted(clientlist):
for i in sorted(name_size):
if i[0] in p:
client_mnth.append(i)
print('get_clientitems.client_mnth: {}'.format(client_mnth))
return client_mnth
except:
raise TypeError
def get_storage_size(self, client_items):
"""Sum of disc size for each tape"""
count = 0
for i in client_items:
count += i[1]
return count
def show_catalog_names(self, user):
try:
print('\nCurrent catalogs available: ')
for name in user.catalog_names:
print(name[0])
except Exception as e:
print(e)
def get_catdv_data(textfile):
"""
Opens text file from CatDV output containing Intervideo barcodes.
these barcodes are added to a list.
"""
catdv_list = []
with open(textfile) as client_barcodes:
reader = csv.reader(client_barcodes)
for row in reader:
try:
catdv_list.append(row[0])
except:
pass
return catdv_list
def make_csv_file(final):
"""
Creates a CSV file to be used with spreadsheets from the intervideo
LTO tape barcode and size data.
"""
fname = raw_input('Enter name of csv file to save into: ')
name_ext = fname + ".csv"
with open(name_ext, 'wb') as csvfile:
writedata = csv.writer(csvfile, delimiter=',')
for i in range(len(final)):
writedata.writerow(final[i])
print('File has been created.')
def lto_to_list(data):
"""
Takes the output of CSV reader as input. Converts this data into a
list to be compared with the individual client barcode lists
generated from CatDV data.
"""
collect = []
final = []
for item in data:
try:
collect.append((item[0], item[6]))
except Exception:
print('Unable to add data: {}'.format(item))
continue
for c in collect:
if 'Name' in c[0]:
final.append(c)
else:
if 'test' in c[0]:
continue
# 1 file has been labelled incorrectly.
# It will be temporarily skipped until the tape has been
# fixed.
elif 'Intervideo' in c[0]:
continue
else:
                gb = float(c[1]) / (1024.0 ** 4)  # bytes -> TB (same math as LTOHistory.byte2tb)
a = re.search(r'(IV\d\d\d\d)', c[0])
final.append((str(a.group()), round(gb, 2)))
return final
def print_manual(collected):
for name, size in collected.items():
print('Archived: {}TB for {}'.format(size, name))
def main():
print("Getting LTO History file")
try:
a = LTOHistory('192.168.0.101:8080', '4', '192.168.16.99')
a.download_lto_history_file('admin', 'space')
hist_data = a.get_lto_info()
print(hist_data)
start = True
while start:
auth = raw_input('Login to CatDV Api? [y/n]: ').lower()
if auth == 'y':
a.catdv_login(a)
client_name_and_gid = a.client_name_id(a)
a.calculate_written_data(hist_data,
client_name_and_gid,
a.server,
a.api,
a.key)
start = False
else:
print('Unable to provide other options')
break
except TypeError:
print('Your CatDV username or password is incorrect')
logging.exception("Exception:")
except Exception as e:
        logging.exception("Exception occurred")
finally:
for file in glob.glob(r'{}/*.json'.format(os.getcwd())):
os.remove(file)
a.delete_session()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2021 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import time
from decimal import Decimal
from test_framework.blocktools import get_masternode_payment, create_coinbase, create_block
from test_framework.mininode import *
from test_framework.test_framework import DashTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, get_bip9_status
'''
feature_llmq_is_cl_conflicts.py
Checks conflict handling between ChainLocks and InstantSend
'''
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.clsigs = {}
self.islocks = {}
def send_clsig(self, clsig):
hash = uint256_from_str(hash256(clsig.serialize()))
self.clsigs[hash] = clsig
inv = msg_inv([CInv(29, hash)])
self.send_message(inv)
def send_islock(self, islock):
hash = uint256_from_str(hash256(islock.serialize()))
self.islocks[hash] = islock
inv = msg_inv([CInv(30, hash)])
self.send_message(inv)
def on_getdata(self, message):
for inv in message.inv:
if inv.hash in self.clsigs:
self.send_message(self.clsigs[inv.hash])
if inv.hash in self.islocks:
self.send_message(self.islocks[inv.hash])
class LLMQ_IS_CL_Conflicts(DashTestFramework):
def set_test_params(self):
self.set_dash_test_params(4, 3, fast_dip3_enforcement=True)
#disable_mocktime()
def run_test(self):
self.activate_dip8()
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn())
network_thread_start()
self.nodes[0].p2p.wait_for_verack()
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.wait_for_sporks_same()
self.mine_quorum()
# mine single block, wait for chainlock
self.nodes[0].generate(1)
self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash())
self.test_chainlock_overrides_islock(False)
self.test_chainlock_overrides_islock(True, False)
self.test_chainlock_overrides_islock(True, True)
self.test_chainlock_overrides_islock_overrides_nonchainlock()
    def test_chainlock_overrides_islock(self, test_block_conflict, mine_conflicting=False):
if not test_block_conflict:
            assert not mine_conflicting
# create three raw TXs, they will conflict with each other
rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx1_obj = FromHex(CTransaction(), rawtx1)
rawtx2_obj = FromHex(CTransaction(), rawtx2)
rawtx1_txid = self.nodes[0].sendrawtransaction(rawtx1)
rawtx2_txid = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')
# Create a chained TX on top of tx1
inputs = []
n = 0
for out in rawtx1_obj.vout:
if out.nValue == 100000000:
inputs.append({"txid": rawtx1_txid, "vout": n})
n += 1
rawtx4 = self.nodes[0].createrawtransaction(inputs, {self.nodes[0].getnewaddress(): 0.999})
rawtx4 = self.nodes[0].signrawtransactionwithwallet(rawtx4)['hex']
rawtx4_txid = self.nodes[0].sendrawtransaction(rawtx4)
# wait for transactions to propagate
self.sync_mempools()
for node in self.nodes:
self.wait_for_instantlock(rawtx1_txid, node)
self.wait_for_instantlock(rawtx4_txid, node)
block = self.create_block(self.nodes[0], [rawtx2_obj])
if test_block_conflict:
# The block shouldn't be accepted/connected but it should be known to node 0 now
submit_result = self.nodes[0].submitblock(ToHex(block))
assert(submit_result == "conflict-tx-lock")
cl = self.create_chainlock(self.nodes[0].getblockcount() + 1, block)
        if mine_conflicting:
islock_tip = self.nodes[0].generate(1)[-1]
self.test_node.send_clsig(cl)
for node in self.nodes:
self.wait_for_best_chainlock(node, block.hash)
self.sync_blocks()
        if mine_conflicting:
# The tip with IS-locked txes should be marked conflicting now
found1 = False
found2 = False
for tip in self.nodes[0].getchaintips(2):
if tip["hash"] == islock_tip:
assert tip["status"] == "conflicting"
found1 = True
elif tip["hash"] == block.hash:
assert tip["status"] == "active"
found2 = True
assert found1 and found2
# At this point all nodes should be in sync and have the same "best chainlock"
submit_result = self.nodes[1].submitblock(ToHex(block))
if test_block_conflict:
# Node 1 should receive the block from node 0 and should not accept it again via submitblock
assert(submit_result == "duplicate")
else:
# The block should get accepted now, and at the same time prune the conflicting ISLOCKs
assert(submit_result is None)
for node in self.nodes:
self.wait_for_chainlocked_block(node, block.hash)
# Create a chained TX on top of tx2
inputs = []
n = 0
for out in rawtx2_obj.vout:
if out.nValue == 100000000:
inputs.append({"txid": rawtx2_txid, "vout": n})
n += 1
rawtx5 = self.nodes[0].createrawtransaction(inputs, {self.nodes[0].getnewaddress(): 0.999})
rawtx5 = self.nodes[0].signrawtransactionwithwallet(rawtx5)['hex']
rawtx5_txid = self.nodes[0].sendrawtransaction(rawtx5)
# wait for the transaction to propagate
self.sync_mempools()
for node in self.nodes:
self.wait_for_instantlock(rawtx5_txid, node)
        if mine_conflicting:
            # Let's verify that the ISLOCKs got pruned and the conflicting txes were mined but never confirmed
for node in self.nodes:
rawtx = node.getrawtransaction(rawtx1_txid, True)
assert not rawtx['chainlock']
assert not rawtx['instantlock']
assert not rawtx['instantlock_internal']
assert_equal(rawtx['confirmations'], 0)
assert_equal(rawtx['height'], -1)
rawtx = node.getrawtransaction(rawtx4_txid, True)
assert not rawtx['chainlock']
assert not rawtx['instantlock']
assert not rawtx['instantlock_internal']
assert_equal(rawtx['confirmations'], 0)
assert_equal(rawtx['height'], -1)
rawtx = node.getrawtransaction(rawtx2_txid, True)
assert rawtx['chainlock']
assert rawtx['instantlock']
assert not rawtx['instantlock_internal']
else:
            # Let's verify that the ISLOCKs got pruned
for node in self.nodes:
assert_raises_rpc_error(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx1_txid, True)
assert_raises_rpc_error(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx4_txid, True)
rawtx = node.getrawtransaction(rawtx2_txid, True)
assert rawtx['chainlock']
assert rawtx['instantlock']
assert not rawtx['instantlock_internal']
def test_chainlock_overrides_islock_overrides_nonchainlock(self):
# create two raw TXs, they will conflict with each other
rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
rawtx1_txid = encode(hash256(hex_str_to_bytes(rawtx1))[::-1], 'hex_codec').decode('ascii')
rawtx2_txid = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')
# Create an ISLOCK but don't broadcast it yet
islock = self.create_islock(rawtx2)
        # Disable ChainLocks to avoid accidental locking
self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 4070908800)
# Send tx1, which will later conflict with the ISLOCK
self.nodes[0].sendrawtransaction(rawtx1)
# fast forward 11 minutes, so that the TX is considered safe and included in the next block
self.bump_mocktime(int(60 * 11))
# Mine the conflicting TX into a block
good_tip = self.nodes[0].getbestblockhash()
self.nodes[0].generate(2)
self.sync_all()
# Assert that the conflicting tx got mined and the locked TX is not valid
assert(self.nodes[0].getrawtransaction(rawtx1_txid, True)['confirmations'] > 0)
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[0].sendrawtransaction, rawtx2)
# Create the block and the corresponding clsig but do not relay clsig yet
cl_block = self.create_block(self.nodes[0])
cl = self.create_chainlock(self.nodes[0].getblockcount() + 1, cl_block)
self.nodes[0].submitblock(ToHex(cl_block))
self.sync_all()
assert self.nodes[0].getbestblockhash() == cl_block.hash
# Send the ISLOCK, which should result in the last 2 blocks to be invalidated, even though the nodes don't know
# the locked transaction yet
self.test_node.send_islock(islock)
time.sleep(5)
assert(self.nodes[0].getbestblockhash() == good_tip)
assert(self.nodes[1].getbestblockhash() == good_tip)
# Send the actual transaction and mine it
self.nodes[0].sendrawtransaction(rawtx2)
self.nodes[0].generate(1)
self.sync_all()
assert(self.nodes[0].getrawtransaction(rawtx2_txid, True)['confirmations'] > 0)
assert(self.nodes[1].getrawtransaction(rawtx2_txid, True)['confirmations'] > 0)
assert(self.nodes[0].getrawtransaction(rawtx2_txid, True)['instantlock'])
assert(self.nodes[1].getrawtransaction(rawtx2_txid, True)['instantlock'])
assert(self.nodes[0].getbestblockhash() != good_tip)
assert(self.nodes[1].getbestblockhash() != good_tip)
# Check that the CL-ed block overrides the one with islocks
self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0) # Re-enable ChainLocks to accept clsig
self.test_node.send_clsig(cl) # relay clsig ASAP to prevent nodes from locking islock-ed tip
self.wait_for_sporks_same()
for node in self.nodes:
self.wait_for_chainlocked_block(node, cl_block.hash)
def create_block(self, node, vtx=[]):
bt = node.getblocktemplate()
height = bt['height']
tip_hash = bt['previousblockhash']
coinbasevalue = bt['coinbasevalue']
miner_address = node.getnewaddress()
mn_payee = bt['masternode'][0]['payee']
        # calculate fees that the block template included (we'll have to remove them from the coinbase as we won't
        # include the template's transactions)
bt_fees = 0
for tx in bt['transactions']:
bt_fees += tx['fee']
new_fees = 0
for tx in vtx:
in_value = 0
out_value = 0
for txin in tx.vin:
txout = node.gettxout(uint256_to_string(txin.prevout.hash), txin.prevout.n, False)
in_value += int(txout['value'] * COIN)
for txout in tx.vout:
out_value += txout.nValue
new_fees += in_value - out_value
# fix fees
coinbasevalue -= bt_fees
coinbasevalue += new_fees
realloc_info = get_bip9_status(self.nodes[0], 'realloc')
realloc_height = 99999999
if realloc_info['status'] == 'active':
realloc_height = realloc_info['since']
mn_amount = get_masternode_payment(height, coinbasevalue, realloc_height)
miner_amount = coinbasevalue - mn_amount
outputs = {miner_address: str(Decimal(miner_amount) / COIN)}
if mn_amount > 0:
outputs[mn_payee] = str(Decimal(mn_amount) / COIN)
coinbase = FromHex(CTransaction(), node.createrawtransaction([], outputs))
coinbase.vin = create_coinbase(height).vin
# We can't really use this one as it would result in invalid merkle roots for masternode lists
if len(bt['coinbase_payload']) != 0:
cbtx = FromHex(CCbTx(version=1), bt['coinbase_payload'])
coinbase.nVersion = 3
coinbase.nType = 5 # CbTx
coinbase.vExtraPayload = cbtx.serialize()
coinbase.calc_sha256()
block = create_block(int(tip_hash, 16), coinbase, nTime=bt['curtime'])
block.vtx += vtx
# Add quorum commitments from template
for tx in bt['transactions']:
tx2 = FromHex(CTransaction(), tx['data'])
if tx2.nType == 6:
block.vtx.append(tx2)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
def create_chainlock(self, height, block):
request_id_buf = ser_string(b"clsig") + struct.pack("<I", height)
request_id = hash256(request_id_buf)[::-1].hex()
message_hash = block.hash
quorum_member = None
for mn in self.mninfo:
res = mn.node.quorum('sign', 100, request_id, message_hash)
if res and quorum_member is None:
quorum_member = mn
recSig = self.get_recovered_sig(request_id, message_hash, node=quorum_member.node)
clsig = msg_clsig(height, block.sha256, hex_str_to_bytes(recSig['sig']))
return clsig
if __name__ == '__main__':
LLMQ_IS_CL_Conflicts().main()
|
|
# Copyright (c) 2016 QNAP Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import mock
from oslo_config import cfg
from oslo_utils import units
import six
from six.moves import urllib
from cinder import test
from cinder.volume.drivers import qnap
CONF = cfg.CONF
FAKE_LUNNAA = {'LUNNAA': 'fakeLunNaa'}
FAKE_SNAPSHOT = {'snapshot_id': 'fakeSnapshotId'}
FAKE_PASSWORD = 'qnapadmin'
FAKE_PARMS = {}
FAKE_PARMS['pwd'] = base64.b64encode(FAKE_PASSWORD.encode("utf-8"))
FAKE_PARMS['serviceKey'] = 1
FAKE_PARMS['user'] = 'admin'
sanitized_params = {}
for key in FAKE_PARMS:
value = FAKE_PARMS[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
global_sanitized_params = urllib.parse.urlencode(sanitized_params)
header = {
'charset': 'utf-8', 'Content-Type': 'application/x-www-form-urlencoded'}
login_url = ('/cgi-bin/authLogin.cgi?')
get_basic_info_url = ('/cgi-bin/authLogin.cgi')
FAKE_RES_DETAIL_DATA_LOGIN = """
<QDocRoot version="1.0">
<authPassed><![CDATA[1]]></authPassed>
<authSid><![CDATA[fakeSid]]></authSid>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TS = """
<QDocRoot version="1.0">
<model>
<displayModelName><![CDATA[TS-870U-RP]]></displayModelName>
<internalModelName><![CDATA[TS-879]]></internalModelName>
</model>
<firmware>
<version><![CDATA[4.2.1]]></version>
</firmware>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_GETBASIC_INFO = """
<QDocRoot version="1.0">
<model>
<displayModelName><![CDATA[ES1640dc]]></displayModelName>
<internalModelName><![CDATA[ES1640dc]]></internalModelName>
</model>
<firmware>
<version><![CDATA[1.1.3]]></version>
</firmware>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES = """
<QDocRoot version="1.0">
<model>
<displayModelName><![CDATA[TES-1885U]]></displayModelName>
<internalModelName><![CDATA[ES-X85U]]></internalModelName>
</model>
<firmware>
<version><![CDATA[1.1.3]]></version>
</firmware>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_LUN_INFO = """
<QDocRoot version="1.0">
<authPassed><![CDATA[1]]></authPassed>
<iSCSILUNList>
<LUNInfo>
<LUNNAA><![CDATA[fakeLunNaa]]></LUNNAA>
<LUNName><![CDATA[fakeLunName]]></LUNName>
<LUNIndex><![CDATA[fakeLunIndex]]></LUNIndex>
<LUNThinAllocate><![CDATA[fakeLunThinAllocate]]></LUNThinAllocate>
<LUNPath><![CDATA[fakeLunPath]]></LUNPath>
<LUNTargetList>
<row>
<targetIndex><![CDATA[9]]></targetIndex>
<LUNNumber><![CDATA[1]]></LUNNumber>
<LUNEnable><![CDATA[1]]></LUNEnable>
</row>
</LUNTargetList>
<LUNStatus>1</LUNStatus>
</LUNInfo>
</iSCSILUNList>
<result><![CDATA[0]]></result>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_MAPPED_LUN_INFO = """
<QDocRoot version="1.0">
<authPassed><![CDATA[1]]></authPassed>
<iSCSILUNList>
<LUNInfo>
<LUNNAA><![CDATA[fakeLunNaa]]></LUNNAA>
<LUNName><![CDATA[fakeLunName]]></LUNName>
<LUNIndex><![CDATA[fakeLunIndex]]></LUNIndex>
<LUNThinAllocate><![CDATA[fakeLunThinAllocate]]></LUNThinAllocate>
<LUNPath><![CDATA[fakeLunPath]]></LUNPath>
<LUNTargetList>
<row>
<targetIndex><![CDATA[9]]></targetIndex>
<LUNNumber><![CDATA[1]]></LUNNumber>
<LUNEnable><![CDATA[1]]></LUNEnable>
</row>
</LUNTargetList>
<LUNStatus>2</LUNStatus>
</LUNInfo>
</iSCSILUNList>
<result><![CDATA[0]]></result>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_SNAPSHOT = """
<QDocRoot version="1.0">
<authPassed><![CDATA[1]]></authPassed>
<SnapshotList>
<row>
<snapshot_id><![CDATA[fakeSnapshotId]]></snapshot_id>
</row>
</SnapshotList>
<result><![CDATA[0]]></result>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO = """
<QDocRoot version="1.0">
<authPassed><![CDATA[1]]></authPassed>
<Pool_Index>
<row>
<poolIndex><![CDATA[fakePoolIndex]]></poolIndex>
<poolID><![CDATA[fakePoolId]]></poolID>
<pool_status><![CDATA[0]]></pool_status>
<capacity_bytes><![CDATA[930213412209]]></capacity_bytes>
<allocated_bytes><![CDATA[1480470528]]></allocated_bytes>
<freesize_bytes><![CDATA[928732941681]]></freesize_bytes>
<lun_meta_reserve_ratio><![CDATA[0.0315]]></lun_meta_reserve_ratio>
<pool_capacity><![CDATA[866 GB]]></pool_capacity>
<pool_allocated><![CDATA[1.38 GB]]></pool_allocated>
<pool_freesize><![CDATA[865 GB]]></pool_freesize>
<pool_threshold><![CDATA[80 %]]></pool_threshold>
<pool_used><![CDATA[0 %]]></pool_used>
<pool_available><![CDATA[100 %]]></pool_available>
<pool_owner><![CDATA[SCA]]></pool_owner>
<pool_type><![CDATA[mirror]]></pool_type>
<pool_dedup><![CDATA[1.00]]></pool_dedup>
<pool_bound><![CDATA[0]]></pool_bound>
<pool_progress><![CDATA[0]]></pool_progress>
<pool_scrub><![CDATA[0]]></pool_scrub>
</row>
</Pool_Index>
<result><![CDATA[0]]></result>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_ISCSI_PORTAL_INFO = """
<QDocRoot version="1.0">
<authPassed><![CDATA[1]]></authPassed>
<iSCSIPortal>
<servicePort><![CDATA[fakeServicePort]]></servicePort>
<targetIQNPrefix><![CDATA[fakeTargetIqnPrefix]]></targetIQNPrefix>
<targetIQNPostfix><![CDATA[fakeTargetIqnPostfix]]></targetIQNPostfix>
</iSCSIPortal>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_ETHERNET_IP = """
<QDocRoot version="1.0">
<authPassed><![CDATA[1]]></authPassed>
<func>
<ownContent>
<IPConfig>
<ethdev><![CDATA[fakeEthdev]]></ethdev>
<ethSlotid><![CDATA[0]]></ethSlotid>
<IPType><![CDATA[static]]></IPType>
<IP><![CDATA[fakeIp]]></IP>
</IPConfig>
</ownContent>
</func>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_CREATE_LUN = """
<QDocRoot version="1.0">
<authPassed><![CDATA[1]]></authPassed>
<result><![CDATA[fakeLunIndex]]></result>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_CREATE_TARGET = """
<QDocRoot version="1.0">
<authPassed><![CDATA[1]]></authPassed>
<result><![CDATA[fakeTargetIndex]]></result>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_GETHOSTIDLISTBYINITIQN = """
<QDocRoot version="1.0">
<authPassed><![CDATA[1]]></authPassed>
<content>
<host_list total="4">
<host>
<index><![CDATA[fakeIndex]]></index>
<hostid><![CDATA[fakeHostId]]></hostid>
<name><![CDATA[fakeHostName]]></name>
<iqns>
<iqn><![CDATA[fakeIqn]]></iqn>
</iqns>
</host>
</host_list>
</content>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_GET_ALL_ISCSI_PORTAL_SETTING = """
<QDocRoot version="1.0">
<authPassed><![CDATA[1]]></authPassed>
<targetACL>
<row>
<targetIndex><![CDATA[fakeTargetIndex]]></targetIndex>
<targetIQN><![CDATA[fakeTargetIqn]]></targetIQN>
<targetInitListCnt><![CDATA[0]]></targetInitListCnt>
<targetInitInfo>
<initiatorIndex><![CDATA[2]]></initiatorIndex>
<initiatorAlias><![CDATA[fakeInitiatorAlias]]></initiatorAlias>
<initiatorIQN><![CDATA[fakeInitiatorIqn]]></initiatorIQN>
<bCHAPEnable><![CDATA[0]]></bCHAPEnable>
<bMutualCHAPEnable><![CDATA[0]]></bMutualCHAPEnable>
</targetInitInfo>
</row>
</targetACL>
<iSCSITargetList>
<targetInfo>
<targetIndex><![CDATA[fakeTargetIndex]]></targetIndex>
<targetName><![CDATA[fakeTargetName]]></targetName>
<targetIQN active="1">fakeTargetIqn</targetIQN>
<targetStatus><![CDATA[1]]></targetStatus>
</targetInfo>
</iSCSITargetList>
<result><![CDATA[0]]></result>
</QDocRoot>"""
FAKE_RES_DETAIL_DATA_TARGET_INFO = """
<QDocRoot version="1.0">
<authPassed><![CDATA[1]]></authPassed>
<targetInfo>
<row>
<targetIndex><![CDATA[fakeTargetIndex]]></targetIndex>
<targetName><![CDATA[fakeTargetName]]></targetName>
<targetIQN active="1">fakeTargetIqn</targetIQN>
<targetStatus><![CDATA[1]]></targetStatus>
</row>
</targetInfo>
<result><![CDATA[0]]></result>
</QDocRoot>"""
FAKE_RES_DETAIL_GET_ALL_ISCSI_PORTAL_SETTING = {
'data': FAKE_RES_DETAIL_DATA_GET_ALL_ISCSI_PORTAL_SETTING,
'error': None,
'http_status': 'fackStatus'
}
FAKE_RES_DETAIL_ISCSI_PORTAL_INFO = {
'data': FAKE_RES_DETAIL_DATA_ISCSI_PORTAL_INFO,
'error': None,
'http_status': 'fackStatus'
}
def create_configuration(
username,
password,
management_url,
san_iscsi_ip,
poolname,
thin_provision=True):
"""Create configuration."""
configuration = mock.Mock()
configuration.san_login = username
configuration.san_password = password
configuration.qnap_management_url = management_url
configuration.san_thin_provision = thin_provision
configuration.san_iscsi_ip = san_iscsi_ip
configuration.qnap_poolname = poolname
configuration.safe_get.return_value = 'QNAP'
configuration.iscsi_ip_address = '1.2.3.4'
configuration.qnap_storage_protocol = 'iscsi'
configuration.reserved_percentage = 0
return configuration
class QnapDriverBaseTestCase(test.TestCase):
"""Base Class for the QnapDriver Tests."""
def setUp(self):
"""Setup the Qnap Driver Base TestCase."""
super(QnapDriverBaseTestCase, self).setUp()
self.driver = None
self.mock_HTTPConnection = None
@staticmethod
def driver_mock_decorator(configuration):
"""Driver mock decorator."""
def driver_mock_wrapper(func):
def inner_driver_mock(
self,
mock_http_connection,
*args,
**kwargs):
"""Inner driver mock."""
self.mock_HTTPConnection = mock_http_connection
self.driver = qnap.QnapISCSIDriver(configuration=configuration)
self.driver.do_setup('context')
func(self, *args, **kwargs)
return inner_driver_mock
return driver_mock_wrapper
def tearDown(self):
"""Tear down."""
super(QnapDriverBaseTestCase, self).tearDown()
class SnapshotClass(object):
"""Snapshot Class."""
volume = {}
name = ''
volume_name = ''
volume_size = 0
metadata = {'snapshot_id': 'fakeSnapshotId'}
def __init__(self, volume, volume_size):
"""Init."""
self.volume = volume
self.volume_size = volume_size
def __getitem__(self, arg):
"""Getitem."""
return {
'display_name': 'fakeSnapshotDisplayName',
'id': 'fakeSnapshotId',
'volume_size': self.volume_size,
'metadata': self.metadata
}[arg]
class VolumeClass(object):
"""Volume Class."""
display_name = ''
id = ''
size = 0
name = ''
volume_metadata = {}
def __init__(self, display_name, id, size, name):
"""Init."""
self.display_name = display_name
self.id = id
self.size = size
self.name = name
self.volume_metadata = {'LUNNAA': 'fakeLunNaa'}
def __getitem__(self, arg):
"""Getitem."""
return {
'display_name': self.display_name,
'size': self.size,
'id': self.id,
'name': self.name,
'provider_location': None,
'volume_metadata': self.volume_metadata,
'metadata': self.volume_metadata
}[arg]
def __setitem__(self, key, value):
"""Setitem."""
if key == 'display_name':
self.display_name = value
class HostClass(object):
"""Host Class."""
def __init__(self, host):
"""Init."""
self.host = host
def __getitem__(self, arg):
"""Getitem."""
return {
'host': 'fakeHost',
}[arg]
class FakeLoginResponse(object):
"""Fake login response."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_LOGIN
class FakeGetBasicInfoResponse(object):
"""Fake GetBasicInfo response."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_GETBASIC_INFO
class FakeGetBasicInfoTsResponse(object):
"""Fake GetBasicInfoTs response."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TS
class FakeGetBasicInfoTesResponse(object):
"""Fake GetBasicInfoTs response."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES
class FakeLunInfoResponse(object):
"""Fake lun info response."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_LUN_INFO
class FakePoolInfoResponse(object):
"""Fake pool info response."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO
class FakeCreateLunResponse(object):
"""Fake create lun response."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_CREATE_LUN
class FakeCreatTargetResponse(object):
"""Fake create target response."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_CREATE_TARGET
class FakeGetIscsiPortalInfoResponse(object):
"""Fake get iscsi portal inforesponse."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_ISCSI_PORTAL_INFO
def __repr__(self):
"""Repr."""
# __repr__ must return a str, so return the raw XML fixture directly.
return FAKE_RES_DETAIL_DATA_ISCSI_PORTAL_INFO
class FakeCreateSnapshotResponse(object):
"""Fake Create snapshot inforesponse."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_SNAPSHOT
class FakeGetAllIscsiPortalSetting(object):
"""Fake get all iSCSI portal setting."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_GET_ALL_ISCSI_PORTAL_SETTING
class FakeGetAllEthernetIp(object):
"""Fake get all ethernet ip setting."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_ETHERNET_IP
class FakeTargetInfo(object):
"""Fake target info setting."""
status = 'fackStatus'
def read(self):
"""Mock response.read."""
return FAKE_RES_DETAIL_DATA_TARGET_INFO
class QnapDriverLoginTestCase(QnapDriverBaseTestCase):
"""Tests do_setup api."""
@mock.patch('six.moves.http_client.HTTPConnection')
def test_do_setup_positive(
self,
mock_http_connection):
"""Test do_setup with http://1.2.3.4:8080."""
fake_login_response = FakeLoginResponse()
fake_get_basic_info_response = FakeGetBasicInfoResponse()
mock_http_connection.return_value.getresponse.side_effect = ([
fake_login_response,
fake_get_basic_info_response,
fake_login_response])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertEqual('fakeSid', self.driver.api_executor.sid)
self.assertEqual('admin', self.driver.api_executor.username)
self.assertEqual('qnapadmin', self.driver.api_executor.password)
self.assertEqual('1.2.3.4', self.driver.api_executor.ip)
self.assertEqual('8080', self.driver.api_executor.port)
self.assertFalse(self.driver.api_executor.ssl)
@mock.patch('six.moves.http_client.HTTPSConnection')
def test_do_setup_positive_with_ssl(
self,
mock_http_connection):
"""Test do_setup with https://1.2.3.4:443."""
fake_login_response = FakeLoginResponse()
fake_get_basic_info_response = FakeGetBasicInfoResponse()
mock_http_connection.return_value.getresponse.side_effect = ([
fake_login_response,
fake_get_basic_info_response,
fake_login_response])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'https://1.2.3.4:443',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.assertEqual('fakeSid', self.driver.api_executor.sid)
self.assertEqual('admin', self.driver.api_executor.username)
self.assertEqual('qnapadmin', self.driver.api_executor.password)
self.assertEqual('1.2.3.4', self.driver.api_executor.ip)
self.assertEqual('443', self.driver.api_executor.port)
self.assertTrue(self.driver.api_executor.ssl)
class QnapDriverVolumeTestCase(QnapDriverBaseTestCase):
"""Tests volume related api's."""
def get_lun_info_return_value(self):
"""Return the lun form get_lun_info method."""
root = ET.fromstring(FAKE_RES_DETAIL_DATA_LUN_INFO)
lun_list = root.find('iSCSILUNList')
lun_info_tree = lun_list.findall('LUNInfo')
for lun in lun_info_tree:
return lun
def get_mapped_lun_info_return_value(self):
"""Return the lun form get_lun_info method."""
root = ET.fromstring(FAKE_RES_DETAIL_DATA_MAPPED_LUN_INFO)
lun_list = root.find('iSCSILUNList')
lun_info_tree = lun_list.findall('LUNInfo')
for lun in lun_info_tree:
return lun
def get_snapshot_info_return_value(self):
"""Return the lun form get_lun_info method."""
root = ET.fromstring(FAKE_RES_DETAIL_DATA_SNAPSHOT)
snapshot_list = root.find('SnapshotList')
snapshot_info_tree = snapshot_list.findall('row')
for snapshot in snapshot_info_tree:
return snapshot
@mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata')
@mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name')
@mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor')
def test_create_volume_positive(
self,
mock_api_executor,
mock_gen_random_name,
mock_get_volume_metadata):
"""Test create_volume with fake_volume."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
mock_api_executor.return_value.get_basic_info.return_value = (
'ES1640dc ', 'ES1640dc ', '1.1.3')
mock_api_executor.return_value.get_lun_info.side_effect = [
None,
self.get_lun_info_return_value()]
mock_gen_random_name.return_value = 'fakeLun'
mock_api_executor.return_value.create_lun.return_value = 'fakeIndex'
mock_get_volume_metadata.return_value = {}
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.create_volume(fake_volume)
mock_api_executor.return_value.create_lun.assert_called_once_with(
fake_volume,
self.driver.configuration.qnap_poolname,
'fakeLun',
True)
expected_call_list = [
mock.call(LUNName='fakeLun'),
mock.call(LUNIndex='fakeIndex')]
self.assertEqual(
expected_call_list,
mock_api_executor.return_value.get_lun_info.call_args_list)
@mock.patch.object(
qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata')
@mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor')
def test_delete_volume_positive(
self,
mock_api_executor,
mock_get_lun_naa_from_volume_metadata):
"""Test delete_volume with fake_volume."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
mock_api_executor.return_value.get_basic_info.return_value = (
'ES1640dc ', 'ES1640dc ', '1.1.3')
mock_get_lun_naa_from_volume_metadata.return_value = FAKE_LUNNAA
mock_api_executor.return_value.get_lun_info.return_value = (
self.get_lun_info_return_value())
mock_api_executor.return_value.delete_lun.return_value = None
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.delete_volume(fake_volume)
mock_api_executor.return_value.delete_lun.assert_called_once_with(
'fakeLunIndex')
@mock.patch.object(
qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata')
@mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name')
@mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata')
@mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor')
def test_create_cloned_volume_volume_size_less_src_verf(
self,
mock_api_executor,
mock_get_volume_metadata,
mock_gen_random_name,
mock_get_lun_naa_from_volume_metadata):
"""Test create cloned volume."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 90, 'fakeLunName')
fake_src_vref = VolumeClass(
'fakeSrcVrefName', 'fakeId', 100, 'fakeSrcVref')
mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa'
mock_api_executor.return_value.get_basic_info.return_value = (
'ES1640dc ', 'ES1640dc ', '1.1.3')
mock_get_volume_metadata.return_value = {}
mock_api_executor.return_value.get_lun_info.side_effect = [
self.get_lun_info_return_value(),
None,
self.get_lun_info_return_value()]
mock_gen_random_name.side_effect = ['fakeSnapshot', 'fakeLun']
mock_api_executor.return_value.get_snapshot_info.side_effect = [
None, self.get_snapshot_info_return_value()]
mock_api_executor.return_value.create_snapshot_api.return_value = (
'fakeSnapshotId')
mock_api_executor.return_value.clone_snapshot.return_value = None
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.create_cloned_volume(fake_volume, fake_src_vref)
expected_call_list = [
mock.call(LUNNAA='fakeLunNaa'),
mock.call(LUNName='fakeLun'),
mock.call(LUNName='fakeLun')]
self.assertEqual(
expected_call_list,
mock_api_executor.return_value.get_lun_info.call_args_list)
expected_call_list = [
mock.call(lun_index='fakeLunIndex', snapshot_name='fakeSnapshot'),
mock.call(lun_index='fakeLunIndex', snapshot_name='fakeSnapshot')]
self.assertEqual(
expected_call_list,
mock_api_executor.return_value.get_snapshot_info.call_args_list)
mock_api_return = mock_api_executor.return_value
mock_api_return.create_snapshot_api.assert_called_once_with(
'fakeLunIndex', 'fakeSnapshot')
mock_api_return.clone_snapshot.assert_called_once_with(
'fakeSnapshotId', 'fakeLun')
@mock.patch.object(
qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata')
@mock.patch.object(qnap.QnapISCSIDriver, '_extend_lun')
@mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name')
@mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata')
@mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor')
def test_create_cloned_volume_volume_size_morethan_src_verf(
self,
mock_api_executor,
mock_get_volume_metadata,
mock_gen_random_name,
mock_extend_lun,
mock_get_lun_naa_from_volume_metadata):
"""Test create cloned volume."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
fake_src_vref = VolumeClass(
'fakeSrcVrefName', 'fakeId', 90, 'fakeSrcVref')
mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa'
mock_api_executor.return_value.get_basic_info.return_value = (
'ES1640dc ', 'ES1640dc ', '1.1.3')
mock_get_volume_metadata.return_value = FAKE_LUNNAA
mock_api_executor.return_value.get_lun_info.side_effect = [
self.get_lun_info_return_value(),
None,
self.get_lun_info_return_value()]
mock_gen_random_name.side_effect = ['fakeSnapshot', 'fakeLun']
mock_api_executor.return_value.get_snapshot_info.side_effect = [
None, self.get_snapshot_info_return_value()]
mock_api_executor.return_value.create_snapshot_api.return_value = (
'fakeSnapshotId')
mock_api_executor.return_value.clone_snapshot.return_value = None
mock_extend_lun.return_value = None
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.create_cloned_volume(fake_volume, fake_src_vref)
mock_extend_lun.assert_called_once_with(fake_volume, 'fakeLunNaa')
@mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name')
@mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor')
def test_create_snapshot_positive(
self,
mock_api_executor,
mock_gen_random_name):
"""Test create snapshot."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
snapshot = SnapshotClass(fake_volume, 100)
mock_api_executor.return_value.get_basic_info.return_value = (
'ES1640dc ', 'ES1640dc ', '1.1.3')
mock_api_executor.return_value.get_lun_info.return_value = (
self.get_lun_info_return_value())
mock_gen_random_name.return_value = 'fakeSnapshot'
mock_api_executor.return_value.get_snapshot_info.side_effect = [
None, self.get_snapshot_info_return_value()]
mock_api_executor.return_value.create_snapshot_api.return_value = (
'fakeSnapshotId')
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.create_snapshot(snapshot)
mock_api_return = mock_api_executor.return_value
mock_api_return.get_lun_info.assert_called_once_with(
LUNNAA='fakeLunNaa')
expected_call_list = [
mock.call(lun_index='fakeLunIndex', snapshot_name='fakeSnapshot'),
mock.call(lun_index='fakeLunIndex', snapshot_name='fakeSnapshot')]
self.assertEqual(
expected_call_list,
mock_api_executor.return_value.get_snapshot_info.call_args_list)
mock_api_return.create_snapshot_api.assert_called_once_with(
'fakeLunIndex', 'fakeSnapshot')
@mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor')
def test_delete_snapshot_positive(
self,
mock_api_executor):
"""Test delete snapshot."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
fake_snapshot = SnapshotClass(fake_volume, 100)
mock_api_executor.return_value.get_basic_info.return_value = (
'ES1640dc ', 'ES1640dc ', '1.1.3')
mock_api_executor.return_value.api_delete_snapshot.return_value = None
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.delete_snapshot(fake_snapshot)
mock_api_return = mock_api_executor.return_value
mock_api_return.api_delete_snapshot.assert_called_once_with(
'fakeSnapshotId')
@mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata')
@mock.patch.object(qnap.QnapISCSIDriver, '_extend_lun')
@mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name')
@mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor')
def test_create_volume_from_snapshot_positive_volsize_more_snapshotvolsize(
self,
mock_api_executor,
mock_gen_random_name,
mock_extend_lun,
mock_get_volume_metadata):
"""Test create volume from snapshot positive."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
fake_snapshot = SnapshotClass(fake_volume, 90)
mock_api_executor.return_value.get_basic_info.return_value = (
'ES1640dc ', 'ES1640dc ', '1.1.3')
mock_gen_random_name.return_value = 'fakeLun'
mock_api_executor.return_value.get_lun_info.side_effect = [
None,
self.get_lun_info_return_value()]
mock_api_executor.return_value.clone_snapshot.return_value = None
mock_api_executor.return_value.create_snapshot_api.return_value = (
'fakeSnapshotId')
mock_extend_lun.return_value = None
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.create_volume_from_snapshot(fake_volume, fake_snapshot)
expected_call_list = [
mock.call(LUNName='fakeLun'),
mock.call(LUNName='fakeLun')]
self.assertEqual(
expected_call_list,
mock_api_executor.return_value.get_lun_info.call_args_list)
mock_api_return = mock_api_executor.return_value
mock_api_return.clone_snapshot.assert_called_once_with(
'fakeSnapshotId', 'fakeLun')
mock_extend_lun.assert_called_once_with(fake_volume, 'fakeLunNaa')
def get_specific_poolinfo_return_value(self):
"""Get specific pool info."""
root = ET.fromstring(FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO)
pool_list = root.find('Pool_Index')
pool_info_tree = pool_list.findall('row')
for pool in pool_info_tree:
return pool
@mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor')
def test_get_volume_stats(
self,
mock_api_executor):
"""Get volume stats."""
mock_api_return = mock_api_executor.return_value
mock_api_return.get_basic_info.return_value = (
'ES1640dc ', 'ES1640dc ', '1.1.3')
mock_api_return.get_specific_poolinfo.return_value = (
self.get_specific_poolinfo_return_value())
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
expected_res = {'volume_backend_name': 'QNAP',
'vendor_name': 'QNAP',
'driver_version': '1.0.0',
'storage_protocol': 'iscsi'}
single_pool = dict(
pool_name=self.driver.configuration.qnap_poolname,
total_capacity_gb=930213412209 / units.Gi,
free_capacity_gb=928732941681 / units.Gi,
provisioned_capacity_gb=1480470528 / units.Gi,
reserved_percentage=self.driver.configuration.reserved_percentage,
QoS_support=False)
expected_res['pools'] = [single_pool]
self.assertEqual(
expected_res,
self.driver.get_volume_stats(refresh=True))
mock_api_return.get_specific_poolinfo.assert_called_once_with(
self.driver.configuration.qnap_poolname)
@mock.patch.object(qnap.QnapISCSIDriver, '_extend_lun')
@mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor')
def test_extend_volume(
self,
mock_api_executor,
mock_extend_lun):
"""Test extend volume."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
mock_api_executor.return_value.get_basic_info.return_value = (
'ES1640dc ', 'ES1640dc ', '1.1.3')
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.extend_volume(fake_volume, 'fakeSize')
mock_extend_lun.assert_called_once_with(fake_volume, '')
@mock.patch.object(
qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata')
@mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor')
def test_extend_lun(
self,
mock_api_executor,
mock_get_lun_naa_from_volume_metadata):
"""Test _extend_lun method."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
mock_api_executor.return_value.get_basic_info.return_value = (
'ES1640dc ', 'ES1640dc ', '1.1.3')
mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa'
mock_api_executor.return_value.get_lun_info.return_value = (
self.get_lun_info_return_value())
mock_api_executor.return_value.edit_lun.return_value = None
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver._extend_lun(fake_volume, '')
mock_api_return = mock_api_executor.return_value
mock_api_return.get_lun_info.assert_called_once_with(
LUNNAA='fakeLunNaa')
expect_lun = {
'LUNName': 'fakeLunName',
'LUNCapacity': fake_volume['size'],
'LUNIndex': 'fakeLunIndex',
'LUNThinAllocate': 'fakeLunThinAllocate',
'LUNPath': 'fakeLunPath',
'LUNStatus': '1'}
mock_api_return.edit_lun.assert_called_once_with(expect_lun)
@mock.patch.object(qnap.QnapISCSIDriver,
'_get_lun_naa_from_volume_metadata')
@mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor')
def test_initialize_connection_with_target_exist(
self,
mock_api_executor,
mock_get_lun_naa_from_volume_metadata):
"""Test initialize connection."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
fake_connector = {'initiator': 'fakeInitiatorIqn'}
mock_api_return = mock_api_executor.return_value
mock_api_return.get_basic_info.return_value = (
'ES1640dc ', 'ES1640dc ', '1.1.3')
mock_api_return.get_iscsi_portal_info.return_value = (
FAKE_RES_DETAIL_ISCSI_PORTAL_INFO)
mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa'
mock_api_executor.return_value.get_lun_info.side_effect = [
self.get_lun_info_return_value(),
self.get_lun_info_return_value()]
mock_api_return.get_all_iscsi_portal_setting.return_value = (
FAKE_RES_DETAIL_GET_ALL_ISCSI_PORTAL_SETTING)
mock_api_executor.return_value.map_lun.return_value = None
mock_api_return.get_ethernet_ip.return_value = ['1.2.3.4']
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
expected_properties = {
'target_discovered': True,
'target_portal': '1.2.3.4:fakeServicePort',
'target_iqn': 'fakeTargetIqn',
'target_lun': 1,
'volume_id': fake_volume['id'],
'target_portals': ['1.2.3.4:fakeServicePort'],
'target_iqns': ['fakeTargetIqn'],
'target_luns': [1]}
expected_return = {
'driver_volume_type': 'iscsi', 'data': expected_properties}
self.assertEqual(expected_return, self.driver.initialize_connection(
fake_volume, fake_connector))
mock_api_return = mock_api_executor.return_value
mock_api_return.get_iscsi_portal_info.assert_called_once_with()
expected_call_list = [
mock.call(LUNNAA='fakeLunNaa'),
mock.call(LUNNAA='fakeLunNaa')]
self.assertEqual(
expected_call_list,
mock_api_executor.return_value.get_lun_info.call_args_list)
mock_api_return = mock_api_executor.return_value
mock_api_return.get_all_iscsi_portal_setting.assert_called_once_with()
mock_api_return.map_lun.assert_called_once_with(
'fakeLunIndex', 'fakeTargetIndex')
mock_api_return.get_ethernet_ip.assert_called_once_with(type='data')
@mock.patch.object(
qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata')
@mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor')
def test_terminate_connection(
self,
mock_api_executor,
mock_get_lun_naa_from_volume_metadata):
"""Test terminate connection."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
fake_connector = {'initiator': 'fakeInitiator'}
mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa'
mock_api_executor.return_value.get_basic_info.return_value = (
'ES1640dc ', 'ES1640dc ', '1.1.3')
mock_api_executor.return_value.get_lun_info.return_value = (
self.get_mapped_lun_info_return_value())
mock_api_executor.return_value.unmap_lun.return_value = None
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.terminate_connection(fake_volume, fake_connector)
mock_api_return = mock_api_executor.return_value
mock_api_return.get_lun_info.assert_called_once_with(
LUNNAA='fakeLunNaa')
mock_api_return.unmap_lun.assert_called_once_with(
'fakeLunIndex', '9')
class QnapAPIExecutorTestCase(QnapDriverBaseTestCase):
"""Tests QnapAPIExecutor."""
@mock.patch('six.moves.http_client.HTTPConnection')
def test_create_lun(
self,
mock_http_connection):
"""Test create lun."""
fake_volume = VolumeClass(
'fakeDisplayName', 'fakeId', 100, 'fakeLunName')
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeCreateLunResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.assertEqual(
'fakeLunIndex',
self.driver.api_executor.create_lun(
fake_volume, 'fakepool', 'fakeLun', 'False'))
fake_params = {}
fake_params['func'] = 'add_lun'
fake_params['FileIO'] = 'no'
fake_params['LUNThinAllocate'] = '1'
fake_params['LUNName'] = 'fakeLun'
fake_params['LUNPath'] = 'fakeLun'
fake_params['poolID'] = 'fakepool'
fake_params['lv_ifssd'] = 'no'
fake_params['LUNCapacity'] = 100
fake_params['lv_threshold'] = '80'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
create_lun_url = (
'/cgi-bin/disk/iscsi_lun_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', create_lun_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_delete_lun(
self,
mock_http_connection):
"""Test delete lun."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeCreateLunResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.delete_lun('fakeLunIndex')
fake_params = {}
fake_params['func'] = 'remove_lun'
fake_params['run_background'] = '1'
fake_params['ha_sync'] = '1'
fake_params['LUNIndex'] = 'fakeLunIndex'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
delete_lun_url = (
'/cgi-bin/disk/iscsi_lun_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', delete_lun_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_get_specific_poolinfo(
self,
mock_http_connection):
"""Test get specific pool info."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakePoolInfoResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.get_specific_poolinfo('Pool1')
fake_params = {}
fake_params['store'] = 'poolInfo'
fake_params['func'] = 'extra_get'
fake_params['poolID'] = 'Pool1'
fake_params['Pool_Info'] = '1'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
get_specific_poolinfo_url = (
'/cgi-bin/disk/disk_manage.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_specific_poolinfo_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_create_target(
self,
mock_http_connection):
"""Test create target."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeCreatTargetResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.create_target('fakeTargetName', 'sca')
fake_params = {}
fake_params['func'] = 'add_target'
fake_params['targetName'] = 'fakeTargetName'
fake_params['targetAlias'] = 'fakeTargetName'
fake_params['bTargetDataDigest'] = '0'
fake_params['bTargetHeaderDigest'] = '0'
fake_params['bTargetClusterEnable'] = '1'
fake_params['controller_name'] = 'sca'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
create_target_url = (
'/cgi-bin/disk/iscsi_target_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', create_target_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_add_target_init(
self,
mock_http_connection):
"""Test add target init."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeCreateLunResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.add_target_init(
'fakeTargetIqn', 'fakeInitiatorIqn')
fake_params = {}
fake_params['func'] = 'add_init'
fake_params['targetIQN'] = 'fakeTargetIqn'
fake_params['initiatorIQN'] = 'fakeInitiatorIqn'
fake_params['initiatorAlias'] = 'fakeInitiatorIqn'
fake_params['bCHAPEnable'] = '0'
fake_params['CHAPUserName'] = ''
fake_params['CHAPPasswd'] = ''
fake_params['bMutualCHAPEnable'] = '0'
fake_params['mutualCHAPUserName'] = ''
fake_params['mutualCHAPPasswd'] = ''
fake_params['ha_sync'] = '1'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
add_target_init_url = (
'/cgi-bin/disk/iscsi_target_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', add_target_init_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_map_lun(
self,
mock_http_connection):
"""Test map lun."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeCreateLunResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.map_lun(
'fakeLunIndex', 'fakeTargetIndex')
fake_params = {}
fake_params['func'] = 'add_lun'
fake_params['LUNIndex'] = 'fakeLunIndex'
fake_params['targetIndex'] = 'fakeTargetIndex'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
map_lun_url = (
'/cgi-bin/disk/iscsi_target_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', map_lun_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_unmap_lun(
self,
mock_http_connection):
"""Test unmap lun."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeCreateLunResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.unmap_lun(
'fakeLunIndex', 'fakeTargetIndex')
fake_params = {}
fake_params['func'] = 'remove_lun'
fake_params['LUNIndex'] = 'fakeLunIndex'
fake_params['targetIndex'] = 'fakeTargetIndex'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
unmap_lun_url = (
'/cgi-bin/disk/iscsi_target_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', unmap_lun_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_get_iscsi_portal_info(
self,
mock_http_connection):
"""Test get iscsi portal info."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeCreateLunResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.get_iscsi_portal_info()
fake_params = {}
fake_params['func'] = 'extra_get'
fake_params['iSCSI_portal'] = '1'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
get_iscsi_portal_info_url = (
'/cgi-bin/disk/iscsi_portal_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_iscsi_portal_info_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_get_lun_info(
self,
mock_http_connection):
"""Test get lun info."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeLunInfoResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.get_lun_info()
fake_params = {}
fake_params['func'] = 'extra_get'
fake_params['lunList'] = '1'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
get_lun_info_url = (
'/cgi-bin/disk/iscsi_portal_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_lun_info_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_get_snapshot_info(
self,
mock_http_connection):
"""Test get snapshot info."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeLunInfoResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.get_snapshot_info(
lun_index='fakeLunIndex', snapshot_name='fakeSnapshotName')
fake_params = {}
fake_params['func'] = 'extra_get'
fake_params['LUNIndex'] = 'fakeLunIndex'
fake_params['snapshot_list'] = '1'
fake_params['snap_start'] = '0'
fake_params['snap_count'] = '100'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
get_snapshot_info_url = (
'/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_snapshot_info_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_create_snapshot_api(
self,
mock_http_connection):
"""Test create snapshot api."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeCreateSnapshotResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.create_snapshot_api(
'fakeLunIndex', 'fakeSnapshotName')
fake_params = {}
fake_params['func'] = 'create_snapshot'
fake_params['lunID'] = 'fakeLunIndex'
fake_params['snapshot_name'] = 'fakeSnapshotName'
fake_params['expire_min'] = '0'
fake_params['vital'] = '1'
fake_params['snapshot_type'] = '0'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
create_snapshot_api_url = (
'/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', create_snapshot_api_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_api_delete_snapshot(
self,
mock_http_connection):
"""Test api de;ete snapshot."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeCreateSnapshotResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.api_delete_snapshot(
'fakeSnapshotId')
fake_params = {}
fake_params['func'] = 'del_snapshots'
fake_params['snapshotID'] = 'fakeSnapshotId'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
api_delete_snapshot_url = (
'/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', api_delete_snapshot_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_clone_snapshot(
self,
mock_http_connection):
"""Test clone snapshot."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeCreateSnapshotResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.clone_snapshot(
'fakeSnapshotId', 'fakeLunName')
fake_params = {}
fake_params['func'] = 'clone_qsnapshot'
fake_params['by_lun'] = '1'
fake_params['snapshotID'] = 'fakeSnapshotId'
fake_params['new_name'] = 'fakeLunName'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
clone_snapshot_url = (
'/cgi-bin/disk/snapshot.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', clone_snapshot_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_edit_lun(
self,
mock_http_connection):
"""Test edit lun."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeLunInfoResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
fake_lun = {'LUNName': 'fakeLunName',
'LUNCapacity': 100,
'LUNIndex': 'fakeLunIndex',
'LUNThinAllocate': False,
'LUNPath': 'fakeLunPath',
'LUNStatus': 'fakeLunStatus'}
self.driver.api_executor.edit_lun(fake_lun)
fake_params = {}
fake_params['func'] = 'edit_lun'
fake_params['LUNName'] = 'fakeLunName'
fake_params['LUNCapacity'] = 100
fake_params['LUNIndex'] = 'fakeLunIndex'
fake_params['LUNThinAllocate'] = False
fake_params['LUNPath'] = 'fakeLunPath'
fake_params['LUNStatus'] = 'fakeLunStatus'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
edit_lun_url = (
'/cgi-bin/disk/iscsi_lun_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', edit_lun_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_get_all_iscsi_portal_setting(
self,
mock_http_connection):
"""Test get all iscsi portal setting."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeLunInfoResponse()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.get_all_iscsi_portal_setting()
fake_params = {}
fake_params['func'] = 'get_all'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
get_all_iscsi_portal_setting_url = (
'/cgi-bin/disk/iscsi_portal_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_all_iscsi_portal_setting_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_get_ethernet_ip(
self,
mock_http_connection):
"""Test get ethernet ip."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeGetAllEthernetIp()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.get_ethernet_ip(type='data')
fake_params = {}
fake_params['subfunc'] = 'net_setting'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
get_ethernet_ip_url = (
'/cgi-bin/sys/sysRequest.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_ethernet_ip_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_get_target_info(
self,
mock_http_connection):
"""Test get target info."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoResponse(),
FakeLoginResponse(),
FakeTargetInfo()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.get_target_info('fakeTargetIndex')
fake_params = {}
fake_params['func'] = 'extra_get'
fake_params['targetInfo'] = 1
fake_params['targetIndex'] = 'fakeTargetIndex'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
get_target_info_url = (
'/cgi-bin/disk/iscsi_portal_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_target_info_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
class QnapAPIExecutorTsTestCase(QnapDriverBaseTestCase):
"""Tests QnapAPIExecutorTS."""
@mock.patch('six.moves.http_client.HTTPConnection')
def test_remove_target_init(
self,
mock_http_connection):
"""Test remove target init."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeTargetInfo()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.driver.api_executor.remove_target_init(
'fakeTargetIqn', 'fakeDefaultAcl')
fake_params = {}
fake_params['func'] = 'remove_init'
fake_params['targetIQN'] = 'fakeTargetIqn'
fake_params['initiatorIQN'] = 'fakeDefaultAcl'
fake_params['ha_sync'] = '1'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
remove_target_init_url = (
'/cgi-bin/disk/iscsi_target_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', remove_target_init_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_get_target_info(
self,
mock_http_connection):
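"""Test get target info."""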
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeTargetInfo()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.driver.api_executor.get_target_info(
'fakeTargetIndex')
fake_params = {}
fake_params['func'] = 'extra_get'
fake_params['targetInfo'] = 1
fake_params['targetIndex'] = 'fakeTargetIndex'
fake_params['ha_sync'] = '1'
fake_params['sid'] = 'fakeSid'
sanitized_params = {}
for key in fake_params:
value = fake_params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
get_target_info_url = (
'/cgi-bin/disk/iscsi_portal_setting.cgi?%s' % sanitized_params)
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_target_info_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
@mock.patch('six.moves.http_client.HTTPConnection')
def test_get_ethernet_ip(
self,
mock_http_connection):
"""Test get ethernet ip."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTsResponse(),
FakeLoginResponse(),
FakeGetAllEthernetIp()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Storage Pool 1',
True))
self.driver.do_setup('context')
self.driver.api_executor.get_ethernet_ip(
type='data')
get_ethernet_ip_url = (
'/cgi-bin/sys/sysRequest.cgi?subfunc=net_setting&sid=fakeSid')
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_ethernet_ip_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
class QnapAPIExecutorTesTestCase(QnapDriverBaseTestCase):
"""Tests QnapAPIExecutorTES."""
@mock.patch('six.moves.http_client.HTTPConnection')
def test_get_ethernet_ip(
self,
mock_http_connection):
"""Test get ehternet ip."""
mock_http_connection.return_value.getresponse.side_effect = ([
FakeLoginResponse(),
FakeGetBasicInfoTesResponse(),
FakeLoginResponse(),
FakeGetAllEthernetIp()])
self.driver = qnap.QnapISCSIDriver(
configuration=create_configuration(
'admin',
'qnapadmin',
'http://1.2.3.4:8080',
'1.2.3.4',
'Pool1',
True))
self.driver.do_setup('context')
self.driver.api_executor.get_ethernet_ip(
type='data')
get_ethernet_ip_url = (
'/cgi-bin/sys/sysRequest.cgi?subfunc=net_setting&sid=fakeSid')
expected_call_list = [
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_basic_info_url),
mock.call('POST', login_url, global_sanitized_params, header),
mock.call('GET', get_ethernet_ip_url)]
self.assertEqual(
expected_call_list,
mock_http_connection.return_value.request.call_args_list)
|
|
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests for pw_build.create_python_tree"""
import os
from pathlib import Path
import tempfile
from typing import List
import unittest
from parameterized import parameterized # type: ignore
from pw_build.python_package import PythonPackage
from pw_build.create_python_tree import build_python_tree, copy_extra_files
from pw_build.generate_python_package import _PYPROJECT_FILE as PYPROJECT_TEXT
def _setup_cfg(package_name: str) -> str:
return f'''
[metadata]
name = {package_name}
version = 0.0.1
author = Pigweed Authors
author_email = [email protected]
description = Pigweed swiss-army knife
[options]
packages = find:
zip_safe = False
[options.package_data]
{package_name} =
py.typed
'''
def _create_fake_python_package(location: Path, files: List[str],
package_name: str) -> None:
for file in files:
destination = location / file
destination.parent.mkdir(parents=True, exist_ok=True)
text = f'"""{package_name}"""'
if str(destination).endswith('setup.cfg'):
text = _setup_cfg(package_name)
elif str(destination).endswith('pyproject.toml'):
# Make sure pyproject.toml file has valid syntax.
text = PYPROJECT_TEXT
destination.write_text(text)
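# Note on the extra_files entries used in the tests below: each entry is a
# single string of the form 'source_path > destination_path', which
# copy_extra_files() appears to parse; this is inferred from the test data and
# recorded here only for orientation.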
class TestCreatePythonTree(unittest.TestCase):
"""Integration tests for create_python_tree."""
def setUp(self):
self.maxDiff = None # pylint: disable=invalid-name
# Save the starting working directory for returning to later.
self.start_dir = Path.cwd()
# Create a temp out directory
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
# cd to the starting dir before cleaning up the temp out directory
os.chdir(self.start_dir)
# Delete the TemporaryDirectory
self.temp_dir.cleanup()
def _check_result_paths_equal(self, install_dir, expected_results) -> None:
# Normalize path strings to posix before comparing.
expected_paths = set(Path(p).as_posix() for p in expected_results)
actual_paths = set(
p.relative_to(install_dir).as_posix()
for p in install_dir.glob('**/*') if p.is_file())
self.assertEqual(expected_paths, actual_paths)
@parameterized.expand([
(
# Test name
'working case',
# Package name
'mars',
# File list
[
'planets/BUILD.mars_rocket',
'planets/mars/__init__.py',
'planets/mars/__main__.py',
'planets/mars/moons/__init__.py',
'planets/mars/moons/deimos.py',
'planets/mars/moons/phobos.py',
'planets/hohmann_transfer_test.py',
'planets/pyproject.toml',
'planets/setup.cfg',
],
# Extra_files
[],
# Package definition
{
'generate_setup': {
'metadata': {
'name': 'mars',
'version': '0.0.1'
},
},
'inputs': [
],
'setup_sources': [
'planets/pyproject.toml',
'planets/setup.cfg',
],
'sources': [
'planets/mars/__init__.py',
'planets/mars/__main__.py',
'planets/mars/moons/__init__.py',
'planets/mars/moons/deimos.py',
'planets/mars/moons/phobos.py',
],
'tests': [
'planets/hohmann_transfer_test.py',
],
},
# Output file list
[
'mars/__init__.py',
'mars/__main__.py',
'mars/moons/__init__.py',
'mars/moons/deimos.py',
'mars/moons/phobos.py',
'mars/tests/hohmann_transfer_test.py',
],
),
(
# Test name
'with extra files',
# Package name
'saturn',
# File list
[
'planets/BUILD.saturn_rocket',
'planets/hohmann_transfer_test.py',
'planets/pyproject.toml',
'planets/saturn/__init__.py',
'planets/saturn/__main__.py',
'planets/saturn/misson.py',
'planets/saturn/moons/__init__.py',
'planets/saturn/moons/enceladus.py',
'planets/saturn/moons/iapetus.py',
'planets/saturn/moons/rhea.py',
'planets/saturn/moons/titan.py',
'planets/setup.cfg',
'planets/setup.py',
],
# Extra files
[
'planets/BUILD.saturn_rocket > out/saturn/BUILD.rocket',
],
# Package definition
{
'inputs': [
],
'setup_sources': [
'planets/pyproject.toml',
'planets/setup.cfg',
'planets/setup.py',
],
'sources': [
'planets/saturn/__init__.py',
'planets/saturn/__main__.py',
'planets/saturn/misson.py',
'planets/saturn/moons/__init__.py',
'planets/saturn/moons/enceladus.py',
'planets/saturn/moons/iapetus.py',
'planets/saturn/moons/rhea.py',
'planets/saturn/moons/titan.py',
],
'tests': [
'planets/hohmann_transfer_test.py',
]
},
# Output file list
[
'saturn/BUILD.rocket',
'saturn/__init__.py',
'saturn/__main__.py',
'saturn/misson.py',
'saturn/moons/__init__.py',
'saturn/moons/enceladus.py',
'saturn/moons/iapetus.py',
'saturn/moons/rhea.py',
'saturn/moons/titan.py',
'saturn/tests/hohmann_transfer_test.py',
],
),
]) # yapf: disable
def test_build_python_tree(
self,
_test_name,
package_name,
file_list,
extra_files,
package_definition,
expected_file_list,
) -> None:
"""Check results of build_python_tree and copy_extra_files."""
temp_root = Path(self.temp_dir.name)
_create_fake_python_package(temp_root, file_list, package_name)
os.chdir(temp_root)
install_dir = temp_root / 'out'
package = PythonPackage.from_dict(**package_definition)
build_python_tree(python_packages=[package],
tree_destination_dir=install_dir,
include_tests=True)
copy_extra_files(extra_files)
# Check expected files are in place.
self._check_result_paths_equal(install_dir, expected_file_list)
@parameterized.expand([
(
# Test name
'everything in correct locations',
# Package name
'planets',
# File list
[
'BUILD.mars_rocket',
],
# Extra_files
[
'BUILD.mars_rocket > out/mars/BUILD.rocket',
],
# Output file list
[
'mars/BUILD.rocket',
],
# Should raise exception
None,
),
(
# Test name
'missing source files',
# Package name
'planets',
# File list
[
'BUILD.mars_rocket',
],
# Extra_files
[
'BUILD.venus_rocket > out/venus/BUILD.rocket',
],
# Output file list
[],
# Should raise exception
FileNotFoundError,
),
(
# Test name
'existing destination files',
# Package name
'planets',
# File list
[
'BUILD.jupiter_rocket',
'out/jupiter/BUILD.rocket',
],
# Extra_files
[
'BUILD.jupiter_rocket > out/jupiter/BUILD.rocket',
],
# Output file list
[],
# Should raise exception
FileExistsError,
),
]) # yapf: disable
def test_copy_extra_files(
self,
_test_name,
package_name,
file_list,
extra_files,
expected_file_list,
should_raise_exception,
) -> None:
"""Check results of build_python_tree and copy_extra_files."""
temp_root = Path(self.temp_dir.name)
_create_fake_python_package(temp_root, file_list, package_name)
os.chdir(temp_root)
install_dir = temp_root / 'out'
# If exceptions should be raised
if should_raise_exception:
with self.assertRaises(should_raise_exception):
copy_extra_files(extra_files)
return
# Do the copy
copy_extra_files(extra_files)
# Check expected files are in place.
self._check_result_paths_equal(install_dir, expected_file_list)
if __name__ == '__main__':
unittest.main()
|
|
#
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
import java.lang.Thread
import java.util.LinkedList
from multiverse.server.plugins import *
from multiverse.server.objects import *
from multiverse.server.engine import *
from multiverse.server.util import *
from multiverse.server.engine import WorldLoaderOverride
class myWorldLoaderOverride(WorldLoaderOverride):
def __init__(self):
pass
def adjustLightData(self, worldCollectionName, objectName, lightData):
return True
def adjustObjectTemplate(self, worldCollectionName, objectName, template):
instanceOid = template.get(Namespace.WORLD_MANAGER, ":instance")
roomStyle = None
try:
roomStyle = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "RoomStyle")
except:
pass
if roomStyle is not None:
template.put(Namespace.WORLD_MANAGER, "RoomStyle", roomStyle)
accountId = None
try:
accountId = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "AccountId")
except:
pass
if accountId is not None:
template.put(Namespace.WORLD_MANAGER, "AccountId", accountId)
props = None
try:
props = EnginePlugin.getObjectProperty(instanceOid, Namespace.INSTANCE, "RoomItemsProps")
except:
pass
if props is not None and props.containsKey(objectName):
objProps = props[objectName]
for key in objProps.keySet():
value = objProps[key]
template.put(Namespace.WORLD_MANAGER, key, value)
return True
def adjustRegion(self, worldCollectionName, objectName, region):
return True
def adjustRegionConfig(self, worldCollectionName, objectName, region, regionConfig):
return True
def adjustSpawnData(self, worldCollectionName, objectName, spawnData):
return True
InstancePlugin.registerWorldLoaderOverrideClass("placesWorldLoaderOverride", myWorldLoaderOverride)
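# The blocks below all follow the same register-then-create pattern. A small
# helper like this captures that shape; it is only an illustrative sketch and
# is not called by the stock script, which keeps its explicit blocks as-is.
def _register_and_create(templateName, worldFile, initScript, instanceName, loaderOverride=None):
    t = Template(templateName)
    t.put(Namespace.INSTANCE, InstanceClient.TEMPL_WORLD_FILE_NAME, worldFile)
    t.put(Namespace.INSTANCE, InstanceClient.TEMPL_INIT_SCRIPT_FILE_NAME, initScript)
    if loaderOverride is not None:
        t.put(Namespace.INSTANCE, InstanceClient.TEMPL_LOADER_OVERRIDE_NAME, loaderOverride)
    InstanceClient.registerInstanceTemplate(t)
    override = Template()
    override.put(Namespace.INSTANCE, InstanceClient.TEMPL_INSTANCE_NAME, instanceName)
    return InstanceClient.createInstance(templateName, override)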
template = Template("friendworld template")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_WORLD_FILE_NAME, "$WORLD_DIR/friendworld.mvw")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_INIT_SCRIPT_FILE_NAME, "$WORLD_DIR/instance_load_default.py")
rc = InstanceClient.registerInstanceTemplate(template);
overrideTemplate = Template()
overrideTemplate.put(Namespace.INSTANCE, InstanceClient.TEMPL_INSTANCE_NAME, "default") # instance name here
overrideTemplate.put(Namespace.INSTANCE, "populationLimit", Integer(100)) # instance name here
rc = InstanceClient.createInstance("friendworld template", overrideTemplate); # template name here
Log.info("startup_instance.py: createInstance 'default' #1 result=" + str(rc))
rc = InstanceClient.createInstance("friendworld template", overrideTemplate); # template name here
Log.info("startup_instance.py: createInstance 'default' #2 result=" + str(rc))
rc = InstanceClient.createInstance("friendworld template", overrideTemplate); # template name here
Log.info("startup_instance.py: createInstance 'default' #3 result=" + str(rc))
rc = InstanceClient.createInstance("friendworld template", overrideTemplate); # template name here
Log.info("startup_instance.py: createInstance 'default' #4 result=" + str(rc))
##########
template = Template("titanic template")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_WORLD_FILE_NAME, "$WORLD_DIR/../titanic_world/titanic_world.mvw")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_INIT_SCRIPT_FILE_NAME, "$WORLD_DIR/instance_load_default.py")
rc = InstanceClient.registerInstanceTemplate(template);
overrideTemplate = Template()
overrideTemplate.put(Namespace.INSTANCE, InstanceClient.TEMPL_INSTANCE_NAME, "titanic") # instance name here
rc = InstanceClient.createInstance("titanic template", overrideTemplate); # template name here
Log.info("startup_instance.py: createInstance 'titanic' result=" + str(rc))
##########
template = Template("hip hop environment template")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_WORLD_FILE_NAME, "$WORLD_DIR/hiphopEnvironment.mvw")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_INIT_SCRIPT_FILE_NAME, "$WORLD_DIR/instance_load_hiphopEnvironment.py")
rc = InstanceClient.registerInstanceTemplate(template)
overrideTemplate = Template()
overrideTemplate.put(Namespace.INSTANCE, InstanceClient.TEMPL_INSTANCE_NAME, "hiphopEnvironment") # instance name here
rc = InstanceClient.createInstance("hip hop environment template", overrideTemplate); # template name here
Log.info("startup_instance.py: createInstance 'hiphopEnvironment' result=" + str(rc))
##########
template = Template("hip hop room template")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_WORLD_FILE_NAME, "$WORLD_DIR/hiphoproom.mvw")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_INIT_SCRIPT_FILE_NAME, "$WORLD_DIR/instance_load_room_hiphop.py")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_LOADER_OVERRIDE_NAME, "placesWorldLoaderOverride")
rc = InstanceClient.registerInstanceTemplate(template)
overrideTemplate = Template()
overrideTemplate.put(Namespace.INSTANCE, InstanceClient.TEMPL_INSTANCE_NAME, "hiphop") # instance name here
rc = InstanceClient.createInstance("hip hop room template", overrideTemplate) # template name here
Log.info("startup_instance.py: createInstance 'hiphoproom' result=" + str(rc))
##########
template = Template("hip hop room unfurnished template")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_WORLD_FILE_NAME, "$WORLD_DIR/hiphoproom.mvw")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_INIT_SCRIPT_FILE_NAME, "$WORLD_DIR/instance_load_room_hiphop_unfurnished.py")
rc = InstanceClient.registerInstanceTemplate(template)
overrideTemplate = Template()
overrideTemplate.put(Namespace.INSTANCE, InstanceClient.TEMPL_INSTANCE_NAME, "hiphop_unfurnished") # instance name here
rc = InstanceClient.createInstance("hip hop room unfurnished template", overrideTemplate) # template name here
Log.info("startup_instance.py: createInstance 'hiphoproom_unfurnished' result=" + str(rc))
##########
template = Template("cute room template")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_WORLD_FILE_NAME, "$WORLD_DIR/cuteroom.mvw")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_INIT_SCRIPT_FILE_NAME, "$WORLD_DIR/instance_load_room_cute.py")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_LOADER_OVERRIDE_NAME, "placesWorldLoaderOverride")
rc = InstanceClient.registerInstanceTemplate(template)
overrideTemplate = Template()
overrideTemplate.put(Namespace.INSTANCE, InstanceClient.TEMPL_INSTANCE_NAME, "cute") # instance name here
rc = InstanceClient.createInstance("cute room template", overrideTemplate) # template name here
Log.info("startup_instance.py: createInstance 'cuteroom' result=" + str(rc))
##########
template = Template("goth room template")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_WORLD_FILE_NAME, "$WORLD_DIR/gothroom.mvw")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_INIT_SCRIPT_FILE_NAME, "$WORLD_DIR/instance_load_room_goth.py")
rc = InstanceClient.registerInstanceTemplate(template)
overrideTemplate = Template()
overrideTemplate.put(Namespace.INSTANCE, InstanceClient.TEMPL_INSTANCE_NAME, "goth") # instance name here
rc = InstanceClient.createInstance("goth room template", overrideTemplate) # template name here
Log.info("startup_instance.py: createInstance 'gothroom' result=" + str(rc))
##########
template = Template("metal room template")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_WORLD_FILE_NAME, "$WORLD_DIR/metalroom.mvw")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_INIT_SCRIPT_FILE_NAME, "$WORLD_DIR/instance_load_room_metal.py")
rc = InstanceClient.registerInstanceTemplate(template)
overrideTemplate = Template()
overrideTemplate.put(Namespace.INSTANCE, InstanceClient.TEMPL_INSTANCE_NAME, "metal") # instance name here
rc = InstanceClient.createInstance("metal room template", overrideTemplate) # template name here
Log.info("startup_instance.py: createInstance 'metalroom' result=" + str(rc))
##########
template = Template("sports room template")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_WORLD_FILE_NAME, "$WORLD_DIR/sportsroom.mvw")
template.put(Namespace.INSTANCE, InstanceClient.TEMPL_INIT_SCRIPT_FILE_NAME, "$WORLD_DIR/instance_load_room_sports.py")
rc = InstanceClient.registerInstanceTemplate(template)
overrideTemplate = Template()
overrideTemplate.put(Namespace.INSTANCE, InstanceClient.TEMPL_INSTANCE_NAME, "sports") # instance name here
rc = InstanceClient.createInstance("sports room template", overrideTemplate) # template name here
Log.info("startup_instance.py: createInstance 'sportsroom' result=" + str(rc))
##########
class RoomInstanceTimeout(InstanceTimeout):
def readyForTimeout(self,instance):
return instance.getPlayerPopulation() == 0 and instance.getName().find("room-") == 0
instanceTimeout = RoomInstanceTimeout(60)
instanceTimeout.start()
class PopulationClass(InstancePlugin.PopulationChangeCallback):
def __init__(self):
self.popLock = LockFactory.makeLock("PopulationClass")
self.recentPopulationNumbers = {}
self.allPopulationNumbers = {}
# Clear the table if it exists
if (Engine.getDatabase().databaseContainsTable(Engine.getDBName(), "populations")):
Engine.getDatabase().executeUpdate("DELETE FROM populations")
else:
            # Otherwise create it
Engine.getDatabase().executeUpdate("CREATE TABLE populations (instance_id BIGINT, account_id INT, population INT, INDEX USING HASH (instance_id)) ENGINE = MEMORY")
def onInstancePopulationChange(self, instanceOid, instanceName, population):
#Log.info("PopulationClass.onInstancePopulationChange called: instance " + str(instanceOid) + ", name " + instanceName + ", pop " + str(population))
#self.recentPopulationNumbers[instanceOid] = (population, instanceOid)
#return
try:
            if instanceName.find("room-") != 0:
                raise ValueError(instanceName)
accountIdStr = instanceName[5:]
accountId = Integer.valueOf(accountIdStr)
except:
Log.warn("PopulationClass.onInstancePopulationChange: For instanceOid " + str(instanceOid) + ", instanceName " + instanceName + " is not of the form 'room-nnnn'")
return
#Log.debug("PopulationClass.onInstancePopulationChange setting recent: instance " + str(instanceOid) + ", name " + instanceName + ", pop " + str(population))
try:
self.popLock.lock()
self.recentPopulationNumbers[accountId] = (population, instanceOid)
finally:
self.popLock.unlock()
def writePopulationToDatabase(self):
# Uncommenting these lines allows testing when there is no web server.
#Log.debug("PopulationClass.writePopulationToDatabase entered")
statements = LinkedList()
# Acquire the lock to copy the recentPopulationNumbers dictionary,
# and then release it.
try:
self.popLock.lock()
mostRecentPopulationNumbers = self.recentPopulationNumbers
self.recentPopulationNumbers = {}
finally:
self.popLock.unlock()
#Log.debug("PopulationClass.writePopulationToDatabase: " + str(len(mostRecentPopulationNumbers)) + " elements")
# Iterate over the recent population changes elements.
# If the instanceOid already exists in allPopulationNumbers and
# the population is zero, remove the row and remove the element
# of allPopulationNumbers; otherwise, create the update statement.
# If it's not in allPopulationNumbers, create the insert statement.
for accountId, (population, instanceOid) in mostRecentPopulationNumbers.items():
if accountId in self.allPopulationNumbers:
if (population == 0):
statements.add("DELETE FROM populations WHERE account_id = " + str(accountId) + ";")
del self.allPopulationNumbers[accountId]
else:
statements.add("UPDATE populations SET population = " + str(population) + " WHERE instance_id = " + str(instanceOid) + ";")
else:
statements.add("INSERT INTO populations (account_id, instance_id, population) VALUES (" + str(accountId) + "," +
str(instanceOid) + "," + str(population) + ");")
self.allPopulationNumbers[accountId] = (population, instanceOid)
# If there is nothing to do, return
if statements.size() == 0:
return
else:
Engine.getDatabase().executeBatch(statements)
if (Log.loggingDebug):
batch = ""
for i in range(statements.size() - 1):
batch += "\n" + statements.get(i)
Log.debug("PopulationClass.writePopulationFields: ran SQL statements " + batch)
class PopulationRunnable(Thread):
def __init__(self, intervalArg, populationClassArg):
self.interval = intervalArg
self.populationClass = populationClassArg
def run(self):
while True:
Thread.sleep(self.interval)
self.populationClass.writePopulationToDatabase()
populationClass = PopulationClass()
Engine.getPlugin("Instance").registerPopulationChangeCallback(populationClass)
# Start the thread with a write-out interval of 10 seconds
populationRunnable = PopulationRunnable(10 * 1000, populationClass)
populationRunnable.start()
Engine.getPlugin("Instance").setPluginAvailable(True)
|
|
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import, division)
import re
import hashlib
import logging
import datetime
from functools import partial, reduce
import six
from IPy import IP
from django.core.urlresolvers import reverse
from django.utils.encoding import force_bytes, force_text
from django.utils import timezone, html
from django.db import connection
import julia
logger = logging.getLogger(__name__)
class lock(object):
"""
    Context manager for acquiring a transaction-wide lock on PostgreSQL tables.
The __init__ method accepts two or more arguments.
The first argument is always a LOCK mode from the list of the standard lock names
ACCESS SHARE
ROW SHARE
ROW EXCLUSIVE
SHARE UPDATE EXCLUSIVE
SHARE
SHARE ROW EXCLUSIVE
EXCLUSIVE
ACCESS EXCLUSIVE
    The subsequent arguments are the models whose tables should be locked.
"""
def __init__(self, mode, *models):
self.mode = mode
self.models = models
self.tables = []
self.cursor = None
# get the list of tables
for model in self.models:
self.tables.append(model._meta.db_table)
def __enter__(self):
self.cursor = connection.cursor()
# lock the tables
self.lock(self.cursor, self.tables, self.mode)
return self.cursor
def __exit__(self, type, value, traceback):
# unlock the tables
self.unlock(self.cursor, self.tables)
@staticmethod
def lock(cursor, tables, mode):
logger.debug('locking the tables {} with {}'.format(', '.join(tables), mode))
cursor.execute('LOCK TABLE {} IN {} MODE'.format(', '.join(tables), mode))
@staticmethod
def unlock(cursor, tables):
pass
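# A minimal usage sketch for the ``lock`` context manager; ``model`` stands in
# for any Django model class and is purely illustrative.
def _example_lock_usage(model):
    # Hold a SHARE ROW EXCLUSIVE lock on the model's table for the rest of the
    # current transaction while counting its rows on the same cursor.
    with lock('SHARE ROW EXCLUSIVE', model) as cursor:
        cursor.execute('SELECT COUNT(*) FROM {}'.format(model._meta.db_table))
        return cursor.fetchone()[0]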
class Rank(object):
def __init__(self, ranks, score):
self.score = int(score)
self.i = self.title = self.lower = self.upper = None
for i, (title, min_score) in enumerate(ranks):
if self.score >= min_score:
self.i = i
self.title = title
self.lower = min_score
else:
# update the existing rank's upper bound
self.upper = min_score
break
@property
def total(self):
return self.upper
@property
def remaining(self):
return self.upper - self.score
@property
def complete(self):
return self.score
@property
def remaining_ratio(self):
return self.remaining / self.total
@property
def complete_ratio(self):
return self.complete / self.total
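# Illustrative sketch of how ``Rank`` walks a (title, min_score) table; the
# titles and thresholds below are made up for the example.
def _example_rank():
    ranks = [('Recruit', 0), ('Officer', 100), ('Veteran', 500)]
    rank = Rank(ranks, 150)
    # 150 falls into 'Officer' (lower bound 100); the next threshold, 500,
    # becomes the upper bound used by the ratio properties.
    return rank.title, rank.lower, rank.upper, rank.remaining  # ('Officer', 100, 500, 350)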
def calc_coop_score(procedures):
"""
    Calculate and return the overall COOP procedure score.
Args:
procedures - iterable of either tracker.models.Procedure objects or julia.node.ListValueNode nodes
"""
score = 0
if procedures:
for procedure in procedures:
try:
procedure.score
except AttributeError:
score += procedure['score'].value
else:
score += procedure.score
return score
def calc_accuracy(weapons, interested, min_ammo=None):
"""
Calculate average accuracy of a list (or any other iterable) of Weapon model instances.
Args:
weapons - Weapon instance iterable
interested - list of weapon ids accuracy should be counted against
min_ammo - min number of ammo required to calculate accuracy
"""
hits = 0
shots = 0
for weapon in weapons:
if weapon.name in interested:
hits += weapon.hits
shots += weapon.shots
return int(calc_ratio(hits, shots, min_divisor=min_ammo) * 100)
def calc_ratio(divident, divisor, min_divident=None, min_divisor=None):
"""
    Return the quotient of true division of `divident` by `divisor`.

    If `divident` is less than `min_divident` or `divisor` is less than
    `min_divisor` (when given), or the division itself fails, return zero.
"""
try:
assert(min_divident is None or divident >= min_divident)
assert(min_divisor is None or divisor >= min_divisor)
return divident/divisor
except (ValueError, TypeError, ZeroDivisionError, AssertionError):
return 0.0
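# A few concrete calc_ratio outcomes (arbitrary numbers, added for clarity):
def _example_calc_ratio():
    assert calc_ratio(1, 4) == 0.25
    # divisor below min_divisor means "not enough data", so the ratio is 0.0
    assert calc_ratio(1, 4, min_divisor=10) == 0.0
    # division by zero is swallowed and also reported as 0.0
    assert calc_ratio(1, 0) == 0.0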
def force_ipy(ip_address):
# no conversion is needed
if isinstance(ip_address, IP):
return ip_address
return IP(ip_address)
def force_timedelta(value):
"""
Pass `value` to the datetime.timedelta constructor
as number of seconds unless `value` is a timedelta instance itself
then return the instance.
"""
if isinstance(value, datetime.timedelta):
return value
return datetime.timedelta(seconds=int(value))
def force_clean_name(name):
"""Return a name free of SWAT text tags and leading/trailing whitespace."""
while True:
match = re.search(r'(\[[\\/]?[cub]\]|\[c=[^\[\]]*?\])', name, flags=re.I)
if not match:
break
name = name.replace(match.group(1), '')
return name.strip()
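# force_clean_name strips SWAT-style markup such as [c=ff0000], [b] and [\b];
# a quick illustration (the sample name is made up):
def _example_force_clean_name():
    assert force_clean_name('[c=ff0000][b]Player[\\b] ') == 'Player'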
def force_valid_name(name, ip_address):
"""
    Enforce a name for the given name/IP address pair.

    If the provided name is empty, return an underscore followed by the [8:16]
    slice of the sha1 hex digest of the numeric form of the IP address.
    Otherwise return the provided name as is.
"""
if not name:
return ('_%s' %
hashlib.sha1(force_bytes(force_ipy(ip_address).int())).hexdigest()[8:16]
)
return name
def force_name(name, ip_address):
"""Return a non-empty tagless name."""
return force_valid_name(force_clean_name(name), ip_address)
def format_name(name):
name = html.escape(name)
# replace [c=xxxxxx] tags with html span tags
name = re.sub(
r'\[c=([a-f0-9]{6})\](.*?)(?=\[c=([a-f0-9]{6})\]|\[\\c\]|$)',
r'<span style="color:#\1;">\2</span>',
name,
flags=re.I
)
# remove [b], [\b], [u], [\u], [\c] tags
name = re.sub(r'\[(?:\\)?[buc]\]', '', name, flags=re.I)
return html.mark_safe(name)
def sort_key(*comparable):
def key(player):
stats = []
for prop in comparable:
sign = 1
if prop.startswith('-'):
sign = -1
prop = prop[1:]
stats.append(getattr(player, prop) * sign)
return stats
return key
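# Sketch of sort_key with hypothetical player objects exposing .score and
# .time attributes; a leading '-' inverts the ordering for that attribute.
def _example_sort_key(players):
    # highest score first, ties broken by lowest time
    return sorted(players, key=sort_key('-score', 'time'))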
def rank_dicts(dicts):
best = {}
for d in dicts:
for key, value in six.iteritems(d):
if key not in best or value > best[key]:
best[key] = value
return best
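# rank_dicts keeps the per-key maximum across the given dicts:
def _example_rank_dicts():
    assert rank_dicts([{'kills': 3, 'score': 10}, {'kills': 5, 'score': 7}]) == \
        {'kills': 5, 'score': 10}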
def escape_cache_key(key):
"""Remove anything other than letters, digits and a dot from a key."""
return re.sub(r'[^0-9a-z.]', '', force_text(key), flags=re.I)
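# escape_cache_key drops everything except ASCII letters, digits and dots:
def _example_escape_cache_key():
    assert escape_cache_key('profile:127.0.0.1!') == 'profile127.0.0.1'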
def make_cache_key(*components):
"""
    Produce a cache key by joining the arguments with colons; a trailing
    colon is always appended.

    Args:
        *components - arbitrary values coercible to text

    Example:
        make_cache_key('foo', 'bar', 'ham') -> 'foo:bar:ham:'
"""
return '%s:' % ':'.join(map(force_text, components))
def today():
return timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
def tomorrow():
return today() + datetime.timedelta(days=1)
def Enum(*sequential, **named):
"""
Create an enumeration.
>>> Numbers = Enum('ZERO', 'ONE', 'TWO')
>>> Numbers.ZERO
0
>>> Numbers.ONE
1
Credits http://stackoverflow.com/a/1695250
"""
enums = dict(six.moves.zip(sequential, six.moves.range(len(sequential))), **named)
return type('Enum', (), enums)
|
|
from SimPEG import Mesh, Regularization, Maps, Utils, EM
from SimPEG.EM.Static import DC
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import copy
#import pandas as pd
#from scipy.sparse import csr_matrix, spdiags, dia_matrix,diags
#from scipy.sparse.linalg import spsolve
from scipy.stats import norm,multivariate_normal
import sys
path ="../pymatsolver/"
path = "../../../Documents/pymatsolver/"
sys.path.append(path)
from pymatsolver import PardisoSolver
#from scipy.interpolate import LinearNDInterpolator, interp1d
#from sklearn.mixture import GaussianMixture
from SimPEG import DataMisfit, Regularization, Optimization, InvProblem, Directives, Inversion
import SimPEG
import scipy.sparse as sp
import os
import glob
#Remove older results
files = glob.glob('./*.npz')
for f in files:
os.remove(f)
#2D model
csx, csy, csz = 0.25,0.25,0.25
# Number of core cells in each direction
ncx, ncz = 123,41
# Number of padding cells to add in each direction
npad = 12
# Vectors of cell lengths in each direction
hx = [(csx,npad, -1.5),(csx,ncx),(csx,npad, 1.5)]
hz= [(csz,npad,-1.5),(csz,ncz)]
# Create mesh
mesh = Mesh.TensorMesh([hx, hz],x0="CN")
# Map mesh coordinates from local to UTM coordinates
#mesh.x0[2] = mesh.x0[2]-mesh.vectorCCz[-npad-1]
mesh.x0[1] = mesh.x0[1]+csz/2.
#mesh.x0[0] = mesh.x0[0]+csx/2.
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#plt.gca().set_xlim([-20,20])
#plt.gca().set_ylim([-15,0])
#mesh.plotGrid()
#plt.gca().set_aspect('equal')
#plt.show()
print "Mesh Size: ", mesh.nC
#Model Creation
lnsig_air = 1e-8;
x0,z0, r0 = -6., -4., 3.
x1,z1, r1 = 6., -4., 3.
ln_sigback = -5.
ln_sigc = -3.
ln_sigr = -7.
noisemean = 0.
noisevar = 0.0
overburden_extent = 0.
ln_over = -4.
#m = (lnsig_background)*np.ones(mesh.nC);
#mu =np.ones(mesh.nC);
mtrue = ln_sigback*np.ones(mesh.nC) + norm(noisemean,noisevar).rvs(mesh.nC)
overb = (mesh.gridCC[:,1] >-overburden_extent) & (mesh.gridCC[:,1]<=0)
mtrue[overb] = ln_over*np.ones_like(mtrue[overb])+ norm(noisemean,noisevar).rvs(np.prod((mtrue[overb]).shape))
csph = (np.sqrt((mesh.gridCC[:,1]-z0)**2.+(mesh.gridCC[:,0]-x0)**2.))< r0
mtrue[csph] = ln_sigc*np.ones_like(mtrue[csph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[csph]).shape))
#Define the sphere limit
rsph = (np.sqrt((mesh.gridCC[:,1]-z1)**2.+(mesh.gridCC[:,0]-x1)**2.))< r1
mtrue[rsph] = ln_sigr*np.ones_like(mtrue[rsph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[rsph]).shape))
mtrue = Utils.mkvc(mtrue);
mesh.plotGrid()
plt.gca().set_xlim([-10,10])
plt.gca().set_ylim([-10,0])
xyzlim = np.r_[[[-10.,10.],[-10.,1.]]]
actind, meshCore = Utils.meshutils.ExtractCoreMesh(xyzlim,mesh)
plt.hist(mtrue[actind],bins =50,normed=True);
fig0 = plt.figure()
ax0 = fig0.add_subplot(111)
mm = meshCore.plotImage(mtrue[actind],ax = ax0)
plt.colorbar(mm[0])
ax0.set_aspect("equal")
#plt.show()
#Gradient array 1 2D
srclist = []
nSrc = 23
lines = 1
ylines = np.r_[0.]
xlines = np.r_[0.]
z = 0.
#xline
for k in range(lines):
for i in range(nSrc):
if i<=11:
locA = np.r_[-14.+1., z]
locB = np.r_[-8.+2.*i-1., z]
#M = np.c_[np.arange(-12.,-12+2*(i+1),2),np.ones(i+1)*z]
#N = np.c_[np.arange(-10.,-10+2*(i+1),2),np.ones(i+1)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
#print -locA,-locB,"\n",[-M,-N],"\n"
else:
locA = np.r_[-14.+2*(i-11)+1., z]
locB = np.r_[14.-1.,z]
#M = np.c_[np.arange(locA[0]+1.,12.,2),np.ones(nSrc-i)*z]
#N = np.c_[np.arange(locA[0]+3.,14.,2),np.ones(nSrc-i)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print "line2",locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
mapping = Maps.ExpMap(mesh)
survey = DC.Survey(srclist)
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.pair(survey)
problem.Solver = PardisoSolver
survey.dobs = survey.dpred(mtrue)
survey.std = 0.05*np.ones_like(survey.dobs)
survey.eps = 1e-5*np.linalg.norm(survey.dobs)
print '# of data: ', survey.dobs.shape
class SimultaneousSrc(DC.Src.BaseSrc):
"""
Dipole source
"""
QW = None
Q = None
W = None
def __init__(self, rxList,Q,W, **kwargs):
SimPEG.Survey.BaseSrc.__init__(self, rxList, **kwargs)
def eval(self, prob):
return self.QW
class SimultaneousRx(DC.Rx.BaseRx):
"""
SimultaneousRx receiver
"""
def __init__(self, locs, rxType='phi', **kwargs):
# We may not need this ...
SimPEG.Survey.BaseRx.__init__(self, locs, rxType)
@property
def nD(self):
"""Number of data in the receiver."""
return self.locs.shape[0]
# Not sure why ...
# return int(self.locs[0].size / 2)
def getP(self, mesh, Gloc):
return self.locs
P = []
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
P = rx.getP(mesh,'CC')
#Update W Inversion
nsubSrc = 5
m0 = (-5.)*np.ones(mapping.nP);
miter = m0
n_its = 50
InnerIt = 3
dmisfitsub = []
dmisfitall = []
#beta schedule
beta = 1.
betalist = [beta]
coolingFactor = 2.
coolingRate = 3
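# With coolingFactor=2 and coolingRate=3, beta is divided by 2 every third
# outer iteration (when np.mod(it+1, coolingRate) == 0 below), so betalist
# grows roughly as 1.0, 1.0, 1.0, 0.5, 0.5, 0.5, 0.25, ...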
W = np.random.randn(survey.nSrc,nsubSrc)
dmisAll = DataMisfit.l2_DataMisfit(survey)
dmisfitall.append(dmisAll.eval(m0)/survey.nD)
print "Starting Model Dmisfit compared to full dataset: ",dmisAll.eval(m0)/survey.nD
print "Check misfit with true model: ",dmisAll.eval(mtrue)/survey.nD
for it in range(n_its):
problem.unpair()
problem.pair(survey)
Q = problem.getRHS()
sub = problem.getRHS().dot(W)
rx_r = SimultaneousRx(locs=P)
srcList_r = []
for isrc in range(sub.shape[1]):
src_r = SimultaneousSrc([rx_r], Q=Q[:,isrc],W=W[:,isrc],QW =Q.dot(W)[:,isrc])
srcList_r.append(src_r)
survey_r = DC.Survey(srcList_r)
problem.unpair()
problem.pair(survey_r)
d = survey_r.dpred(mtrue)
survey_r.dobs = d
survey_r.std = np.ones_like(d)*0.05
survey_r.eps = 1e-5*np.linalg.norm(survey_r.dobs)
print '# of data: ', survey_r.dobs.shape
regmesh = mesh;
dmis = DataMisfit.l2_DataMisfit(survey_r)
reg = Regularization.Tikhonov(regmesh)#,mapping = mapping)#,indActive=actind)
reg.mref = m0
opt = Optimization.InexactGaussNewton(maxIter=1,tolX=1e-6)
opt.remember('xc')
invProb = InvProblem.BaseInvProblem(dmis, reg, opt)
#beta = Directives.BetaEstimate_ByEig(beta0= 10.,beta0_ratio=1e0)
reg.alpha_s = 1e-6;
invProb.beta = beta
#betaSched = Directives.BetaSchedule(coolingFactor=5, coolingRate=2)
#sav0 = Directives.SaveEveryIteration()
#sav1 = Directives.SaveModelEveryIteration()
#sav2 = Directives.SaveOutputDictEveryIteration()
inv = Inversion.BaseInversion(invProb)#, directiveList=[sav2])#[beta,betaSched])#sav0,sav1,
msimple = inv.run(miter);
beta = invProb.beta
if np.mod(it+1,coolingRate) ==0:
beta = beta/coolingFactor
betalist.append(beta)
miter = copy.deepcopy(msimple)
dmisfitsub.append(dmis.eval(msimple)/survey_r.nD)
print "Dmisfit compared to sub dataset: ",dmis.eval(msimple)/survey_r.nD
print "Check misfit with true model: ",dmis.eval(mtrue)/survey_r.nD
problem.unpair()
problem.pair(survey)
dmisAll = DataMisfit.l2_DataMisfit(survey)
dmisfitall.append(dmisAll.eval(msimple)/survey.nD)
print "Dmisfit compared to full dataset: ",dmisAll.eval(msimple)/survey.nD
print "Check misfit with true model: ",dmisAll.eval(mtrue)/survey.nD
if np.mod(it+1,InnerIt) ==0:
W = np.random.randn(survey.nSrc,nsubSrc)
print 'update W'
#mm = mesh.plotImage(miter)
#plt.colorbar(mm[0])
#plt.gca().set_xlim([-10.,10.])
#plt.gca().set_ylim([-10.,0.])
np.save('./dmisfitsub.npy',dmisfitsub)
np.save('./dmisfitall.npy',dmisfitall)
np.save('./beta.npy',betalist)
np.save('./finalresult',msimple)
#plt.show()
|
|
# Copyright 2014, Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from neutron.common import constants as n_const
from neutron.common import utils as n_utils
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
# A class to represent a DVR-hosted subnet including vif_ports resident on
# that subnet
class LocalDVRSubnetMapping(object):
def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID):
        # set of compute ports on this dvr subnet
self.compute_ports = {}
self.subnet = subnet
self.csnat_ofport = csnat_ofport
self.dvr_owned = False
def __str__(self):
return ("subnet = %s compute_ports = %s csnat_port = %s"
" is_dvr_owned = %s" %
(self.subnet, self.get_compute_ofports(),
self.get_csnat_ofport(), self.is_dvr_owned()))
def get_subnet_info(self):
return self.subnet
def set_dvr_owned(self, owned):
self.dvr_owned = owned
def is_dvr_owned(self):
return self.dvr_owned
def add_compute_ofport(self, vif_id, ofport):
self.compute_ports[vif_id] = ofport
def remove_compute_ofport(self, vif_id):
self.compute_ports.pop(vif_id, 0)
def remove_all_compute_ofports(self):
self.compute_ports.clear()
def get_compute_ofports(self):
return self.compute_ports
def set_csnat_ofport(self, ofport):
self.csnat_ofport = ofport
def get_csnat_ofport(self):
return self.csnat_ofport
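# A lightweight record of a VIF port on the integration bridge (id, ofport,
# MAC and device_owner) plus the set of DVR-routed subnets it participates in.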
class OVSPort(object):
def __init__(self, id, ofport, mac, device_owner):
self.id = id
self.mac = mac
self.ofport = ofport
self.subnets = set()
self.device_owner = device_owner
def __str__(self):
return ("OVSPort: id = %s, ofport = %s, mac = %s, "
"device_owner = %s, subnets = %s" %
(self.id, self.ofport, self.mac,
self.device_owner, self.subnets))
def add_subnet(self, subnet_id):
self.subnets.add(subnet_id)
def remove_subnet(self, subnet_id):
self.subnets.remove(subnet_id)
def remove_all_subnets(self):
self.subnets.clear()
def get_subnets(self):
return self.subnets
def get_device_owner(self):
return self.device_owner
def get_mac(self):
return self.mac
def get_ofport(self):
return self.ofport
class OVSDVRNeutronAgent(object):
'''
    Implements OVS-based DVR (Distributed Virtual Router) for overlay networks.
'''
# history
# 1.0 Initial version
def __init__(self, context, plugin_rpc, integ_br, tun_br,
bridge_mappings, phys_brs, int_ofports, phys_ofports,
patch_int_ofport=constants.OFPORT_INVALID,
patch_tun_ofport=constants.OFPORT_INVALID,
host=None, enable_tunneling=False,
enable_distributed_routing=False):
self.context = context
self.plugin_rpc = plugin_rpc
self.host = host
self.enable_tunneling = enable_tunneling
self.enable_distributed_routing = enable_distributed_routing
self.bridge_mappings = bridge_mappings
self.phys_brs = phys_brs
self.int_ofports = int_ofports
self.phys_ofports = phys_ofports
self.reset_ovs_parameters(integ_br, tun_br,
patch_int_ofport, patch_tun_ofport)
self.reset_dvr_parameters()
self.dvr_mac_address = None
if self.enable_distributed_routing:
self.get_dvr_mac_address()
self.conf = cfg.CONF
def setup_dvr_flows(self):
self.setup_dvr_flows_on_integ_br()
self.setup_dvr_flows_on_tun_br()
self.setup_dvr_flows_on_phys_br()
self.setup_dvr_mac_flows_on_all_brs()
def reset_ovs_parameters(self, integ_br, tun_br,
patch_int_ofport, patch_tun_ofport):
'''Reset the openvswitch parameters'''
self.int_br = integ_br
self.tun_br = tun_br
self.patch_int_ofport = patch_int_ofport
self.patch_tun_ofport = patch_tun_ofport
def reset_dvr_parameters(self):
'''Reset the DVR parameters'''
self.local_dvr_map = {}
self.local_csnat_map = {}
self.local_ports = {}
self.registered_dvr_macs = set()
def get_dvr_mac_address(self):
try:
self.get_dvr_mac_address_with_retry()
except oslo_messaging.RemoteError as e:
LOG.warning(_LW('L2 agent could not get DVR MAC address at '
'startup due to RPC error. It happens when the '
'server does not support this RPC API. Detailed '
'message: %s'), e)
except oslo_messaging.MessagingTimeout:
LOG.error(_LE('DVR: Failed to obtain a valid local '
'DVR MAC address - L2 Agent operating '
'in Non-DVR Mode'))
if not self.in_distributed_mode():
# switch all traffic using L2 learning
# REVISIT(yamamoto): why to install the same flow as
# setup_integration_br?
self.int_br.install_normal()
def get_dvr_mac_address_with_retry(self):
# Get the local DVR MAC Address from the Neutron Server.
# This is the first place where we contact the server on startup
# so retry in case it's not ready to respond
for retry_count in reversed(range(5)):
try:
details = self.plugin_rpc.get_dvr_mac_address_by_host(
self.context, self.host)
except oslo_messaging.MessagingTimeout as e:
with excutils.save_and_reraise_exception() as ctx:
if retry_count > 0:
ctx.reraise = False
LOG.warning(_LW('L2 agent could not get DVR MAC '
'address from server. Retrying. '
'Detailed message: %s'), e)
else:
LOG.debug("L2 Agent DVR: Received response for "
"get_dvr_mac_address_by_host() from "
"plugin: %r", details)
self.dvr_mac_address = details['mac_address']
return
def setup_dvr_flows_on_integ_br(self):
        '''Set up initial DVR flows into br-int'''
if not self.in_distributed_mode():
return
LOG.info(_LI("L2 Agent operating in DVR Mode with MAC %s"),
self.dvr_mac_address)
# Remove existing flows in integration bridge
if self.conf.AGENT.drop_flows_on_start:
self.int_br.delete_flows()
# Add a canary flow to int_br to track OVS restarts
self.int_br.setup_canary_table()
# Insert 'drop' action as the default for Table DVR_TO_SRC_MAC
self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC, priority=1)
self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN,
priority=1)
# Insert 'normal' action as the default for Table LOCAL_SWITCHING
self.int_br.install_normal(table_id=constants.LOCAL_SWITCHING,
priority=1)
for physical_network in self.bridge_mappings:
self.int_br.install_drop(table_id=constants.LOCAL_SWITCHING,
priority=2,
in_port=self.int_ofports[
physical_network])
def setup_dvr_flows_on_tun_br(self):
        '''Set up initial DVR flows into br-tun'''
if not self.enable_tunneling or not self.in_distributed_mode():
return
self.tun_br.install_goto(dest_table_id=constants.DVR_PROCESS,
priority=1,
in_port=self.patch_int_ofport)
# table-miss should be sent to learning table
self.tun_br.install_goto(table_id=constants.DVR_NOT_LEARN,
dest_table_id=constants.LEARN_FROM_TUN)
self.tun_br.install_goto(table_id=constants.DVR_PROCESS,
dest_table_id=constants.PATCH_LV_TO_TUN)
def setup_dvr_flows_on_phys_br(self):
        '''Set up initial DVR flows into br-phys'''
if not self.in_distributed_mode():
return
for physical_network in self.bridge_mappings:
self.phys_brs[physical_network].install_goto(
in_port=self.phys_ofports[physical_network],
priority=2,
dest_table_id=constants.DVR_PROCESS_VLAN)
self.phys_brs[physical_network].install_goto(
priority=1,
dest_table_id=constants.DVR_NOT_LEARN_VLAN)
self.phys_brs[physical_network].install_goto(
table_id=constants.DVR_PROCESS_VLAN,
priority=0,
dest_table_id=constants.LOCAL_VLAN_TRANSLATION)
self.phys_brs[physical_network].install_drop(
table_id=constants.LOCAL_VLAN_TRANSLATION,
in_port=self.phys_ofports[physical_network],
priority=2)
self.phys_brs[physical_network].install_normal(
table_id=constants.DVR_NOT_LEARN_VLAN,
priority=1)
def _add_dvr_mac_for_phys_br(self, physical_network, mac):
self.int_br.add_dvr_mac_vlan(mac=mac,
port=self.int_ofports[physical_network])
phys_br = self.phys_brs[physical_network]
phys_br.add_dvr_mac_vlan(mac=mac,
port=self.phys_ofports[physical_network])
def _remove_dvr_mac_for_phys_br(self, physical_network, mac):
# REVISIT(yamamoto): match in_port as well?
self.int_br.remove_dvr_mac_vlan(mac=mac)
phys_br = self.phys_brs[physical_network]
# REVISIT(yamamoto): match in_port as well?
phys_br.remove_dvr_mac_vlan(mac=mac)
def _add_dvr_mac_for_tun_br(self, mac):
self.int_br.add_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
self.tun_br.add_dvr_mac_tun(mac=mac, port=self.patch_int_ofport)
def _remove_dvr_mac_for_tun_br(self, mac):
self.int_br.remove_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
# REVISIT(yamamoto): match in_port as well?
self.tun_br.remove_dvr_mac_tun(mac=mac)
def _add_dvr_mac(self, mac):
for physical_network in self.bridge_mappings:
self._add_dvr_mac_for_phys_br(physical_network, mac)
if self.enable_tunneling:
self._add_dvr_mac_for_tun_br(mac)
LOG.debug("Added DVR MAC flow for %s", mac)
self.registered_dvr_macs.add(mac)
def _remove_dvr_mac(self, mac):
for physical_network in self.bridge_mappings:
self._remove_dvr_mac_for_phys_br(physical_network, mac)
if self.enable_tunneling:
self._remove_dvr_mac_for_tun_br(mac)
LOG.debug("Removed DVR MAC flow for %s", mac)
self.registered_dvr_macs.remove(mac)
def setup_dvr_mac_flows_on_all_brs(self):
if not self.in_distributed_mode():
LOG.debug("Not in distributed mode, ignoring invocation "
"of get_dvr_mac_address_list() ")
return
dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context)
LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs)
for mac in dvr_macs:
if mac['mac_address'] == self.dvr_mac_address:
continue
self._add_dvr_mac(mac['mac_address'])
def dvr_mac_address_update(self, dvr_macs):
if not self.dvr_mac_address:
LOG.debug("Self mac unknown, ignoring this "
"dvr_mac_address_update() ")
return
dvr_host_macs = set()
for entry in dvr_macs:
if entry['mac_address'] == self.dvr_mac_address:
continue
dvr_host_macs.add(entry['mac_address'])
if dvr_host_macs == self.registered_dvr_macs:
LOG.debug("DVR Mac address already up to date")
return
dvr_macs_added = dvr_host_macs - self.registered_dvr_macs
dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs
for oldmac in dvr_macs_removed:
self._remove_dvr_mac(oldmac)
for newmac in dvr_macs_added:
self._add_dvr_mac(newmac)
def in_distributed_mode(self):
return self.dvr_mac_address is not None
def is_dvr_router_interface(self, device_owner):
return device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE
def process_tunneled_network(self, network_type, lvid, segmentation_id):
self.tun_br.provision_local_vlan(
network_type=network_type,
lvid=lvid,
segmentation_id=segmentation_id,
distributed=self.in_distributed_mode())
def _bind_distributed_router_interface_port(self, port, lvm,
fixed_ips, device_owner):
# since distributed router port must have only one fixed
# IP, directly use fixed_ips[0]
fixed_ip = fixed_ips[0]
subnet_uuid = fixed_ip['subnet_id']
csnat_ofport = constants.OFPORT_INVALID
ldm = None
if subnet_uuid in self.local_dvr_map:
ldm = self.local_dvr_map[subnet_uuid]
csnat_ofport = ldm.get_csnat_ofport()
if csnat_ofport == constants.OFPORT_INVALID:
LOG.error(_LE("DVR: Duplicate DVR router interface detected "
"for subnet %s"), subnet_uuid)
return
else:
# set up LocalDVRSubnetMapping available for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(
self.context, subnet_uuid, fixed_ips=fixed_ips)
if not subnet_info:
LOG.error(_LE("DVR: Unable to retrieve subnet information "
"for subnet_id %s"), subnet_uuid)
return
LOG.debug("get_subnet_for_dvr for subnet %(uuid)s "
"returned with %(info)s",
{"uuid": subnet_uuid, "info": subnet_info})
ldm = LocalDVRSubnetMapping(subnet_info)
self.local_dvr_map[subnet_uuid] = ldm
# DVR takes over
ldm.set_dvr_owned(True)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
subnet_info = ldm.get_subnet_info()
ip_version = subnet_info['ip_version']
local_compute_ports = (
self.plugin_rpc.get_ports_on_host_by_subnet(
self.context, self.host, subnet_uuid))
LOG.debug("DVR: List of ports received from "
"get_ports_on_host_by_subnet %s",
local_compute_ports)
vif_by_id = self.int_br.get_vifs_by_ids(
[prt['id'] for prt in local_compute_ports])
for prt in local_compute_ports:
vif = vif_by_id.get(prt['id'])
if not vif:
continue
ldm.add_compute_ofport(vif.vif_id, vif.ofport)
if vif.vif_id in self.local_ports:
                # this compute port is already known from another dvr-routed
                # subnet; just add this subnet to the existing port entry
comp_ovsport = self.local_ports[vif.vif_id]
comp_ovsport.add_subnet(subnet_uuid)
else:
                # this is the first time the compute port is seen on a
                # dvr-routed subnet; record the port and add this subnet to it
comp_ovsport = OVSPort(vif.vif_id, vif.ofport,
vif.vif_mac, prt['device_owner'])
comp_ovsport.add_subnet(subnet_uuid)
self.local_ports[vif.vif_id] = comp_ovsport
# create rule for just this vm port
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=comp_ovsport.get_mac(),
dst_port=comp_ovsport.get_ofport())
if lvm.network_type == p_const.TYPE_VLAN:
# TODO(vivek) remove the IPv6 related flows once SNAT is not
# used for IPv6 DVR.
br = self.phys_brs[lvm.physical_network]
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
# TODO(vivek) remove the IPv6 related flows once SNAT is not
# used for IPv6 DVR.
if ip_version == 4:
br.install_dvr_process_ipv4(
vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip'])
else:
br.install_dvr_process_ipv6(
vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac'])
br.install_dvr_process(
vlan_tag=lvm.vlan, vif_mac=port.vif_mac,
dvr_mac_address=self.dvr_mac_address)
# the dvr router interface is itself a port, so capture it
# queue this subnet to that port. A subnet appears only once as
# a router interface on any given router
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
def _bind_port_on_dvr_subnet(self, port, lvm, fixed_ips,
device_owner):
# Handle new compute port added use-case
subnet_uuid = None
for ips in fixed_ips:
if ips['subnet_id'] not in self.local_dvr_map:
continue
subnet_uuid = ips['subnet_id']
ldm = self.local_dvr_map[subnet_uuid]
if not ldm.is_dvr_owned():
# well this is CSNAT stuff, let dvr come in
# and do plumbing for this vm later
continue
# This confirms that this compute port belongs
# to a dvr hosted subnet.
# Accommodate this VM Port into the existing rule in
# the integration bridge
LOG.debug("DVR: Plumbing compute port %s", port.vif_id)
subnet_info = ldm.get_subnet_info()
ldm.add_compute_ofport(port.vif_id, port.ofport)
if port.vif_id in self.local_ports:
                # this compute port is already known from another dvr-routed
                # subnet; just add this subnet to the existing port entry
ovsport = self.local_ports[port.vif_id]
ovsport.add_subnet(subnet_uuid)
else:
                # this is the first time the compute port is seen on a
                # dvr-routed subnet; record the port and add this subnet to it
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# create a rule for this vm port
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=ovsport.get_mac(),
dst_port=ovsport.get_ofport())
def _bind_centralized_snat_port_on_dvr_subnet(self, port, lvm,
fixed_ips, device_owner):
# since centralized-SNAT (CSNAT) port must have only one fixed
# IP, directly use fixed_ips[0]
fixed_ip = fixed_ips[0]
if port.vif_id in self.local_ports:
# throw an error if CSNAT port is already on a different
# dvr routed subnet
ovsport = self.local_ports[port.vif_id]
subs = list(ovsport.get_subnets())
if subs[0] == fixed_ip['subnet_id']:
return
LOG.error(_LE("Centralized-SNAT port %(port)s on subnet "
"%(port_subnet)s already seen on a different "
"subnet %(orig_subnet)s"), {
"port": port.vif_id,
"port_subnet": fixed_ip['subnet_id'],
"orig_subnet": subs[0],
})
return
subnet_uuid = fixed_ip['subnet_id']
ldm = None
subnet_info = None
if subnet_uuid not in self.local_dvr_map:
# no csnat ports seen on this subnet - create csnat state
# for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(
self.context, subnet_uuid, fixed_ips=fixed_ips)
ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
self.local_dvr_map[subnet_uuid] = ldm
else:
ldm = self.local_dvr_map[subnet_uuid]
subnet_info = ldm.get_subnet_info()
# Store csnat OF Port in the existing DVRSubnetMap
ldm.set_csnat_ofport(port.ofport)
# create ovsPort footprint for csnat port
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=ovsport.get_mac(),
dst_port=ovsport.get_ofport())
def bind_port_to_dvr(self, port, local_vlan_map,
fixed_ips, device_owner):
if not self.in_distributed_mode():
return
if local_vlan_map.network_type not in (constants.TUNNEL_NETWORK_TYPES
+ [p_const.TYPE_VLAN]):
LOG.debug("DVR: Port %s is with network_type %s not supported"
" for dvr plumbing" % (port.vif_id,
local_vlan_map.network_type))
return
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._bind_distributed_router_interface_port(port,
local_vlan_map,
fixed_ips,
device_owner)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._bind_port_on_dvr_subnet(port, local_vlan_map,
fixed_ips,
device_owner)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._bind_centralized_snat_port_on_dvr_subnet(port,
local_vlan_map,
fixed_ips,
device_owner)
def _unbind_distributed_router_interface_port(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# removal of distributed router interface
subnet_ids = ovsport.get_subnets()
subnet_set = set(subnet_ids)
network_type = lvm.network_type
physical_network = lvm.physical_network
vlan_to_use = lvm.vlan
if network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# ensure we process for all the subnets laid on this removed port
for sub_uuid in subnet_set:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
subnet_info = ldm.get_subnet_info()
ip_version = subnet_info['ip_version']
# DVR is no more owner
ldm.set_dvr_owned(False)
# remove all vm rules for this dvr subnet
# clear of compute_ports altogether
compute_ports = ldm.get_compute_ofports()
for vif_id in compute_ports:
comp_port = self.local_ports[vif_id]
self.int_br.delete_dvr_to_src_mac(
network_type=network_type,
vlan_tag=vlan_to_use, dst_mac=comp_port.get_mac())
ldm.remove_all_compute_ofports()
if ldm.get_csnat_ofport() == constants.OFPORT_INVALID:
# if there is no csnat port for this subnet, remove
# this subnet from local_dvr_map, as no dvr (or) csnat
# ports available on this agent anymore
self.local_dvr_map.pop(sub_uuid, None)
if network_type == p_const.TYPE_VLAN:
br = self.phys_brs[physical_network]
if network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
if ip_version == 4:
br.delete_dvr_process_ipv4(
vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip'])
else:
br.delete_dvr_process_ipv6(
vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac'])
ovsport.remove_subnet(sub_uuid)
if lvm.network_type == p_const.TYPE_VLAN:
br = self.phys_brs[physical_network]
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
br.delete_dvr_process(vlan_tag=lvm.vlan, vif_mac=port.vif_mac)
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_port_on_dvr_subnet(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# This confirms that this compute port being removed belonged
# to a dvr hosted subnet.
LOG.debug("DVR: Removing plumbing for compute port %s", port)
subnet_ids = ovsport.get_subnets()
# ensure we process for all the subnets laid on this port
for sub_uuid in subnet_ids:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
ldm.remove_compute_ofport(port.vif_id)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# first remove this vm port rule
self.int_br.delete_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac())
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_centralized_snat_port_on_dvr_subnet(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# This confirms that this compute port being removed belonged
# to a dvr hosted subnet.
LOG.debug("DVR: Removing plumbing for csnat port %s", port)
sub_uuid = list(ovsport.get_subnets())[0]
# ensure we process for all the subnets laid on this port
if sub_uuid not in self.local_dvr_map:
return
ldm = self.local_dvr_map[sub_uuid]
ldm.set_csnat_ofport(constants.OFPORT_INVALID)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# then remove csnat port rule
self.int_br.delete_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac())
if not ldm.is_dvr_owned():
# if not owned by DVR (only used for csnat), remove this
# subnet state altogether
self.local_dvr_map.pop(sub_uuid, None)
# release port state
self.local_ports.pop(port.vif_id, None)
def unbind_port_from_dvr(self, vif_port, local_vlan_map):
if not self.in_distributed_mode():
return
# Handle port removed use-case
if vif_port and vif_port.vif_id not in self.local_ports:
LOG.debug("DVR: Non distributed port, ignoring %s", vif_port)
return
ovsport = self.local_ports[vif_port.vif_id]
device_owner = ovsport.get_device_owner()
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._unbind_distributed_router_interface_port(vif_port,
local_vlan_map)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._unbind_port_on_dvr_subnet(vif_port,
local_vlan_map)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._unbind_centralized_snat_port_on_dvr_subnet(vif_port,
local_vlan_map)
|
|
__author__ = 'USER'
import random
from common import math_util
class NeuralNetwork:
@property
def output_matrix(self):
return self._output_matrix
@output_matrix.setter
    def output_matrix(self, _output_matrix):
        self._output_matrix = _output_matrix
@output_matrix.deleter
def output_matrix(self):
del self._output_matrix
@property
def delta_matrix(self):
return self._delta_matrix
@delta_matrix.setter
def delta_matrix(self, _delta_matrix):
self._delta_matrix = _delta_matrix
@delta_matrix.deleter
def delta_matrix(self):
del self._delta_matrix
@property
def weight_matrix(self):
return self._weight_matrix
@weight_matrix.setter
def weight_matrix(self, _weight_matrix):
self._weight_matrix = _weight_matrix
@weight_matrix.deleter
def weight_matrix(self):
del self._weight_matrix
@property
def number_of_layers(self):
return self._number_of_layers
@number_of_layers.setter
def number_of_layers(self, _number_of_layers):
self._number_of_layers = _number_of_layers
@number_of_layers.deleter
def number_of_layers(self):
del self._number_of_layers
@property
def size_of_layers(self):
return self._size_of_layers
@size_of_layers.setter
def size_of_layers(self, _size_of_layers):
self._size_of_layers = _size_of_layers
@size_of_layers.deleter
def size_of_layers(self):
del self._size_of_layers
@property
def learning_rate(self):
return self._learning_rate
@learning_rate.setter
def learning_rate(self, _learning_rate):
self._learning_rate = _learning_rate
@learning_rate.deleter
def learning_rate(self):
del self._learning_rate
@property
def momentum(self):
return self._momentum
@momentum.setter
def momentum(self, _momentum):
self._momentum = _momentum
@momentum.deleter
def momentum(self):
del self._momentum
@property
def prev_delta_weight(self):
return self._prev_delta_weight
@prev_delta_weight.setter
def prev_delta_weight(self, _prev_delta_weight):
self._prev_delta_weight = _prev_delta_weight
@prev_delta_weight.deleter
def prev_delta_weight(self):
del self._prev_delta_weight
def __init__(self, _number_of_layers, _size_array, _learning_rate=0.3, _momentum=0.1):
"""
Initialize the network
        :param _number_of_layers: The number of layers of the network.
        :param _size_array: The array containing the size of each layer.
        :param _learning_rate: The learning rate of the network. Default value is 0.3.
        :param _momentum: The momentum parameter of the network. Default value is 0.1.
        :return: None
"""
self._number_of_layers = _number_of_layers
self._learning_rate = _learning_rate
self._momentum = _momentum
self._delta_matrix = []
self._weight_matrix = []
self._prev_delta_weight = []
self._size_of_layers = _size_array
        # Pre-allocate the needed structures:
        #   output_matrix, delta_matrix      -> 2-dimensional lists
        #   weight_matrix, prev_delta_weight -> 3-dimensional lists
self.output_matrix = [[0 for j in range(_size_array[i])] for i in range(0, _number_of_layers)]
self.delta_matrix = [[0 for j in range(_size_array[i])] for i in range(1, _number_of_layers)]
self.weight_matrix = [ [[0 for k in range(_size_array[i-1]+1)] for j in range(_size_array[i])] for i in range(1, _number_of_layers) ]
self.prev_delta_weight = [[[0 for k in range(_size_array[i-1]+1)] for j in range(_size_array[i])] for i in range(1, _number_of_layers)]
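        # Pad the per-layer structures with a leading None so that index i
        # refers to layer i (layer 0 has no incoming weights or deltas).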
link_vacancy = None
self.delta_matrix = [link_vacancy] + self.delta_matrix
self.weight_matrix = [link_vacancy] + self.weight_matrix
self.prev_delta_weight = [link_vacancy] + self.prev_delta_weight
for i in range(1, _number_of_layers):
for j in range(0, _size_array[i]):
for k in range(0, _size_array[i-1]+1):
self.weight_matrix[i][j][k] = random.random()
self.prev_delta_weight[i][j][k] = 0.0
def feedforward(self, input_list):
"""
        Feed forward activations for one set of inputs.
        :param input_list: Input data for the network.
        :return: None
"""
# Elements in 0'th output matrix are input data.
for i in range(0, len(input_list)):
self.output_matrix[0][i] = input_list[i]
# Apply activation value to each neuron using sigmoid function.
for i in range(1, self.number_of_layers):
for j in range(0, self.size_of_layers[i]):
multiple_sum = 0
for k in range(0, self.size_of_layers[i-1]):
multiple_sum += self.output_matrix[i-1][k] * self.weight_matrix[i][j][k] # Apply weight to inputs and add to sum.
multiple_sum += self.weight_matrix[i][j][self.size_of_layers[i-1]] # Apply bias.
self.output_matrix[i][j] = math_util.sigmoid(multiple_sum) # Apply sigmoid function.
def backpropagate(self, input_list, target_list):
"""
        Back propagate the error for one set of inputs.
        :param input_list: Input data for the network.
        :param target_list: Expected output values for the network.
        :return: None
"""
self.feedforward(input_list) # Update output value for each neuron.
# Find delta for output layer.
for i in range(0, self.size_of_layers[self.number_of_layers-1]):
self.delta_matrix[self.number_of_layers-1][i] = self.output_matrix[self.number_of_layers-1][i] * (1-self.output_matrix[self.number_of_layers-1][i]) * (target_list[i] - self.output_matrix[self.number_of_layers-1][i])
# Find delta for hidden layer.
for i in range(self.number_of_layers-2, 0, -1):
for j in range(0, self.size_of_layers[i]):
multiple_sum = 0
for k in range(0, self.size_of_layers[i+1]):
multiple_sum += self.delta_matrix[i+1][k] * self.weight_matrix[i+1][k][j]
self.delta_matrix[i][j] = self.output_matrix[i][j] * (1-self.output_matrix[i][j]) * multiple_sum
# Iteration for weight matrix.
for i in range(1, self.number_of_layers):
for j in range(0, self.size_of_layers[i]):
for k in range(0, self.size_of_layers[i-1]):
# Apply momentum.
self.weight_matrix[i][j][k] += self.momentum * self.prev_delta_weight[i][j][k]
# Apply momentum of bias to weight matrix.
self.weight_matrix[i][j][self.size_of_layers[i-1]] += self.momentum * self.prev_delta_weight[i][j][self.size_of_layers[i-1]]
# Iteration for previous delta.
for i in range(1, self.number_of_layers):
for j in range(0, self.size_of_layers[i]):
for k in range(0, self.size_of_layers[i-1]):
                    # Compute the weight update (steepest descent), keep it for the momentum term, and apply it.
self.prev_delta_weight[i][j][k] = self.learning_rate * self.delta_matrix[i][j] * self.output_matrix[i-1][k]
self.weight_matrix[i][j][k] += self.prev_delta_weight[i][j][k]
                # Update the bias weight in the same way.
self.prev_delta_weight[i][j][self.size_of_layers[i-1]] = self.learning_rate * self.delta_matrix[i][j]
self.weight_matrix[i][j][self.size_of_layers[i-1]] += self.prev_delta_weight[i][j][self.size_of_layers[i-1]]
def mean_squared_error(self, target_list):
"""
        Error measure for the network: half the sum of squared errors over the output layer.
        :param target_list: Target values to compare against.
        :return: The error value.
"""
mse = 0
for i in range(0, self.size_of_layers[self.number_of_layers-1]):
mse += (target_list[i]-self.output(i)) * (target_list[i] - self.output(i))
return mse / 2
def output(self, index):
"""
Return values of the output layer.
:param index: Specific index of output value.
:return: Value of the output.
"""
return self.output_matrix[self.number_of_layers-1][index]
pass
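# Illustrative usage sketch (not part of the original module).  It trains the
# network above on XOR for a number of epochs and prints the outputs.  It only
# assumes common.math_util.sigmoid (imported at the top of this file) is the
# usual logistic function 1 / (1 + e**-x); convergence is not guaranteed.
if __name__ == '__main__':
    samples = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
    # 3 layers: 2 inputs -> 4 hidden units -> 1 output.
    net = NeuralNetwork(3, [2, 4, 1], _learning_rate=0.5, _momentum=0.1)
    for _ in range(5000):
        for inputs, target in samples:
            net.backpropagate(inputs, target)
    for inputs, target in samples:
        net.feedforward(inputs)
        print('%s -> %.3f (target %s)' % (inputs, net.output(0), target[0]))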
|
|
from tornado import auth, web, httpclient
from StringIO import StringIO
from operator import itemgetter
import os
import logging
import mimetypes
import zipfile
import yaml
import json
import time
from main.helper import *
from main.db import *
class TestBug(myRequestHandler, Entity):
def get(self):
1 / 0
class ShowGroup(myRequestHandler, Entity):
"""
"""
@web.removeslash
@web.authenticated
def get(self, entity_definition_keyname=None):
"""
Show entities page with menu.
"""
entity_definition_keyname = entity_definition_keyname.strip('/').split('/')[0]
entity_definition = None
quota_entities_used = 0
quota_size_used = 0
add_definitions = {}
if entity_definition_keyname:
entity_definition = self.get_entity_definition(entity_definition_keyname=entity_definition_keyname)
for ad in self.get_definitions_with_optional_parent(entity_definition_keyname):
add_definitions.setdefault(ad.get('related_entity_label'), []).append(ad)
else:
quota_entities_used = self.db_get('SELECT COUNT(*) AS entities FROM entity WHERE is_deleted = 0;').get('entities', 0)
quota_size_used = self.db_get('SELECT SUM(filesize) AS size FROM file;').get('size', 0)
self.render('entity/template/start.html',
page_title = entity_definition[0]['label_plural'] if entity_definition else '',
menu = self.get_menu(),
show_list = True if entity_definition_keyname else False,
entity_definition_label = entity_definition[0]['label_plural'] if entity_definition else '',
entity_definition_keyname = entity_definition_keyname,
add_definitions = add_definitions,
quota_entities_used = int(quota_entities_used),
quota_size = float(self.app_settings('quota-data', 0))*1000000000.0,
quota_size_human = GetHumanReadableBytes(float(self.app_settings('quota-data', 0))*1000000000.0, 1),
quota_size_used = int(quota_size_used) if quota_size_used else 0,
quota_size_used_human = GetHumanReadableBytes(quota_size_used, 1) if quota_size_used else '0B'
)
class ShowTableView(myRequestHandler, Entity):
@web.authenticated
def post(self, entity_definition_keyname=None):
search = self.get_argument('q', None, True)
limit = self.app_settings('tablepagesize', 101)
entities = self.get_entities(search=search, entity_definition_keyname=entity_definition_keyname, full_definition=True, limit=limit)
self.render('entity/template/table.html',
entities = entities,
)
class GetEntities(myRequestHandler, Entity):
"""
"""
@web.authenticated
def get(self):
"""
"""
search = self.get_argument('q', None, True)
entity_definition_keynames = StrToList(self.get_argument('definition', '', True))
exclude_entity_id = self.get_argument('exclude_entity', '0', True)
if not search:
return self.missing()
result = []
for e in self.get_entities(search=search, entity_definition_keyname=entity_definition_keynames, limit=303):
if exclude_entity_id:
if e['id'] in [int(x) for x in exclude_entity_id.split(',')]:
continue
result.append({
'id': e['id'],
'title': e['displayname'],
'info': e['displayinfo'],
'image': e['displaypicture'],
'definition': e['label']
})
self.write({'entities': result})
class GetUsers(myRequestHandler, Entity):
"""
    Return a list of entities that have an
    'entu-user' or 'entu-api-key' property.
"""
@web.authenticated
def get(self):
"""
"""
search = self.get_argument('q', None, True)
exclude_entity_id = self.get_argument('exclude_entity', '0', True)
if not search:
return self.missing()
result = []
for e in self.get_users(search=search):
if exclude_entity_id:
if e['id'] in [int(x) for x in exclude_entity_id.split(',')]:
continue
result.append({
'id': e['id'],
'title': e['displayname'],
'info': e['displayinfo'],
'image': e['displaypicture'],
'definition': e['label']
})
self.write({'entities': result})
class ShowEntity(myRequestHandler, Entity):
@web.authenticated
def get(self, entity_id=None, url=None):
"""
        Shows Entity info.
"""
if not entity_id:
return self.missing()
item = self.get_entities(entity_id=entity_id, limit=1)
if not item:
return self.missing()
        if self.request.headers.get('X-Requested-With', '').lower() != 'xmlhttprequest':
            return self.redirect('/entity/%s/%s' % (item.get('definition_keyname'), entity_id))
parents = self.get_relatives(related_entity_id=item['id'], relationship_definition_keyname='child', reverse_relation=True)
allowed_childs = self.get_allowed_childs(entity_id=item['id'])
allowed_parents = self.get_allowed_parents(entity_id=item['id'])
add_definitions = {}
for ad in self.get_definitions_with_optional_parent(item.get('definition_keyname')):
add_definitions.setdefault(ad.get('related_entity_label'), []).append(ad)
add_relations = {}
for ar in self.get_definitions_with_optional_relative(item.get('definition_keyname')):
add_relations.setdefault(ar.get('related_entity_label'), []).append(ar)
self.render('entity/template/item.html',
page_title = item['displayname'],
entity = item,
parents = parents.values() if parents else [],
allowed_childs = allowed_childs,
allowed_parents = allowed_parents,
add_definitions = add_definitions,
add_relations = add_relations,
public_path = self.get_public_path(entity_id),
)
class ShowEntityEdit(myRequestHandler, Entity):
@web.authenticated
def get(self, entity_id=None):
"""
        Shows Entity edit form.
"""
item = self.get_entities(entity_id=entity_id, limit=1, full_definition=True)
if not item:
return
try:
AWS_BUCKET = self.app_settings('auth-s3', '\n', True).split('\n')[0]
AWS_ACCESS_KEY = self.app_settings('auth-s3', '\n', True).split('\n')[1]
AWS_SECRET_KEY = self.app_settings('auth-s3', '\n', True).split('\n')[2]
s3upload = True
except Exception, e:
s3upload = False
self.render('entity/template/edit.html',
entity = item,
parent_entity_id = '',
entity_definition_keyname = '',
actions = ['default'],
open_after_add = False,
s3upload = s3upload
)
class ShowEntityAdd(myRequestHandler, Entity):
@web.authenticated
def get(self, entity_id=None, entity_definition_keyname=None):
"""
        Shows Entity adding form.
"""
item = self.get_entities(entity_id=0, entity_definition_keyname=entity_definition_keyname, limit=1, full_definition=True)
if not item:
return
entity_definition = self.get_entity_definition(entity_definition_keyname=entity_definition_keyname)
actions = StrToList(entity_definition[0].get('actions_add'))
if 'default' not in actions and '-default' not in actions:
actions.append('default')
if '-default' in actions:
actions.remove('-default')
try:
AWS_BUCKET = self.app_settings('auth-s3', '\n', True).split('\n')[0]
AWS_ACCESS_KEY = self.app_settings('auth-s3', '\n', True).split('\n')[1]
AWS_SECRET_KEY = self.app_settings('auth-s3', '\n', True).split('\n')[2]
s3upload = True
except Exception, e:
s3upload = False
self.render('entity/template/edit.html',
entity = item,
parent_entity_id = entity_id,
entity_definition_keyname = entity_definition_keyname,
actions = actions,
open_after_add = True if entity_definition[0].get('open_after_add', 0) == 1 else False,
s3upload = s3upload
)
class ShowEntityRelate(myRequestHandler, Entity):
@web.authenticated
def get(self, entity_id=None):
"""
        Shows Entity relate form.
"""
item = self.get_entities(entity_id=entity_id, limit=1, full_definition=True)
if not item:
return
self.render('entity/template/edit.html',
entity = item,
parent_entity_id = '',
entity_definition_keyname = '',
)
class SaveEntity(myRequestHandler, Entity):
entity_id = None
new_property_id = None
property_definition_keyname = None
is_file = False
value = None
external_files = {}
@web.authenticated
@web.asynchronous
def post(self):
"""
        Saves Entity info.
"""
if self.get_argument('is_file', default='false', strip=True).lower() == 'true':
self.is_file = True
self.value = self.request.files.get('value', []) if self.request.files.get('value', None) else None
else:
self.is_file = False
self.value = self.get_argument('value', default=None, strip=True)
self.entity_id = self.get_argument('entity_id', default=None, strip=True)
self.new_property_id = self.get_argument('value_id', default=None, strip=True)
self.property_definition_keyname = self.get_argument('property_definition_keyname', default=None, strip=True)
parent_entity_id = self.get_argument('parent_entity_id', default=None, strip=True)
entity_definition_keyname = self.get_argument('entity_definition_keyname', default=None, strip=True)
property_id = self.get_argument('value_id', default=None, strip=True)
is_counter = self.get_argument('counter', default='false', strip=True)
is_public = self.get_argument('is_public', default='false', strip=True)
self.external_files = json.loads(self.get_argument('external_files', None)) if self.get_argument('external_files', None) else None
external_download = True if self.get_argument('external_download', default='false', strip=True).lower() == 'true' else False
if not self.entity_id and parent_entity_id and entity_definition_keyname:
self.entity_id = self.create_entity(entity_definition_keyname=entity_definition_keyname, parent_entity_id=parent_entity_id)
if is_counter.lower() == 'true':
self.value = self.set_counter(entity_id=self.entity_id)
elif is_public.lower() == 'true':
self.value = True if self.value.lower() == 'true' else False
self.value = self.set_public(entity_id=self.entity_id, is_public=self.value)
elif self.external_files:
self.value = []
for link, filename in self.external_files.iteritems():
self.value.append(filename)
if external_download:
httpclient.AsyncHTTPClient().fetch(link, method = 'GET', request_timeout = 3600, callback=self._got_external_file)
else:
self.new_property_id = self.set_property(entity_id=self.entity_id, property_definition_keyname=self.property_definition_keyname, value={'filename': filename, 'url': link})
if external_download:
return
else:
if type(self.value) is not list:
self.value = [self.value]
for v in self.value:
self.new_property_id = self.set_property(entity_id=self.entity_id, property_definition_keyname=self.property_definition_keyname, value=v, old_property_id=property_id)
if self.is_file:
self.value = [x['filename'] for x in self.value]
self._printout()
@web.asynchronous
def _got_external_file(self, response):
filename = self.external_files[response.request.url]
self.new_property_id = self.set_property(entity_id=self.entity_id, property_definition_keyname=self.property_definition_keyname, value={'filename': filename, 'body': response.body})
del self.external_files[response.request.url]
if not self.external_files:
self._printout()
@web.asynchronous
def _printout(self):
self.write(json.dumps({
'entity_id': self.entity_id,
'property_definition_keyname': self.property_definition_keyname,
'value_id': self.new_property_id,
'value': self.value if not self.is_file else None,
'files': self.value if self.is_file else None,
}))
self.finish()
class DeleteFile(myRequestHandler, Entity):
@web.authenticated
def post(self, file_id=None):
"""
Delete file.
Mandatory arguments:
- property_id
- entity_id
Find entity by id and change file property (by id) to None.
"""
property_id = self.get_argument('property_id', None, True)
entity_id = self.get_argument('entity_id', None, True)
item = self.get_entities(entity_id=entity_id, limit=1)
if not item:
return self.missing()
self.set_property(entity_id=entity_id, old_property_id=property_id)
self.write({'response': 'OK'})
class DeleteEntity(myRequestHandler, Entity):
@web.authenticated
def post(self, id=None):
"""
Delete whole entity.
        Also recursively deletes its children.
Mandatory arguments:
- entity_id
        1. Find children by parent entity id and delete them as well.
2. Mark entity's deleted property to current time and deleted_by to current user's id.
"""
entity_id = self.get_argument('entity_id', None, True)
item = self.get_entities(entity_id=entity_id, limit=1)
if not item:
return self.missing()
self.delete_entity(entity_id)
self.write({'response': 'OK'})
class ShareByEmail(myRequestHandler, Entity):
@web.authenticated
def get(self, entity_id=None):
"""
        Shows Entity share by email form.
"""
self.render('entity/template/email.html',
entity_id = entity_id,
email = self.get_argument('email', '')
)
class EntityRights(myRequestHandler, Entity):
@web.authenticated
def get(self, entity_id=None):
"""
        Shows Entity rights form.
"""
rights = []
for right, entities in self.get_rights(entity_id=entity_id).iteritems():
for e in entities:
rights.append({
'right': right,
'id': e.get('id'),
'name': e.get('displayname'),
})
entity = self.get_entities(entity_id=entity_id, limit=1)
rights.append({
'right': 'viewer',
'id': None,
'name': 'XXXX',
})
self.render('entity/template/rights.html',
entity_id = entity_id,
sharing = entity.get('sharing'),
sharing_link = '%s://%s/shared/%s/%s' % (self.request.protocol, self.request.host, entity_id, entity.get('sharing_key')) if entity.get('sharing_key') else None,
rights = sorted(rights, key=itemgetter('name')),
)
@web.authenticated
def post(self, entity_id=None):
sharing = self.get_argument('sharing', None)
related_entity_id = self.get_argument('person', None)
right = self.get_argument('right', None)
if entity_id and sharing:
self.set_sharing(entity_id=entity_id, sharing=sharing)
elif entity_id and self.get_argument('generate_link', None):
sharing_key = self.set_sharing_key(entity_id=entity_id, generate=True)
self.write('%s://%s/shared/%s/%s' % (self.request.protocol, self.request.host, entity_id, sharing_key))
elif entity_id and self.get_argument('delete_link', None):
self.set_sharing_key(entity_id=entity_id, generate=False)
self.write('OK')
elif entity_id and related_entity_id:
self.set_rights(entity_id=entity_id, related_entity_id=related_entity_id, right=right)
class EntityParents(myRequestHandler, Entity):
@web.authenticated
def get(self, entity_id=None):
"""
        Shows Entity parents form.
"""
parents = self.get_relatives(related_entity_id=entity_id, relationship_definition_keyname='child', reverse_relation=True)
allowed_parents = self.get_allowed_parents(entity_id=entity_id)
self.render('entity/template/parents.html',
entity_id = entity_id,
parents = parents.values() if parents else None,
allowed_parents = allowed_parents,
)
@web.authenticated
def post(self, entity_id=None):
parent = self.get_argument('parent', None)
delete = True if self.get_argument('delete', 'false', True).lower() == 'true' else False
if not entity_id or not parent:
return
self.set_parent(entity_id=entity_id, parent=parent, delete=delete)
self.write('OK')
class EntityDuplicate(myRequestHandler, Entity):
@web.authenticated
def get(self, entity_id=None):
"""
        Shows Entity duplication form.
"""
entity = self.get_entities(entity_id=entity_id, limit=1)
self.render('entity/template/duplicate.html',
entity = entity,
)
@web.authenticated
def post(self, entity_id=None):
copies = self.get_argument('count', None, True)
skip_property_definition_keyname = self.get_arguments('properties[]', True)
if not entity_id or not copies:
return
self.duplicate_entity(entity_id=entity_id, copies=copies, skip_property_definition_keyname=skip_property_definition_keyname)
self.write('OK')
class ShowHTMLproperty(myRequestHandler, Entity):
@web.authenticated
def get(self, entity_id, dataproperty):
"""
Shows HTML property in modal box
"""
item = self.get_entities(entity_id=entity_id, limit=1, full_definition=False)
if not item:
return
self.write('\n'.join([x.get('value', '') for x in item.get('properties', {}).get(dataproperty, {}).get('values') if x.get('value', '')]))
handlers = [
('/test', TestBug),
('/entity/save', SaveEntity),
('/entity/delete-file', DeleteFile),
('/entity/delete-entity', DeleteEntity),
('/entity/search', GetEntities),
('/entity/users', GetUsers),
(r'/entity/table/(.*)', ShowTableView),
(r'/entity-(.*)/edit', ShowEntityEdit),
(r'/entity-(.*)/relate', ShowEntityRelate),
(r'/entity-(.*)/add/(.*)', ShowEntityAdd),
(r'/entity-(.*)/share', ShareByEmail),
(r'/entity-(.*)/rights', EntityRights),
(r'/entity-(.*)/parents', EntityParents),
(r'/entity-(.*)/duplicate', EntityDuplicate),
(r'/entity-(.*)/html-(.*)', ShowHTMLproperty),
(r'/entity-(.*)', ShowEntity),
(r'/entity(.*)', ShowGroup),
]
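# Illustrative wiring sketch (not part of this module): in the running
# application the route tuples above are typically merged with the routes from
# the other modules and handed to tornado.web.Application.  The settings shown
# here are placeholders.
#
# application = web.Application(
#     handlers,
#     cookie_secret='CHANGE-ME',
#     login_url='/auth/login',
# )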
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006 Alec Thomas
# Copyright (C) 2007 Eli Carter
# Copyright (C) 2007 Christian Boos <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Eli Carter
from ConfigParser import ParsingError, RawConfigParser
from StringIO import StringIO
from collections import defaultdict
from functools import partial
from pkg_resources import resource_filename
from genshi.builder import tag
from trac.config import Configuration, ConfigSection
from trac.core import *
from trac.env import IEnvironmentSetupParticipant
from trac.perm import PermissionCache, PermissionSystem
from trac.resource import ResourceNotFound
from trac.ticket.api import ITicketActionController, TicketSystem
from trac.ticket.model import Component as TicketComponent, Resolution
from trac.util import get_reporter_id, sub_val, to_list
from trac.util.presentation import separated
from trac.util.translation import _, tag_, cleandoc_
from trac.web.chrome import Chrome, add_script, add_script_data
from trac.wiki.formatter import system_message
from trac.wiki.macros import WikiMacroBase
# -- Utilities for the ConfigurableTicketWorkflow
def parse_workflow_config(rawactions):
"""Given a list of options from [ticket-workflow]"""
required_attrs = {
'oldstates': [],
'newstate': '',
'name': '',
'label': '',
'default': 0,
'operations': [],
'permissions': [],
}
optional_attrs = {
'set_owner': [],
'set_resolution': [],
}
known_attrs = required_attrs.copy()
known_attrs.update(optional_attrs)
actions = defaultdict(dict)
for option, value in rawactions:
parts = option.split('.')
name = parts[0]
if len(parts) == 1:
try:
# Base name, of the syntax: old,states,here -> newstate
oldstates, newstate = [x.strip() for x in value.split('->')]
except ValueError:
continue # Syntax error, a warning will be logged later
actions[name]['oldstates'] = to_list(oldstates)
actions[name]['newstate'] = newstate
else:
attribute = parts[1]
if attribute not in known_attrs.keys() or \
isinstance(known_attrs[attribute], str):
actions[name][attribute] = value
elif isinstance(known_attrs[attribute], int):
actions[name][attribute] = int(value)
elif isinstance(known_attrs[attribute], list):
actions[name][attribute] = to_list(value)
for action, attributes in actions.items():
if 'label' not in attributes:
if 'name' in attributes: # backwards-compatibility, #11828
attributes['label'] = attributes['name']
else:
attributes['label'] = action.replace("_", " ").strip()
for key, val in required_attrs.items():
attributes.setdefault(key, val)
return actions
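# Illustrative sketch (not part of Trac): parse_workflow_config applied to a
# handful of raw [ticket-workflow] options.  The 'resolve' action below is an
# example definition, not an official workflow.
#
#     parse_workflow_config([
#         ('resolve', 'new,assigned,accepted,reopened -> closed'),
#         ('resolve.operations', 'set_resolution'),
#         ('resolve.permissions', 'TICKET_MODIFY'),
#     ])['resolve']
#
# returns roughly (key order may differ):
#     {'oldstates': ['new', 'assigned', 'accepted', 'reopened'],
#      'newstate': 'closed', 'name': '', 'label': 'resolve', 'default': 0,
#      'operations': ['set_resolution'], 'permissions': ['TICKET_MODIFY']}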
def get_workflow_config(config):
"""Usually passed self.config, this will return the parsed ticket-workflow
section.
"""
raw_actions = list(config.options('ticket-workflow'))
actions = parse_workflow_config(raw_actions)
return actions
def load_workflow_config_snippet(config, filename):
"""Loads the ticket-workflow section from the given file (expected to be in
the 'workflows' tree) into the provided config.
"""
filename = resource_filename('trac.ticket', 'workflows/%s' % filename)
new_config = Configuration(filename)
for name, value in new_config.options('ticket-workflow'):
config.set('ticket-workflow', name, value)
class ConfigurableTicketWorkflow(Component):
"""Ticket action controller which provides actions according to a
workflow defined in trac.ini.
The workflow is defined in the `[ticket-workflow]` section of the
[wiki:TracIni#ticket-workflow-section trac.ini] configuration file.
"""
implements(IEnvironmentSetupParticipant, ITicketActionController)
ticket_workflow_section = ConfigSection('ticket-workflow',
"""The workflow for tickets is controlled by plugins. By default,
there's only a `ConfigurableTicketWorkflow` component in charge.
That component allows the workflow to be configured via this section
in the `trac.ini` file. See TracWorkflow for more details.
""")
def __init__(self):
self.actions = self.get_all_actions()
self.log.debug('Workflow actions at initialization: %s\n',
self.actions)
# IEnvironmentSetupParticipant methods
def environment_created(self):
"""When an environment is created, we provide the basic-workflow,
unless a ticket-workflow section already exists.
"""
if 'ticket-workflow' not in self.config.sections():
load_workflow_config_snippet(self.config, 'basic-workflow.ini')
self.config.save()
self.actions = self.get_all_actions()
def environment_needs_upgrade(self):
"""The environment needs an upgrade if there is no [ticket-workflow]
section in the config.
"""
return not list(self.config.options('ticket-workflow'))
def upgrade_environment(self):
"""Insert a [ticket-workflow] section using the original-workflow"""
load_workflow_config_snippet(self.config, 'original-workflow.ini')
self.config.save()
self.actions = self.get_all_actions()
info_message = """
==== Upgrade Notice ====
The ticket Workflow is now configurable.
Your environment has been upgraded, but configured to use the original
workflow. It is recommended that you look at changing this configuration to use
basic-workflow.
Read TracWorkflow for more information (don't forget to 'wiki upgrade' as well)
"""
self.log.info(info_message.replace('\n', ' ').replace('==', ''))
print(info_message)
# ITicketActionController methods
def get_ticket_actions(self, req, ticket):
"""Returns a list of (weight, action) tuples that are valid for this
request and this ticket."""
# Get the list of actions that can be performed
# Determine the current status of this ticket. If this ticket is in
# the process of being modified, we need to base our information on the
# pre-modified state so that we don't try to do two (or more!) steps at
# once and get really confused.
status = ticket._old.get('status', ticket['status'])
exists = status is not None
ticket_perm = req.perm(ticket.resource)
allowed_actions = []
for action_name, action_info in self.actions.items():
oldstates = action_info['oldstates']
if exists and oldstates == ['*'] or status in oldstates:
# This action is valid in this state. Check permissions.
required_perms = action_info['permissions']
if self._is_action_allowed(ticket_perm, required_perms):
allowed_actions.append((action_info['default'],
action_name))
# Append special `_reset` action if status is invalid.
if exists and status not in TicketSystem(self.env).get_all_status():
reset = self.actions['_reset']
required_perms = reset['permissions']
if self._is_action_allowed(ticket_perm, required_perms):
allowed_actions.append((reset['default'], '_reset'))
return allowed_actions
def _is_action_allowed(self, ticket_perm, required_perms):
if not required_perms:
return True
for permission in required_perms:
if permission in ticket_perm:
return True
return False
def get_all_status(self):
"""Return a list of all states described by the configuration.
"""
all_status = set()
for attributes in self.actions.values():
all_status.update(attributes['oldstates'])
all_status.add(attributes['newstate'])
all_status.discard('*')
all_status.discard('')
all_status.discard(None)
return all_status
def render_ticket_action_control(self, req, ticket, action):
self.log.debug('render_ticket_action_control: action "%s"', action)
this_action = self.actions[action]
status = this_action['newstate']
operations = this_action['operations']
current_owner = ticket._old.get('owner', ticket['owner'])
author = get_reporter_id(req, 'author')
author_info = partial(Chrome(self.env).authorinfo, req,
resource=ticket.resource)
format_author = partial(Chrome(self.env).format_author, req,
resource=ticket.resource)
formatted_current_owner = author_info(current_owner)
exists = ticket._old.get('status', ticket['status']) is not None
control = [] # default to nothing
hints = []
if 'reset_workflow' in operations:
control.append(_("from invalid state"))
hints.append(_("Current state no longer exists"))
if 'del_owner' in operations:
hints.append(_("The ticket will be disowned"))
if 'set_owner' in operations or 'may_set_owner' in operations:
if 'set_owner' in this_action:
owners = self._to_users(this_action['set_owner'], ticket)
elif self.config.getbool('ticket', 'restrict_owner'):
perm = PermissionSystem(self.env)
owners = perm.get_users_with_permission('TICKET_MODIFY')
owners = [user for user in owners
if 'TICKET_MODIFY'
in PermissionCache(self.env, user,
ticket.resource)]
owners = sorted(owners)
else:
owners = None
if 'set_owner' in operations:
default_owner = author
elif 'may_set_owner' in operations:
if not exists:
default_owner = TicketSystem(self.env).default_owner
else:
default_owner = ticket._old.get('owner',
ticket['owner'] or None)
if owners is not None and default_owner not in owners:
owners.insert(0, default_owner)
else:
# Protect against future modification for case that another
# operation is added to the outer conditional
raise AssertionError(operations)
id = 'action_%s_reassign_owner' % action
if not owners:
owner = req.args.get(id, default_owner)
control.append(
tag_("to %(owner)s",
owner=tag.input(type='text', id=id, name=id,
value=owner)))
if not exists or current_owner is None:
hints.append(_("The owner will be the specified user"))
else:
hints.append(tag_("The owner will be changed from "
"%(current_owner)s to the specified "
"user",
current_owner=formatted_current_owner))
elif len(owners) == 1:
owner = tag.input(type='hidden', id=id, name=id,
value=owners[0])
formatted_new_owner = author_info(owners[0])
control.append(tag_("to %(owner)s",
owner=tag(formatted_new_owner, owner)))
if not exists or current_owner is None:
hints.append(tag_("The owner will be %(new_owner)s",
new_owner=formatted_new_owner))
elif ticket['owner'] != owners[0]:
hints.append(tag_("The owner will be changed from "
"%(current_owner)s to %(new_owner)s",
current_owner=formatted_current_owner,
new_owner=formatted_new_owner))
else:
selected_owner = req.args.get(id, default_owner)
control.append(tag_("to %(owner)s", owner=tag.select(
[tag.option(format_author(x),
value=x if x is not None else '',
selected=(x == selected_owner or None))
for x in owners],
id=id, name=id)))
if not exists or current_owner is None:
hints.append(_("The owner will be the selected user"))
else:
hints.append(tag_("The owner will be changed from "
"%(current_owner)s to the selected user",
current_owner=formatted_current_owner))
elif 'set_owner_to_self' in operations and \
ticket._old.get('owner', ticket['owner']) != author:
formatted_author = author_info(author)
if not exists or current_owner is None:
hints.append(tag_("The owner will be %(new_owner)s",
new_owner=formatted_author))
else:
hints.append(tag_("The owner will be changed from "
"%(current_owner)s to %(new_owner)s",
current_owner=formatted_current_owner,
new_owner=formatted_author))
if 'set_resolution' in operations:
if 'set_resolution' in this_action:
resolutions = this_action['set_resolution']
else:
resolutions = [r.name for r in Resolution.select(self.env)]
if not resolutions:
raise TracError(_("Your workflow attempts to set a resolution "
"but none is defined (configuration issue, "
"please contact your Trac admin)."))
id = 'action_%s_resolve_resolution' % action
if len(resolutions) == 1:
resolution = tag.input(type='hidden', id=id, name=id,
value=resolutions[0])
control.append(tag_("as %(resolution)s",
resolution=tag(resolutions[0],
resolution)))
hints.append(tag_("The resolution will be set to %(name)s",
name=resolutions[0]))
else:
selected_option = req.args.get(id,
TicketSystem(self.env).default_resolution)
control.append(tag_("as %(resolution)s",
resolution=tag.select(
[tag.option(x, value=x,
selected=(x == selected_option or None))
for x in resolutions],
id=id, name=id)))
hints.append(_("The resolution will be set"))
if 'del_resolution' in operations:
hints.append(_("The resolution will be deleted"))
if 'leave_status' in operations:
control.append(tag_("as %(status)s",
status=ticket._old.get('status',
ticket['status'])))
if len(operations) == 1:
hints.append(tag_("The owner will remain %(current_owner)s",
current_owner=formatted_current_owner)
if current_owner else
_("The ticket will remain with no owner"))
else:
if ticket['status'] is None:
hints.append(tag_("The status will be '%(name)s'",
name=status))
elif status != '*':
hints.append(tag_("Next status will be '%(name)s'",
name=status))
return (this_action['label'], tag(separated(control, ' ')),
tag(separated(hints, '. ', '.') if hints else ''))
def get_ticket_changes(self, req, ticket, action):
this_action = self.actions[action]
# Enforce permissions
if not self._has_perms_for_action(req, this_action, ticket.resource):
# The user does not have any of the listed permissions, so we won't
# do anything.
return {}
updated = {}
# Status changes
status = this_action['newstate']
if status != '*':
updated['status'] = status
for operation in this_action['operations']:
if operation == 'del_owner':
updated['owner'] = ''
elif operation in ('set_owner', 'may_set_owner'):
set_owner = this_action.get('set_owner')
newowner = req.args.get('action_%s_reassign_owner' % action,
set_owner[0] if set_owner else '')
# If there was already an owner, we get a list, [new, old],
# but if there wasn't we just get new.
if type(newowner) == list:
newowner = newowner[0]
updated['owner'] = self._sub_owner_keyword(newowner, ticket)
elif operation == 'set_owner_to_self':
updated['owner'] = get_reporter_id(req, 'author')
elif operation == 'del_resolution':
updated['resolution'] = ''
elif operation == 'set_resolution':
set_resolution = this_action.get('set_resolution')
newresolution = req.args.get('action_%s_resolve_resolution'
% action,
set_resolution[0]
if set_resolution else '')
updated['resolution'] = newresolution
# reset_workflow is just a no-op here, so we don't look for it.
# leave_status is just a no-op here, so we don't look for it.
# Set owner to component owner for 'new' ticket if:
# - ticket doesn't exist and owner is < default >
# - component is changed
# - owner isn't explicitly changed
# - ticket owner is equal to owner of previous component
# - new component has an owner
if not ticket.exists and 'owner' not in updated:
updated['owner'] = self._sub_owner_keyword(ticket['owner'], ticket)
elif ticket['status'] == 'new' and \
'component' in ticket.values and \
'component' in ticket._old and \
'owner' not in updated:
try:
old_comp = TicketComponent(self.env, ticket._old['component'])
except ResourceNotFound:
# If the old component has been removed from the database
# we just leave the owner as is.
pass
else:
old_owner = old_comp.owner or ''
current_owner = ticket['owner'] or ''
if old_owner == current_owner:
new_comp = TicketComponent(self.env, ticket['component'])
if new_comp.owner:
updated['owner'] = new_comp.owner
return updated
def apply_action_side_effects(self, req, ticket, action):
pass
def _has_perms_for_action(self, req, action, resource):
required_perms = action['permissions']
if required_perms:
for permission in required_perms:
if permission in req.perm(resource):
break
else:
# The user does not have any of the listed permissions
return False
return True
# Public methods (for other ITicketActionControllers that want to use
# our config file and provide an operation for an action)
def get_all_actions(self):
actions = parse_workflow_config(self.ticket_workflow_section.options())
# Special action that gets enabled if the current status no longer
# exists, as no other action can then change its state. (#5307/#11850)
reset = {
'default': 0,
'label': 'reset',
'newstate': 'new',
'oldstates': [],
'operations': ['reset_workflow'],
'permissions': ['TICKET_ADMIN']
}
for key, val in reset.items():
actions['_reset'].setdefault(key, val)
for name, info in actions.iteritems():
for val in ('<none>', '< none >'):
sub_val(actions[name]['oldstates'], val, None)
if not info['newstate']:
self.log.warning("Ticket workflow action '%s' doesn't define "
"any transitions", name)
return actions
def get_actions_by_operation(self, operation):
"""Return a list of all actions with a given operation
(for use in the controller's get_all_status())
"""
actions = [(info['default'], action) for action, info
in self.actions.items()
if operation in info['operations']]
return actions
def get_actions_by_operation_for_req(self, req, ticket, operation):
"""Return list of all actions with a given operation that are valid
in the given state for the controller's get_ticket_actions().
If state='*' (the default), all actions with the given operation are
returned.
"""
# Be sure to look at the original status.
status = ticket._old.get('status', ticket['status'])
actions = [(info['default'], action)
for action, info in self.actions.items()
if operation in info['operations'] and
('*' in info['oldstates'] or
status in info['oldstates']) and
self._has_perms_for_action(req, info, ticket.resource)]
return actions
# Internal methods
def _sub_owner_keyword(self, owner, ticket):
"""Substitute keywords from the default_owner field.
< default > -> component owner
"""
if owner in ('< default >', '<default>'):
default_owner = ''
if ticket['component']:
try:
component = TicketComponent(self.env, ticket['component'])
except ResourceNotFound:
pass # No such component exists
else:
default_owner = component.owner # May be empty
return default_owner
return owner
def _to_users(self, users_perms_and_groups, ticket):
"""Finds all users contained in the list of `users_perms_and_groups`
by recursive lookup of users when a `group` is encountered.
"""
ps = PermissionSystem(self.env)
groups = ps.get_groups_dict()
def append_owners(users_perms_and_groups):
for user_perm_or_group in users_perms_and_groups:
if user_perm_or_group == 'authenticated':
owners.update(set(u[0] for u in self.env.get_known_users()))
elif user_perm_or_group.isupper():
perm = user_perm_or_group
for user in ps.get_users_with_permission(perm):
if perm in PermissionCache(self.env, user,
ticket.resource):
owners.add(user)
elif user_perm_or_group not in groups:
owners.add(user_perm_or_group)
else:
append_owners(groups[user_perm_or_group])
owners = set()
append_owners(users_perms_and_groups)
return sorted(owners)
class WorkflowMacro(WikiMacroBase):
_domain = 'messages'
_description = cleandoc_(
"""Render a workflow graph.
This macro accepts a TracWorkflow configuration and renders the states
and transitions as a directed graph. If no parameters are given, the
current ticket workflow is rendered. In WikiProcessors mode the `width`
and `height` arguments can be specified.
(Defaults: `width = 800` and `height = 600`)
Examples:
{{{
[[Workflow()]]
[[Workflow(go = here -> there; return = there -> here)]]
{{{
#!Workflow width=700 height=700
leave = * -> *
leave.operations = leave_status
leave.default = 1
create = <none> -> new
create.default = 1
create_and_assign = <none> -> assigned
create_and_assign.label = assign
create_and_assign.permissions = TICKET_MODIFY
create_and_assign.operations = may_set_owner
accept = new,assigned,accepted,reopened -> accepted
accept.permissions = TICKET_MODIFY
accept.operations = set_owner_to_self
resolve = new,assigned,accepted,reopened -> closed
resolve.permissions = TICKET_MODIFY
resolve.operations = set_resolution
reassign = new,assigned,accepted,reopened -> assigned
reassign.permissions = TICKET_MODIFY
reassign.operations = set_owner
reopen = closed -> reopened
reopen.permissions = TICKET_CREATE
reopen.operations = del_resolution
}}}
}}}
""")
def expand_macro(self, formatter, name, text, args):
if not text:
raw_actions = self.config.options('ticket-workflow')
else:
if args is None:
text = '\n'.join([line.lstrip() for line in text.split(';')])
if '[ticket-workflow]' not in text:
text = '[ticket-workflow]\n' + text
parser = RawConfigParser()
try:
parser.readfp(StringIO(text))
except ParsingError as e:
return system_message(_("Error parsing workflow."),
unicode(e))
raw_actions = list(parser.items('ticket-workflow'))
actions = parse_workflow_config(raw_actions)
states = list(set(
[state for action in actions.itervalues()
for state in action['oldstates']] +
[action['newstate'] for action in actions.itervalues()]))
action_labels = [attrs['label'] for attrs in actions.values()]
action_names = actions.keys()
edges = []
for name, action in actions.items():
new_index = states.index(action['newstate'])
name_index = action_names.index(name)
for old_state in action['oldstates']:
old_index = states.index(old_state)
edges.append((old_index, new_index, name_index))
args = args or {}
width = args.get('width', 800)
height = args.get('height', 600)
graph = {'nodes': states, 'actions': action_labels, 'edges': edges,
'width': width, 'height': height}
graph_id = '%012x' % id(graph)
req = formatter.req
add_script(req, 'common/js/excanvas.js', ie_if='IE')
add_script(req, 'common/js/workflow_graph.js')
add_script_data(req, {'graph_%s' % graph_id: graph})
return tag(
tag.div('', class_='trac-workflow-graph trac-noscript',
id='trac-workflow-graph-%s' % graph_id,
style="display:inline-block;width:%spx;height:%spx" %
(width, height)),
tag.noscript(
tag.div(_("Enable JavaScript to display the workflow graph."),
class_='system-message')))
|
|
from django.utils.translation import ugettext_lazy as _
COUNTRIES = (
('AD', _('Andorra')),
('AE', _('United Arab Emirates')),
('AF', _('Afghanistan')),
('AG', _('Antigua & Barbuda')),
('AI', _('Anguilla')),
('AL', _('Albania')),
('AM', _('Armenia')),
('AN', _('Netherlands Antilles')),
('AO', _('Angola')),
('AQ', _('Antarctica')),
('AR', _('Argentina')),
('AS', _('American Samoa')),
('AT', _('Austria')),
('AU', _('Australia')),
('AW', _('Aruba')),
('AZ', _('Azerbaijan')),
('BA', _('Bosnia and Herzegovina')),
('BB', _('Barbados')),
('BD', _('Bangladesh')),
('BE', _('Belgium')),
('BF', _('Burkina Faso')),
('BG', _('Bulgaria')),
('BH', _('Bahrain')),
('BI', _('Burundi')),
('BJ', _('Benin')),
('BM', _('Bermuda')),
('BN', _('Brunei Darussalam')),
('BO', _('Bolivia')),
('BR', _('Brazil')),
('BS', _('Bahama')),
('BT', _('Bhutan')),
('BV', _('Bouvet Island')),
('BW', _('Botswana')),
('BY', _('Belarus')),
('BZ', _('Belize')),
('CA', _('Canada')),
('CC', _('Cocos (Keeling) Islands')),
('CF', _('Central African Republic')),
('CG', _('Congo')),
('CH', _('Switzerland')),
('CI', _('Ivory Coast')),
    ('CK', _('Cook Islands')),
('CL', _('Chile')),
('CM', _('Cameroon')),
('CN', _('China')),
('CO', _('Colombia')),
('CR', _('Costa Rica')),
('CU', _('Cuba')),
('CV', _('Cape Verde')),
('CX', _('Christmas Island')),
('CY', _('Cyprus')),
('CZ', _('Czech Republic')),
('DE', _('Germany')),
('DJ', _('Djibouti')),
('DK', _('Denmark')),
('DM', _('Dominica')),
('DO', _('Dominican Republic')),
('DZ', _('Algeria')),
('EC', _('Ecuador')),
('EE', _('Estonia')),
('EG', _('Egypt')),
('EH', _('Western Sahara')),
('ER', _('Eritrea')),
('ES', _('Spain')),
('ET', _('Ethiopia')),
('FI', _('Finland')),
('FJ', _('Fiji')),
('FK', _('Falkland Islands (Malvinas)')),
('FM', _('Micronesia')),
('FO', _('Faroe Islands')),
('FR', _('France')),
('FX', _('France, Metropolitan')),
('GA', _('Gabon')),
('GB', _('United Kingdom (Great Britain)')),
('GD', _('Grenada')),
('GE', _('Georgia')),
('GF', _('French Guiana')),
('GH', _('Ghana')),
('GI', _('Gibraltar')),
('GL', _('Greenland')),
('GM', _('Gambia')),
('GN', _('Guinea')),
('GP', _('Guadeloupe')),
('GQ', _('Equatorial Guinea')),
('GR', _('Greece')),
('GS', _('South Georgia and the South Sandwich Islands')),
('GT', _('Guatemala')),
('GU', _('Guam')),
('GW', _('Guinea-Bissau')),
('GY', _('Guyana')),
('HK', _('Hong Kong')),
('HM', _('Heard & McDonald Islands')),
('HN', _('Honduras')),
('HR', _('Croatia')),
('HT', _('Haiti')),
('HU', _('Hungary')),
('ID', _('Indonesia')),
('IE', _('Ireland')),
('IL', _('Israel')),
('IN', _('India')),
('IO', _('British Indian Ocean Territory')),
('IQ', _('Iraq')),
('IR', _('Islamic Republic of Iran')),
('IS', _('Iceland')),
('IT', _('Italy')),
('JM', _('Jamaica')),
('JO', _('Jordan')),
('JP', _('Japan')),
('KE', _('Kenya')),
('KG', _('Kyrgyzstan')),
('KH', _('Cambodia')),
('KI', _('Kiribati')),
('KM', _('Comoros')),
('KN', _('St. Kitts and Nevis')),
('KP', _('Korea, Democratic People\'s Republic of')),
('KR', _('Korea, Republic of')),
('KW', _('Kuwait')),
('KY', _('Cayman Islands')),
('KZ', _('Kazakhstan')),
('LA', _('Lao People\'s Democratic Republic')),
('LB', _('Lebanon')),
('LC', _('Saint Lucia')),
('LI', _('Liechtenstein')),
('LK', _('Sri Lanka')),
('LR', _('Liberia')),
('LS', _('Lesotho')),
('LT', _('Lithuania')),
('LU', _('Luxembourg')),
('LV', _('Latvia')),
('LY', _('Libyan Arab Jamahiriya')),
('MA', _('Morocco')),
('MC', _('Monaco')),
('MD', _('Moldova, Republic of')),
('MG', _('Madagascar')),
('MH', _('Marshall Islands')),
('ML', _('Mali')),
('MN', _('Mongolia')),
('MM', _('Myanmar')),
('MO', _('Macau')),
('MP', _('Northern Mariana Islands')),
('MQ', _('Martinique')),
('MR', _('Mauritania')),
    ('MS', _('Montserrat')),
('MT', _('Malta')),
('MU', _('Mauritius')),
('MV', _('Maldives')),
('MW', _('Malawi')),
('MX', _('Mexico')),
('MY', _('Malaysia')),
('MZ', _('Mozambique')),
('NA', _('Namibia')),
('NC', _('New Caledonia')),
('NE', _('Niger')),
('NF', _('Norfolk Island')),
('NG', _('Nigeria')),
('NI', _('Nicaragua')),
('NL', _('Netherlands')),
('NO', _('Norway')),
('NP', _('Nepal')),
('NR', _('Nauru')),
('NU', _('Niue')),
('NZ', _('New Zealand')),
('OM', _('Oman')),
('PA', _('Panama')),
('PE', _('Peru')),
('PF', _('French Polynesia')),
('PG', _('Papua New Guinea')),
('PH', _('Philippines')),
('PK', _('Pakistan')),
('PL', _('Poland')),
('PM', _('St. Pierre & Miquelon')),
('PN', _('Pitcairn')),
('PR', _('Puerto Rico')),
('PT', _('Portugal')),
('PW', _('Palau')),
('PY', _('Paraguay')),
('QA', _('Qatar')),
('RE', _('Reunion')),
('RO', _('Romania')),
('RU', _('Russian Federation')),
('RW', _('Rwanda')),
('SA', _('Saudi Arabia')),
('SB', _('Solomon Islands')),
('SC', _('Seychelles')),
('SD', _('Sudan')),
('SE', _('Sweden')),
('SG', _('Singapore')),
('SH', _('St. Helena')),
('SI', _('Slovenia')),
('SJ', _('Svalbard & Jan Mayen Islands')),
('SK', _('Slovakia')),
('SL', _('Sierra Leone')),
('SM', _('San Marino')),
('SN', _('Senegal')),
('SO', _('Somalia')),
('SR', _('Suriname')),
('ST', _('Sao Tome & Principe')),
('SV', _('El Salvador')),
('SY', _('Syrian Arab Republic')),
('SZ', _('Swaziland')),
('TC', _('Turks & Caicos Islands')),
('TD', _('Chad')),
('TF', _('French Southern Territories')),
('TG', _('Togo')),
('TH', _('Thailand')),
('TJ', _('Tajikistan')),
('TK', _('Tokelau')),
('TM', _('Turkmenistan')),
('TN', _('Tunisia')),
('TO', _('Tonga')),
('TP', _('East Timor')),
('TR', _('Turkey')),
('TT', _('Trinidad & Tobago')),
('TV', _('Tuvalu')),
('TW', _('Taiwan, Province of China')),
('TZ', _('Tanzania, United Republic of')),
('UA', _('Ukraine')),
('UG', _('Uganda')),
('UM', _('United States Minor Outlying Islands')),
('US', _('United States of America')),
('UY', _('Uruguay')),
('UZ', _('Uzbekistan')),
('VA', _('Vatican City State (Holy See)')),
('VC', _('St. Vincent & the Grenadines')),
('VE', _('Venezuela')),
('VG', _('British Virgin Islands')),
('VI', _('United States Virgin Islands')),
('VN', _('Viet Nam')),
('VU', _('Vanuatu')),
('WF', _('Wallis & Futuna Islands')),
('WS', _('Samoa')),
('YE', _('Yemen')),
('YT', _('Mayotte')),
('YU', _('Yugoslavia')),
('ZA', _('South Africa')),
('ZM', _('Zambia')),
('ZR', _('Zaire')),
('ZW', _('Zimbabwe')),
('ZZ', _('Other')),
)
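# Illustrative usage sketch (not part of this module): COUNTRIES is in the
# standard Django "choices" format, so it is typically plugged into a model or
# form field.  The model and field names below are hypothetical.
#
# from django.db import models
#
# class Address(models.Model):
#     country = models.CharField(max_length=2, choices=COUNTRIES, default='ZZ')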
|
|
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import functools
import logging
import threading
from botocore.exceptions import MissingParametersError
from botocore.exceptions import UnknownParameterError
from botocore.exceptions import NoRegionError
from botocore.paginate import DeprecatedPaginator
from botocore.signers import RequestSigner
from botocore import serialize
from botocore import BotoCoreObject, xform_name
from botocore.validate import ParamValidator
from botocore.exceptions import ParamValidationError
logger = logging.getLogger(__name__)
class Operation(BotoCoreObject):
_DEFAULT_PAGINATOR_CLS = DeprecatedPaginator
def __init__(self, service, op_data, model, paginator_cls=None):
self.input = {}
self.output = {}
self._model = model
BotoCoreObject.__init__(self, **op_data)
self.service = service
if self.service:
self.session = self.service.session
else:
self.session = None
self.type = 'operation'
self._params = None
if paginator_cls is None:
paginator_cls = self._DEFAULT_PAGINATOR_CLS
self._paginator_cls = paginator_cls
self._lock = threading.Lock()
def __repr__(self):
return 'Operation:%s' % self.name
@property
def model(self):
return self._model
@property
def output_shape(self):
return self._model.output_shape
@property
def signature_version(self):
return self.service.signature_version
def _get_signature_version_and_region(self, endpoint, service_model):
# An endpoint-aware signature version and region check
scoped_config = self.session.get_scoped_config()
resolver = self.session.get_component('endpoint_resolver')
scheme = endpoint.host.split(':')[0]
if endpoint.region_name is None:
raise NoRegionError(env_var='region')
endpoint_config = resolver.construct_endpoint(
service_model.endpoint_prefix,
endpoint.region_name, scheme=scheme)
# Signature version override from endpoint
signature_version = self.service.signature_version
if 'signatureVersion' in endpoint_config.get('properties', {}):
signature_version = endpoint_config['properties']\
['signatureVersion']
# Signature overrides from a configuration file
if scoped_config is not None:
service_config = scoped_config.get(service_model.endpoint_prefix)
if service_config is not None and isinstance(service_config, dict):
override = service_config.get('signature_version')
if override:
logger.debug(
"Switching signature version for service %s "
"to version %s based on config file override.",
service_model.endpoint_prefix, override)
signature_version = override
return signature_version, endpoint.region_name
def call(self, endpoint, **kwargs):
logger.debug("%s called with kwargs: %s", self, kwargs)
# It probably seems a little weird to be firing two different
# events here. The reason is that the first event is fired
# with the parameters exactly as supplied. The second event
# is fired with the built parameters. Generally, it's easier
# to manipulate the former but at times, like with ReST operations
# that build an XML or JSON payload, you have to wait for
        # build_parameters to do its job and the latter is necessary.
event = self.session.create_event('before-parameter-build',
self.service.endpoint_prefix,
self.name)
self.session.emit(event, endpoint=endpoint,
model=self.model,
params=kwargs)
request_dict = self.build_parameters(**kwargs)
service_name = self.service.service_name
service_model = self.session.get_service_model(service_name)
signature_version, region_name = \
self._get_signature_version_and_region(
endpoint, service_model)
credentials = self.session.get_credentials()
event_emitter = self.session.get_component('event_emitter')
signer = RequestSigner(service_model.service_name,
region_name, service_model.signing_name,
signature_version, credentials,
event_emitter)
event = self.session.create_event('before-call',
self.service.endpoint_prefix,
self.name)
        # The operation kwargs are passed in kwargs to support
# handlers that still depend on this value. Eventually
# everything should move over to the model/endpoint args.
self.session.emit(event, endpoint=endpoint,
model=self.model,
params=request_dict,
operation=self,
request_signer=signer)
# Here we register to the specific request-created event
# for this operation. Since it's possible to run the same
# operation in multiple threads, we used a lock to prevent
# issues. It's possible a request will be signed more than
# once. Once the request has been made, we unregister the
# handler.
def request_created(request, **kwargs):
# This first check lets us quickly determine when
# a request has already been signed without needing
# to acquire the lock.
if not getattr(request, '_is_signed', False):
with self._lock:
if not getattr(request, '_is_signed', False):
signer.sign(self.name, request)
request._is_signed = True
event_emitter.register('request-created.{0}.{1}'.format(
self.service.endpoint_prefix, self.name), request_created)
try:
response = endpoint.make_request(self.model, request_dict)
finally:
event_emitter.unregister('request-created.{0}.{1}'.format(
self.service.endpoint_prefix, self.name), request_created)
event = self.session.create_event('after-call',
self.service.endpoint_prefix,
self.name)
self.session.emit(event,
http_response=response[0],
model=self.model,
operation=self,
parsed=response[1])
return response
@property
def pagination(self):
try:
return self._load_pagination_config()
except Exception as e:
return {}
@property
def can_paginate(self):
try:
self._load_pagination_config()
except Exception as e:
return False
return True
def paginate(self, endpoint, **kwargs):
"""Iterate over the responses of an operation.
This will return an iterator with each element
being a tuple of (``http_response``, ``parsed_response``).
If the operation does not paginate, a ``TypeError`` will
be raised. You can check if an operation can be paginated
by using the ``can_paginate`` arg.
"""
if not self.can_paginate:
raise TypeError("Operation cannot be paginated: %s" % self)
config = self._load_pagination_config()
paginator = self._paginator_cls(self, config)
return paginator.paginate(endpoint, **kwargs)
def _load_pagination_config(self):
loader = self.session.get_component('data_loader')
api_version = self.service.api_version
config = loader.load_data('aws/%s/%s.paginators' %
(self.service.service_name, api_version))
return config['pagination'][self.name]
@property
def params(self):
raise RuntimeError(
"Attempted to access removed parameter objects in botocore.")
if self._params is None:
self._params = self._create_parameter_objects()
return self._params
def _create_parameter_objects(self):
"""
Build the list of Parameter objects for this operation.
"""
logger.debug("Creating parameter objects for: %s", self)
params = []
return params
def _find_payload(self):
"""
Searches the parameters for an operation to find the payload
parameter, if it exists. Returns that param or None.
"""
payload = None
for param in self.params:
if hasattr(param, 'payload') and param.payload:
payload = param
break
return payload
def build_parameters(self, **kwargs):
"""
Returns a dictionary containing the kwargs for the
given operation formatted as required to pass to the service
in a request.
"""
protocol = self._model.metadata['protocol']
input_shape = self._model.input_shape
if input_shape is not None:
self._convert_kwargs_to_correct_casing(kwargs)
validator = ParamValidator()
errors = validator.validate(kwargs, self._model.input_shape)
if errors.has_errors():
raise ParamValidationError(report=errors.generate_report())
serializer = serialize.create_serializer(protocol)
request_dict = serializer.serialize_to_request(kwargs, self._model)
return request_dict
def _convert_kwargs_to_correct_casing(self, kwargs):
# XXX: This will be removed in botocore 1.0, but we should
# support snake casing for now.
# First we're going to build a map of snake_casing -> service casing
actual_casing = list(self._model.input_shape.members)
mapping = {}
for key in actual_casing:
transformed = xform_name(key)
if key != transformed:
mapping[xform_name(key)] = key
# Look for anything in the user provided kwargs that is in the mapping
# dict and convert appropriately.
for key in list(kwargs):
if key in mapping:
# TODO: add a pending deprecation warning.
value = kwargs[key]
kwargs[mapping[key]] = value
del kwargs[key]
def _check_for_unknown_params(self, kwargs):
valid_names = [p.py_name for p in self.params]
for key in kwargs:
if key not in valid_names:
raise UnknownParameterError(name=key, operation=self,
choices=', '.join(valid_names))
def is_streaming(self):
# TODO: add deprecation warning
return self._model.has_streaming_output
@property
def has_streaming_output(self):
return self._model.has_streaming_output
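# --- Hedged illustration (not part of botocore) -------------------------------
# The ``request_created`` handler inside ``call()`` above uses double-checked
# locking so that a request shared across threads is signed at most once.
# Minimal standalone sketch of that pattern; every name below is made up.
import threading
_demo_sign_lock = threading.Lock()
def _demo_sign_once(request, sign):
    """Apply ``sign`` to ``request`` exactly once, even under concurrency."""
    # Fast path: skip acquiring the lock when the flag is already set.
    if not getattr(request, '_is_signed', False):
        with _demo_sign_lock:
            # Re-check under the lock: another thread may have signed the
            # request while we were waiting for the lock.
            if not getattr(request, '_is_signed', False):
                sign(request)
                request._is_signed = True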
|
|
"""Implementation of the Seabreeze Transport layer.
Some spectrometers can support different transports (usb, network, rs232, etc.)
"""
from __future__ import annotations
import importlib
import inspect
import logging
import warnings
from functools import partialmethod
from typing import TYPE_CHECKING
from typing import Any
from typing import Iterable
from typing import Tuple
import usb.backend
import usb.core
import usb.util
from seabreeze.pyseabreeze.types import PySeaBreezeProtocol
from seabreeze.pyseabreeze.types import PySeaBreezeTransport
if TYPE_CHECKING:
from seabreeze.pyseabreeze.devices import EndPointMap
# encapsulate usb.core.USBError
class USBTransportError(Exception):
def __init__(
self, *args: Any, errno: int | None = None, error_code: int | None = None
) -> None:
super().__init__(*args)
self.errno = errno
self.backend_error_code = error_code
@classmethod
def from_usberror(cls, err: usb.core.USBError) -> USBTransportError:
return cls(str(err), errno=err.errno, error_code=err.backend_error_code)
class USBTransportDeviceInUse(Exception):
pass
DeviceIdentity = Tuple[int, int, int, int]
# this can and should be opaque to pyseabreeze
class USBTransportHandle:
def __init__(self, pyusb_device: usb.core.Device) -> None:
"""encapsulation for pyusb device classes
Parameters
----------
pyusb_device
"""
self.pyusb_device: usb.core.Device = pyusb_device
# noinspection PyUnresolvedReferences
self.identity: DeviceIdentity = (
pyusb_device.idVendor,
pyusb_device.idProduct,
pyusb_device.bus,
pyusb_device.address,
)
self.pyusb_backend = get_name_from_pyusb_backend(pyusb_device.backend)
def close(self) -> None:
try:
self.pyusb_device.reset()
except usb.core.USBError:
logging.debug(
"USBError while calling USBTransportHandle.close on {:04x}:{:04x}".format(
self.identity[0], self.identity[1]
),
exc_info=True,
)
def __del__(self) -> None:
if self.pyusb_backend == "libusb1":
# have to check if .finalize() has been called
# -> todo: maybe better to fix this in the api initialization of cseabreeze
# -> todo: will probably have to check pyusb versions and only do this when necessary
if not getattr(self.pyusb_device.backend, "_finalize_called", False):
# if usb.core.Device.reset() gets called but the backend has been finalized already
# (this happens only during interpreter shutdown)
self.close()
else:
self.close()
self.pyusb_device = None
class USBTransport(PySeaBreezeTransport[USBTransportHandle]):
"""implementation of the usb transport interface for spectrometers"""
_required_init_kwargs = ("usb_product_id", "usb_endpoint_map", "usb_protocol")
vendor_id = 0x2457
product_ids: dict[int, str] = {}
# add logging
_log = logging.getLogger(__name__)
def __init__(
self,
usb_product_id: int,
usb_endpoint_map: EndPointMap,
usb_protocol: type[PySeaBreezeProtocol],
) -> None:
super().__init__()
self._product_id = usb_product_id
self._endpoint_map = usb_endpoint_map
self._protocol_cls = usb_protocol
# internal settings
self._default_read_size = {
"low_speed": 64,
"high_speed": 512,
"high_speed_alt": 512,
}
self._read_endpoints = {
"low_speed": "lowspeed_in",
"high_speed": "highspeed_in",
"high_speed_alt": "highspeed_in2",
}
self._default_read_endpoint = "low_speed"
self._default_read_spectrum_endpoint = "high_speed"
# internal state
self._device: USBTransportHandle | None = None
self._opened: bool | None = None
self._protocol: PySeaBreezeProtocol | None = None
def open_device(self, device: USBTransportHandle) -> None:
if not isinstance(device, USBTransportHandle):
raise TypeError("device needs to be a USBTransportHandle")
# device.reset()
self._device = device
pyusb_device = self._device.pyusb_device
try:
if pyusb_device.is_kernel_driver_active(0):
pyusb_device.detach_kernel_driver(0)
except NotImplementedError:
pass # unavailable on some systems/backends
try:
pyusb_device.set_configuration()
except usb.core.USBError as err:
if err.errno == 16:
# TODO: warn as in cseabreeze
self._opened = True
raise USBTransportDeviceInUse(
"device probably used by another thread/process"
)
raise USBTransportError.from_usberror(err)
else:
self._opened = True
# This will initialize the communication protocol
if self._opened:
self._protocol = self._protocol_cls(self)
@property
def is_open(self) -> bool:
return self._opened or False
def close_device(self) -> None:
if self._device is not None:
self._device.close()
self._device = None
self._opened = False
self._protocol = None
def write(self, data: bytes, timeout_ms: int | None = None, **kwargs: Any) -> int:
if self._device is None:
raise RuntimeError("device not opened")
if kwargs:
warnings.warn(f"kwargs provided but ignored: {kwargs}")
return self._device.pyusb_device.write( # type: ignore
self._endpoint_map.ep_out, data, timeout=timeout_ms
)
def read(
self,
size: int | None = None,
timeout_ms: int | None = None,
mode: str | None = None,
**kwargs: Any,
) -> bytes:
if self._device is None:
raise RuntimeError("device not opened")
mode = mode if mode is not None else self._default_read_endpoint
endpoint = getattr(self._endpoint_map, self._read_endpoints[mode])
if size is None:
size = self._default_read_size[mode]
if kwargs:
warnings.warn(f"kwargs provided but ignored: {kwargs}")
ret: bytes = self._device.pyusb_device.read(
endpoint, size, timeout=timeout_ms
).tobytes()
return ret
@property
def default_timeout_ms(self) -> int:
if not self._device:
raise RuntimeError("no protocol instance available")
return self._device.pyusb_device.default_timeout # type: ignore
@property
def protocol(self) -> PySeaBreezeProtocol:
if self._protocol is None:
raise RuntimeError("no protocol instance available")
return self._protocol
@classmethod
def list_devices(cls, **kwargs: Any) -> Iterable[USBTransportHandle]:
"""list pyusb devices for all available spectrometers
Note: this includes spectrometers that are currently opened in other
processes on the machine.
Yields
------
devices : USBTransportHandle
unique pyusb devices for each available spectrometer
"""
# check if a specific pyusb backend is requested
_pyusb_backend = kwargs.get("pyusb_backend", None)
# get all matching devices
try:
pyusb_devices = usb.core.find(
find_all=True,
custom_match=lambda dev: (
dev.idVendor == cls.vendor_id and dev.idProduct in cls.product_ids
),
backend=get_pyusb_backend_from_name(name=_pyusb_backend),
)
except usb.core.NoBackendError:
raise RuntimeError("No pyusb backend found")
# encapsulate
for pyusb_device in pyusb_devices:
yield USBTransportHandle(pyusb_device)
@classmethod
def register_model(cls, model_name: str, **kwargs: Any) -> None:
product_id = kwargs.get("usb_product_id")
if not isinstance(product_id, int):
raise TypeError(f"product_id {product_id:r} not an integer")
if product_id in cls.product_ids:
raise ValueError(f"product_id 0x{product_id:04x} already in registry")
cls.product_ids[product_id] = model_name
@classmethod
def supported_model(cls, device: USBTransportHandle) -> str | None:
"""return supported model
Parameters
----------
device : USBTransportHandle
"""
if not isinstance(device, USBTransportHandle):
return None
# noinspection PyUnresolvedReferences
return cls.product_ids[device.pyusb_device.idProduct]
@classmethod
def specialize(cls, model_name: str, **kwargs: Any) -> type[USBTransport]:
assert set(kwargs) == set(cls._required_init_kwargs)
# usb transport register automatically on registration
cls.register_model(model_name, **kwargs)
specialized_class = type(
f"USBTransport{model_name}",
(cls,),
{"__init__": partialmethod(cls.__init__, **kwargs)},
)
return specialized_class
@classmethod
def initialize(cls, **_kwargs: Any) -> None:
for device in cls.list_devices(**_kwargs):
try:
device.pyusb_device.reset()
# usb.util.dispose_resources(device) <- already done by device.reset()
except Exception as err:
cls._log.debug(
"initialize failed: {}('{}')".format(
err.__class__.__name__, getattr(err, "message", "no message")
)
)
@classmethod
def shutdown(cls, **_kwargs: Any) -> None:
# dispose usb resources
for device in cls.list_devices(**_kwargs):
try:
usb.util.dispose_resources(device.pyusb_device)
except Exception as err:
cls._log.debug(
"shutdown failed: {}('{}')".format(
err.__class__.__name__, getattr(err, "message", "no message")
)
)
_pyusb_backend_instances: dict[str, usb.backend.IBackend] = {}
def get_pyusb_backend_from_name(name: str | None) -> usb.backend.IBackend:
"""internal: allow requesting a specific pyusb backend for testing"""
if name is None:
# default is pick first that works: ('libusb1', 'libusb0', 'openusb')
_backend = None
else:
try:
_backend = _pyusb_backend_instances[name]
except KeyError:
try:
m = importlib.import_module(f"usb.backend.{name}")
except ImportError:
raise RuntimeError(f"unknown pyusb backend: {name!r}")
# noinspection PyUnresolvedReferences
_backend = m.get_backend()
# raise if a pyusb backend was requested but can't be loaded
if _backend is None:
raise RuntimeError(f"pyusb backend failed to load: {name!r}")
_pyusb_backend_instances[name] = _backend
return _backend
def get_name_from_pyusb_backend(backend: usb.backend.IBackend) -> str | None:
"""internal: return backend name from loaded backend"""
module = inspect.getmodule(backend)
if not module:
return None
return module.__name__.split(".")[-1]
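# --- Hedged illustration (not part of pyseabreeze) ----------------------------
# ``USBTransport.specialize`` above builds one subclass per spectrometer model
# whose ``__init__`` has the model-specific kwargs pre-bound via
# functools.partialmethod (already imported at the top of this module).
# A standalone sketch of that idiom; the class and values below are made up.
class _DemoTransport:
    def __init__(self, product_id, endpoint_map):
        self.product_id = product_id
        self.endpoint_map = endpoint_map
    @classmethod
    def specialize(cls, model_name, **kwargs):
        # type(name, bases, namespace) builds the subclass dynamically;
        # partialmethod freezes the keyword arguments for its __init__.
        return type(
            "_DemoTransport" + model_name,
            (cls,),
            {"__init__": partialmethod(cls.__init__, **kwargs)},
        )
# e.g. _DemoUSB2000 = _DemoTransport.specialize(
#          "USB2000", product_id=0x1002, endpoint_map=None)
# _DemoUSB2000() then constructs with those kwargs already applied.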
|
|
"""
Helper methods for components within Home Assistant.
"""
from datetime import datetime
from homeassistant import NoEntitySpecifiedError
from homeassistant.loader import get_component
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME, STATE_ON, STATE_OFF, CONF_PLATFORM,
CONF_TYPE, DEVICE_DEFAULT_NAME)
from homeassistant.util import ensure_unique_string, slugify
def generate_entity_id(entity_id_format, name, current_ids=None, hass=None):
""" Generate a unique entity ID based on given entity IDs or used ids. """
if current_ids is None:
if hass is None:
raise RuntimeError("Missing required parameter currentids or hass")
current_ids = hass.states.entity_ids()
return ensure_unique_string(
entity_id_format.format(slugify(name.lower())), current_ids)
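# --- Hedged usage sketch (the ids below are made up) --------------------------
def _demo_generate_entity_id():
    """ Show how a friendly name is slugified and deduplicated. """
    taken = ['switch.living_room_lamp']
    # The preferred id 'switch.living_room_lamp' is already taken, so
    # ensure_unique_string is expected to append a numeric suffix.
    return generate_entity_id('switch.{}', 'Living Room Lamp', current_ids=taken)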
def extract_entity_ids(hass, service):
"""
Helper method to extract a list of entity ids from a service call.
Will convert group entity ids to the entity ids it represents.
"""
if not (service.data and ATTR_ENTITY_ID in service.data):
return []
group = get_component('group')
# Entity ID attr can be a list or a string
service_ent_id = service.data[ATTR_ENTITY_ID]
if isinstance(service_ent_id, str):
return group.expand_entity_ids(hass, [service_ent_id.lower()])
return [ent_id for ent_id in group.expand_entity_ids(hass, service_ent_id)]
# pylint: disable=too-few-public-methods, attribute-defined-outside-init
class TrackStates(object):
"""
Records the time when the with-block is entered. Will add all states
that have changed since the start time to the return list when with-block
is exited.
"""
def __init__(self, hass):
self.hass = hass
self.states = []
def __enter__(self):
self.now = datetime.now()
return self.states
def __exit__(self, exc_type, exc_value, traceback):
self.states.extend(self.hass.states.get_since(self.now))
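# --- Hedged usage sketch (the service call is hypothetical) -------------------
def _demo_track_states(hass):
    """ Collect the states that changed while some work ran. """
    with TrackStates(hass) as changed:
        hass.services.call('light', 'turn_on')  # any work that mutates state
    # After the with-block exits, ``changed`` holds the states that changed
    # since the block was entered.
    return changed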
def validate_config(config, items, logger):
"""
Validates if all items are available in the configuration.
config is the general dictionary with all the configurations.
items is a dict with per domain which attributes we require.
logger is the logger from the caller to log the errors to.
Returns True if all required items were found.
"""
errors_found = False
for domain in items.keys():
config.setdefault(domain, {})
errors = [item for item in items[domain] if item not in config[domain]]
if errors:
logger.error(
"Missing required configuration items in {}: {}".format(
domain, ", ".join(errors)))
errors_found = True
return not errors_found
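# --- Hedged usage sketch (domains and keys are made up) -----------------------
def _demo_validate_config(config, logger):
    """ Require 'platform' for switch sections and an api_key elsewhere. """
    required = {'switch': [CONF_PLATFORM], 'demo_domain': ['api_key']}
    # Returns False (and logs what is missing) if any required key is absent.
    return validate_config(config, required, logger)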
def config_per_platform(config, domain, logger):
"""
Generator to break a component config into different platforms.
For example, will find 'switch', 'switch 2', 'switch 3', .. etc
"""
config_key = domain
found = 1
while config_key in config:
platform_config = config[config_key]
platform_type = platform_config.get(CONF_PLATFORM)
# DEPRECATED, still supported for now.
if platform_type is None:
platform_type = platform_config.get(CONF_TYPE)
if platform_type is not None:
logger.warning((
'Please update your config for {}.{} to use "platform" '
'instead of "type"').format(domain, platform_type))
if platform_type is None:
logger.warning('No platform specified for %s', config_key)
break
yield platform_type, platform_config
found += 1
config_key = "{} {}".format(domain, found)
def platform_devices_from_config(config, domain, hass,
entity_id_format, logger):
""" Parses the config for specified domain.
Loads different platforms and retrieve domains. """
devices = []
for p_type, p_config in config_per_platform(config, domain, logger):
platform = get_component('{}.{}'.format(domain, p_type))
if platform is None:
logger.error("Unknown %s type specified: %s", domain, p_type)
else:
try:
p_devices = platform.get_devices(hass, p_config)
except AttributeError:
# DEPRECATED, still supported for now
logger.warning(
'Platform %s should migrate to use the method get_devices',
p_type)
if domain == 'light':
p_devices = platform.get_lights(hass, p_config)
elif domain == 'switch':
p_devices = platform.get_switches(hass, p_config)
else:
raise
logger.info("Found %d %s %ss", len(p_devices), p_type, domain)
devices.extend(p_devices)
# Setup entity IDs for each device
device_dict = {}
no_name_count = 0
for device in devices:
device.hass = hass
# Get the name or set to default if none given
name = device.name or DEVICE_DEFAULT_NAME
if name == DEVICE_DEFAULT_NAME:
no_name_count += 1
name = "{} {}".format(domain, no_name_count)
entity_id = generate_entity_id(
entity_id_format, name, device_dict.keys())
device.entity_id = entity_id
device_dict[entity_id] = device
return device_dict
class Device(object):
""" ABC for Home Assistant devices. """
# pylint: disable=no-self-use
hass = None
entity_id = None
@property
def should_poll(self):
"""
Return True if device has to be polled for state.
False if device pushes its state to HA.
"""
return True
@property
def unique_id(self):
""" Returns a unique id. """
return "{}.{}".format(self.__class__, id(self))
@property
def name(self):
""" Returns the name of the device. """
return self.get_name()
@property
def state(self):
""" Returns the state of the device. """
return self.get_state()
@property
def state_attributes(self):
""" Returns the state attributes. """
return {}
# DEPRECATION NOTICE:
# Device is moving from getters to properties.
# For now the new properties will call the old functions
# This will be removed in the future.
def get_name(self):
""" Returns the name of the device if any. """
return DEVICE_DEFAULT_NAME
def get_state(self):
""" Returns state of the device. """
return "Unknown"
def get_state_attributes(self):
""" Returns optional state attributes. """
return None
def update(self):
""" Retrieve latest state from the real device. """
pass
def update_ha_state(self, force_refresh=False):
"""
Updates Home Assistant with current state of device.
If force_refresh == True will update device before setting state.
"""
if self.hass is None:
raise RuntimeError("Attribute hass is None for {}".format(self))
if self.entity_id is None:
raise NoEntitySpecifiedError(
"No entity specified for device {}".format(self.name))
if force_refresh:
self.update()
attr = self.state_attributes or {}
if ATTR_FRIENDLY_NAME not in attr and self.name:
attr[ATTR_FRIENDLY_NAME] = self.name
return self.hass.states.set(self.entity_id, self.state, attr)
def __eq__(self, other):
return (isinstance(other, Device) and
other.unique_id == self.unique_id)
def __repr__(self):
return "<Device {}: {}>".format(self.name, self.state)
class ToggleDevice(Device):
""" ABC for devices that can be turned on and off. """
# pylint: disable=no-self-use
@property
def state(self):
""" Returns the state. """
return STATE_ON if self.is_on else STATE_OFF
@property
def is_on(self):
""" True if device is on. """
return False
def turn_on(self, **kwargs):
""" Turn the device on. """
pass
def turn_off(self, **kwargs):
""" Turn the device off. """
pass
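# --- Hedged illustration (not part of Home Assistant) -------------------------
# Minimal ToggleDevice subclass using the property-based API that the
# deprecation notice above points to; the device is entirely made up.
class _DemoSwitch(ToggleDevice):
    """ In-memory switch that just remembers whether it is on. """
    def __init__(self, name='Demo Switch'):
        self._name = name
        self._on = False
    @property
    def name(self):
        return self._name
    @property
    def is_on(self):
        return self._on
    def turn_on(self, **kwargs):
        self._on = True
    def turn_off(self, **kwargs):
        self._on = False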
|
|
#!/usr/bin/python3 -u
helptext = """IFTTT Webhook for json requests to control a windows PC.
Supported commands: wake, suspend, poweroff, poweroff_linux, reboot_linux"""
import argparse
import configparser
import pprint
pp = pprint.PrettyPrinter(width=1)
import logging
import binascii
import socket
import struct
import http.server
import socketserver
import ssl
import os.path
import cgi
import json
import time
import subprocess
parser = argparse.ArgumentParser(description=helptext)
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-m", "--mock", action="store_true",
help="""don't actually cause any actions within the network""")
parser.add_argument('command', nargs='?',
help="""command to execute directly instead of starting the server""")
parser.add_argument("-q", "--quiet", help="set logging to ERROR",
action="store_const", dest="loglevel",
const=logging.ERROR, default=logging.INFO)
parser.add_argument("-d", "--debug", help="set logging to DEBUG",
action="store_const", dest="loglevel",
const=logging.DEBUG, default=logging.INFO)
args = parser.parse_args()
config = configparser.ConfigParser()
config.read('webhook.ini', encoding='utf-8')
# WOL send_magic_packet conf
MAC = config['wake']['mac']
BROADCAST_IP = '255.255.255.255'  # to send to config['win']['host'] instead, comment out the sock.setsockopt call below
DEFAULT_PORT = int(config['wake']['port'])
logging.basicConfig(level=args.loglevel,
format='%(levelname)-8s %(message)s')
_log = logging.getLogger('webhook')
# Command dispatcher and implementations
def command(cmd):
"""Command dispatcher. Only interpret known commands, otherwise return False."""
if cmd is None:
return False
_log.info("Received command {0}".format(cmd))
if cmd == 'wake':
wake()
elif cmd == 'suspend':
suspend()
elif cmd == 'poweroff':
poweroff()
elif cmd == 'poweroff_linux':
poweroff_linux()
elif cmd == 'reboot_linux':
reboot_linux()
else:
_log.error("Unknown command {0}".format(cmd))
return False
return True
def wake():
_log.debug("Wake up {0}".format(MAC))
if not args.mock:
send_magic_packet(MAC)
def suspend():
_log.debug("Suspend {0}".format(config['win']['host']))
remote_command("psshutdown -d -t 00 -v 00",
config['win']['user'],
config['win']['host'])
def poweroff():
_log.debug("Poweroff {0}".format(config['win']['host']))
remote_command("psshutdown -k -t 00 -v 00",
config['win']['user'],
config['win']['host'])
def poweroff_linux():
_log.debug("Poweroff {0}".format(config['linux']['host']))
remote_command("sudo chvt 1 ; sudo halt",
config['linux']['user'],
config['linux']['host'])
def reboot_linux():
_log.debug("Poweroff {0}".format(config['linux']['host']))
remote_command("sudo chvt 1 ; sudo reboot",
config['linux']['user'],
config['linux']['host'])
# SSH to the Windows PC
def remote_command(cmd, remote_user, remote_host):
"""Ececute a command on the remote host."""
ssh = "su {0} -c 'ssh {1}@{2} \"{3}\" '".format(
config['webhook']['ssh_user'], remote_user, remote_host, cmd)
_log.debug(ssh)
if not args.mock:
try:
subprocess.run(ssh, shell=True, timeout=30)
except subprocess.TimeoutExpired:
pass
# WOL from https://github.com/remcohaszing/pywakeonlan/blob/master/wakeonlan.py
def create_magic_packet(macaddress):
"""
Create a magic packet.
    A magic packet is a packet that can be used with the Wake-on-LAN
    protocol to wake up a computer. The packet is constructed from the
    mac address given as a parameter.
Args:
macaddress (str): the mac address that should be parsed into a
magic packet.
"""
if len(macaddress) == 12:
pass
elif len(macaddress) == 17:
sep = macaddress[2]
macaddress = macaddress.replace(sep, '')
else:
raise ValueError('Incorrect MAC address format')
# Pad the synchronization stream
data = b'FFFFFFFFFFFF' + (macaddress * 16).encode()
send_data = b''
# Split up the hex values in pack
for i in range(0, len(data), 2):
send_data += struct.pack(b'B', int(data[i: i + 2], 16))
return send_data
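# --- Hedged illustration (the MAC below is made up) ---------------------------
def _demo_magic_packet_layout():
    """ A magic packet is 6 x 0xFF followed by the MAC repeated 16 times. """
    packet = create_magic_packet('01:23:45:67:89:ab')
    # 6 synchronization bytes + 16 * 6 MAC bytes == 102 bytes total.
    return packet[:6] == b'\xff' * 6 and len(packet) == 102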
def send_magic_packet(*macs, **kwargs):
"""
Wake up computers having any of the given mac addresses.
Wake on lan must be enabled on the host device.
Args:
macs (str): One or more macaddresses of machines to wake.
Keyword Args:
ip_address (str): the ip address of the host to send the magic packet
to (default "255.255.255.255")
port (int): the port of the host to send the magic packet to
(default 9)
"""
packets = []
ip = kwargs.pop('ip_address', BROADCAST_IP)
port = kwargs.pop('port', DEFAULT_PORT)
for k in kwargs:
raise TypeError('send_magic_packet() got an unexpected keyword '
'argument {!r}'.format(k))
for mac in macs:
packet = create_magic_packet(mac)
packets.append(packet)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # comment out the next line when sending to config['win']['host'] instead of 255.255.255.255:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.connect((ip, port))
_log.debug("connected to {0}".format(ip))
for packet in packets:
sock.send(packet)
_log.debug("sent packet {0}".format(binascii.hexlify(packet)))
sock.close()
# HTTPS server
class Handler(http.server.BaseHTTPRequestHandler):
def sendresponse(self, code):
self.send_response(code)
self.send_header("Content-type", "text/html")
self.end_headers()
def do_GET(self):
"""Only respond with a HTML page to GET requests in verbose mode"""
if not args.verbose:
self.sendresponse(400)
return
self.sendresponse(200)
self.wfile.write("<html><head><title>IFTTT Webhook</title></head>".encode("utf-8"))
self.wfile.write("<body><p>IFTTT Webhook</p>".encode("utf-8"))
self.wfile.write("</body></html>".encode("utf-8"))
def do_POST(self):
if args.verbose:
pp.pprint(self.headers.as_string())
ctype, _ = cgi.parse_header(self.headers['content-type'])
if ctype != 'application/json':
self.sendresponse(400)
return
length = int(self.headers['content-length'])
raw = self.rfile.read(length).decode('utf-8')
msg = json.loads(raw)
if args.verbose:
pp.pprint(msg)
if msg.get('password') != config['webhook']['password']:
_log.error("Authentication failure")
time.sleep(10)
self.sendresponse(403)
return
if not command(msg.get('command')):
_log.error("Unknown command {0}".format(msg.get('command')))
self.sendresponse(400)
return
self.sendresponse(200)
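# --- Hedged illustration (the password value is made up) ----------------------
def _demo_request_body():
    """ JSON body a client would POST: the shared password plus a command. """
    return json.dumps({'password': 'changeme', 'command': 'wake'})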
def start_http():
httpd = socketserver.TCPServer(("", int(config['webhook']['https_port'])), Handler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=os.path.join(config['webhook']['ssl_dir'], config['webhook']['ssl_key']),
certfile=os.path.join(config['webhook']['ssl_dir'], config['webhook']['ssl_cert']),
server_side=True)
_log.info("Starting the webhook.py server")
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
finally:
httpd.server_close()
if __name__ == '__main__':
if args.command:
command(args.command)
else:
start_http()
|
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
bzr vcs support.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import email.utils # For email parsing
import dateutil.parser # Date string parsing
# first try python3, then python2
try:
from urllib.request import url2pathname
except ImportError:
from urllib2 import url2pathname
from vcstools.vcs_base import VcsClientBase, VcsError
from vcstools.common import sanitized, normalized_rel_path, \
run_shell_command, ensure_dir_notexists
def _get_bzr_version():
"""Looks up bzr version by calling bzr --version.
:raises: VcsError if bzr is not installed"""
try:
value, output, _ = run_shell_command('bzr --version',
shell=True,
us_env=True)
if value == 0 and output is not None and len(output.splitlines()) > 0:
version = output.splitlines()[0]
else:
raise VcsError("bzr --version returned %s," +
" maybe bzr is not installed" %
value)
except VcsError as e:
raise VcsError("Coud not determine whether bzr is installed: %s" % e)
return version
class BzrClient(VcsClientBase):
def __init__(self, path):
"""
:raises: VcsError if bzr not detected
"""
VcsClientBase.__init__(self, 'bzr', path)
_get_bzr_version()
@staticmethod
def get_environment_metadata():
metadict = {}
try:
metadict["version"] = _get_bzr_version()
        except VcsError:
metadict["version"] = "no bzr installed"
return metadict
def get_url(self):
"""
:returns: BZR URL of the branch (output of bzr info command),
or None if it cannot be determined
"""
result = None
if self.detect_presence():
cmd = 'bzr info %s' % self._path
_, output, _ = run_shell_command(cmd, shell=True, us_env=True)
matches = [l for l in output.splitlines() if l.startswith(' parent branch: ')]
if matches:
ppath = url2pathname(matches[0][len(' parent branch: '):])
                # when it can, bzr substitutes absolute paths for relative paths
if (ppath is not None and os.path.isdir(ppath) and not os.path.isabs(ppath)):
result = os.path.abspath(os.path.join(os.getcwd(), ppath))
else:
result = ppath
return result
def url_matches(self, url, url_or_shortcut):
if super(BzrClient, self).url_matches(url, url_or_shortcut):
return True
# if we got a shortcut (e.g. launchpad url), we compare using
# bzr info and return that one if result matches.
result = False
if url_or_shortcut is not None:
cmd = 'bzr info %s' % url_or_shortcut
value, output, _ = run_shell_command(cmd, shell=True, us_env=True)
if value == 0:
for line in output.splitlines():
sline = line.strip()
for prefix in ['shared repository: ',
'repository branch: ',
'branch root: ']:
if sline.startswith(prefix):
if super(BzrClient, self).url_matches(url, sline[len(prefix):]):
result = True
break
return result
@staticmethod
def static_detect_presence(path):
return os.path.isdir(os.path.join(path, '.bzr'))
def checkout(self, url, version=None, verbose=False,
shallow=False, timeout=None):
if url is None or url.strip() == '':
raise ValueError('Invalid empty url : "%s"' % url)
# bzr 2.5.1 fails if empty directory exists
if not ensure_dir_notexists(self.get_path()):
self.logger.error("Can't remove %s" % self.get_path())
return False
cmd = 'bzr branch'
if version:
cmd += ' -r %s' % version
cmd += ' %s %s' % (url, self._path)
value, _, msg = run_shell_command(cmd,
shell=True,
show_stdout=verbose,
verbose=verbose)
if value != 0:
if msg:
self.logger.error('%s' % msg)
return False
return True
def update(self, version='', verbose=False, timeout=None):
if not self.detect_presence():
return False
value, _, _ = run_shell_command("bzr pull",
cwd=self._path,
shell=True,
show_stdout=True,
verbose=verbose)
if value != 0:
return False
# Ignore verbose param, bzr is pretty verbose on update anyway
if version is not None and version != '':
cmd = "bzr update -r %s" % (version)
else:
cmd = "bzr update"
value, _, _ = run_shell_command(cmd,
cwd=self._path,
shell=True,
show_stdout=True,
verbose=verbose)
if value == 0:
return True
return False
def get_version(self, spec=None):
"""
:param spec: (optional) revisionspec of desired version. May
be any revisionspec as returned by 'bzr help revisionspec',
e.g. a tagname or 'revno:<number>'
:returns: the current revision number of the repository. Or if
spec is provided, the number of a revision specified by some
token.
"""
if self.detect_presence():
if spec is not None:
command = ['bzr log -r %s .' % sanitized(spec)]
_, output, _ = run_shell_command(command,
shell=True,
cwd=self._path,
us_env=True)
if output is None or output.strip() == '' or output.startswith("bzr:"):
return None
else:
matches = [l for l in output.split('\n') if l.startswith('revno: ')]
if len(matches) == 1:
return matches[0].split()[1]
else:
_, output, _ = run_shell_command('bzr revno --tree',
shell=True,
cwd=self._path,
us_env=True)
return output.strip()
def get_diff(self, basepath=None):
response = None
if basepath is None:
basepath = self._path
if self.path_exists():
rel_path = sanitized(normalized_rel_path(self._path, basepath))
command = "bzr diff %s" % rel_path
command += " -p1 --prefix %s/:%s/" % (rel_path, rel_path)
_, response, _ = run_shell_command(command, shell=True, cwd=basepath)
return response
def get_log(self, relpath=None, limit=None):
response = []
if relpath is None:
relpath = ''
# Compile regexes
id_regex = re.compile('^revno: ([0-9]+)$', flags=re.MULTILINE)
committer_regex = re.compile('^committer: (.+)$', flags=re.MULTILINE)
timestamp_regex = re.compile('^timestamp: (.+)$', flags=re.MULTILINE)
message_regex = re.compile('^ (.+)$', flags=re.MULTILINE)
if self.path_exists() and os.path.exists(os.path.join(self._path, relpath)):
# Get the log
limit_cmd = (("--limit=%d" % (int(limit))) if limit else "")
command = "bzr log %s %s" % (sanitized(relpath), limit_cmd)
return_code, text_response, stderr = run_shell_command(command, shell=True, cwd=self._path)
if return_code == 0:
revno_match = id_regex.findall(text_response)
committer_match = committer_regex.findall(text_response)
timestamp_match = timestamp_regex.findall(text_response)
message_match = message_regex.findall(text_response)
# Extract the entries
for revno, committer, timestamp, message in zip(revno_match,
committer_match,
timestamp_match,
message_match):
author, email_address = email.utils.parseaddr(committer)
date = dateutil.parser.parse(timestamp)
log_data = {'id': revno,
'author': author,
'email': email_address,
'message': message,
'date': date}
response.append(log_data)
return response
def get_status(self, basepath=None, untracked=False):
response = None
if basepath is None:
basepath = self._path
if self.path_exists():
rel_path = normalized_rel_path(self._path, basepath)
command = "bzr status %s -S" % sanitized(rel_path)
if not untracked:
command += " -V"
_, response, _ = run_shell_command(command, shell=True, cwd=basepath)
response_processed = ""
for line in response.split('\n'):
if len(line.strip()) > 0:
response_processed += line[0:4] + rel_path + '/'
response_processed += line[4:] + '\n'
response = response_processed
return response
def export_repository(self, version, basepath):
# execute the bzr export cmd
cmd = 'bzr export --format=tgz {0} '.format(basepath + '.tar.gz')
cmd += '{0}'.format(version)
result, _, _ = run_shell_command(cmd, shell=True, cwd=self._path)
if result:
return False
return True
BZRClient = BzrClient
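# --- Hedged usage sketch (the path and branch URL are made up) ----------------
def _demo_bzr_checkout(workdir='/tmp/demo_branch'):
    """ Branch a repository at a tag, then report the resulting revno. """
    client = BzrClient(workdir)  # raises VcsError if bzr is not installed
    if client.checkout('lp:demo-project', version='tag:1.0'):
        return client.get_version()
    return None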
|
|
# coding: utf-8
"""
Helpers to use OpenMath as a DSL in Python
EXAMPLES::
We first define:
>>> cd = CDBaseHelper('http://www.openmath.org/cd')
and then be able to use this as follows:
>>> one_plus_one = cd.arith1.plus(1, 1)
>>> print(one_plus_one)
OMApplication(
elem=OMSymbol(name='plus', cd='arith1', cdbase='http://www.openmath.org/cd'),
arguments=[
OMInteger(integer=1),
OMInteger(integer=1)])
The use for this becomes apparent for larger terms, such as the quadratic formula
example as found on Wikipedia:
>>> mathops = CDBaseHelper('http://www.example.com/mathops')
>>> x = om.OMVariable('x'); a = om.OMVariable('a'); b = om.OMVariable('b'); c = om.OMVariable('c')
>>> arith1 = cd.arith1; relation1 = cd.relation1; multiops = mathops.multiops
>>> quadratic_formula = relation1.eq(
... x,
... arith1.divide(
... multiops.plusminus(
... arith1.unary_minus(b),
... arith1.root(
... arith1.minus(
... arith1.power(
... b,
... 2
... ),
... arith1.times(
... 4,
... a,
... c
... )
... )
... )
... ),
... arith1.times(
... 2,
... a
... )
... )
... )
>>> print(quadratic_formula)
OMApplication(
elem=OMSymbol(name='eq', cd='relation1', cdbase='http://www.openmath.org/cd'),
arguments=[
OMVariable(name='x'),
OMApplication(
elem=OMSymbol(name='divide', cd='arith1', cdbase='http://www.openmath.org/cd'),
arguments=[
OMApplication(
elem=OMSymbol(name='plusminus', cd='multiops', cdbase='http://www.example.com/mathops'),
arguments=[
OMApplication(
elem=OMSymbol(name='unary_minus', cd='arith1', cdbase='http://www.openmath.org/cd'),
arguments=[OMVariable(name='b')]),
OMApplication(
elem=OMSymbol(name='root', cd='arith1', cdbase='http://www.openmath.org/cd'),
arguments=[OMApplication(
elem=OMSymbol(name='minus', cd='arith1', cdbase='http://www.openmath.org/cd'),
arguments=[
OMApplication(
elem=OMSymbol(name='power', cd='arith1', cdbase='http://www.openmath.org/cd'),
arguments=[
OMVariable(name='b'),
OMInteger(integer=2)]),
OMApplication(
elem=OMSymbol(name='times', cd='arith1', cdbase='http://www.openmath.org/cd'),
arguments=[
OMInteger(integer=4),
OMVariable(name='a'),
OMVariable(name='c')])])])]),
OMApplication(
elem=OMSymbol(name='times', cd='arith1', cdbase='http://www.openmath.org/cd'),
arguments=[
OMInteger(integer=2),
OMVariable(name='a')])])])
Content Dictionary Base::
>>> openmath_org = CDBaseHelper('http://www.openmath.org')
>>> openmath_org
CDBaseHelper('http://www.openmath.org', converter=None, cdhook=None, symbolhook=None)
>>> openmath_cd = openmath_org / 'cd'
>>> openmath_cd
CDBaseHelper('http://www.openmath.org/cd', converter=None, cdhook=None, symbolhook=None)
Content Dictionary::
>>> logic1 = openmath_cd.logic1
>>> logic1
CDHelper('http://www.openmath.org/cd', 'logic1', converter=None, symbolhook=None)
>>> logic2 = openmath_cd["logic1"]
>>> logic2
CDHelper('http://www.openmath.org/cd', 'logic1', converter=None, symbolhook=None)
>>> logic1.true
OMSymbol(name='true', cd='logic1', id=None, cdbase='http://www.openmath.org/cd')
>>> logic2["false"]
OMSymbol(name='false', cd='logic1', id=None, cdbase='http://www.openmath.org/cd')
"""
from . import openmath as om
from .convert import CannotConvertError
import six
import inspect
class _Helper(object):
""" Helper class used to indicate an object is a helper object """
def _toOM(self):
pass
class CDBaseHelper(_Helper):
""" Helper object pointing to a content dictionary base """
def __init__(self, cdbase, converter = None, cdhook = None, symbolhook = None):
self._ishelper = True
self._cdbase = cdbase
self._uri = cdbase
self._converter = converter
self._cdhook = cdhook
self._symbolhook = symbolhook
def __repr__(self):
""" returns a unique representation of this object """
return 'CDBaseHelper(%r, converter=%r, cdhook=%r, symbolhook=%r)' % (self._cdbase, self._converter, self._cdhook, self._symbolhook)
def __str__(self):
""" returns a human-readable representation of this object """
        return 'CDBaseHelper(%s, converter=%s, cdhook=%s, symbolhook=%s)' % (self._cdbase, self._converter, self._cdhook, self._symbolhook)
def __div__(self, other):
""" returns a new CDBaseHelper with other appended to the base url """
return CDBaseHelper('%s/%s' % (self._cdbase, other), self._converter, self._cdhook, self._symbolhook)
def __truediv__(self, other):
""" same as self.__div__ """
return self.__div__(other)
def __getattr__(self, name):
""" returns a CDHelper object with the given name and this as the base """
if self._cdhook is not None:
return self._cdhook(self._cdbase, name, self._converter, self._symbolhook)
return CDHelper(self._cdbase, name, self._converter, self._symbolhook)
def __getitem__(self, name):
""" same as self.__getattr__ """
return self.__getattr__(name)
def _toOM(self):
return self.__getattr__("")._toOM()
class CDHelper(_Helper):
""" Helper object pointing to a content dictionary path """
def __init__(self, cdbase, cd, converter=None, hook=None):
self._ishelper = True
self._cdbase = cdbase
self._cd = cd
self._uri = '%s?%s' % (cdbase, cd)
self._converter = converter
self._hook = hook
def __repr__(self):
""" returns a unique representation of this object """
return 'CDHelper(%r, %r, converter=%r, symbolhook=%r)' % (self._cdbase, self._cd, self._converter, self._hook)
def __str__(self):
""" returns a human-readable representation of this object """
return 'CDHelper(%s, %s, converter=%s, symbolhook=%s)' % (self._cdbase, self._cd, self._converter, self._hook)
def __getattr__(self, name):
""" returns an OpenMath Symbol with self as the content dictonary and the given name """
# if we have a hook, return whatever the hook returns instead of the symbol
if self._hook is not None:
return self._hook(name, cd, cdbase, converter)
return OMSymbol(name=name, cd=self._cd, cdbase=self._cdbase, converter=self._converter)
def __getitem__(self, name):
""" same as self.__getattr__ """
return self.__getattr__(name)
def _toOM(self):
""" Turns this object into an OpenMath symbol """
return self.__getattr__("")
def __call__(self, *args, **kwargs):
return self._toOM()(*args, **kwargs)
class WrappedHelper(object):
"""mixin for classes that wrap around an OM object to provide additional functionality"""
def __init__(self, obj):
self.obj = obj
def toOM(self):
return self.obj
class OMSymbol(om.OMSymbol):
def __init__(self, converter=None, **kwargs):
super(OMSymbol, self).__init__(**kwargs)
self._converter = converter
def _convert(self, term):
return convertAsOpenMath(term, self._converter)
def __call__(self, *args, **kwargs):
args = [self._convert(a) for a in args]
return self._toOM().__call__(*args, **kwargs)
def __eq__(self, other):
if isinstance(other, OMSymbol):
return self._toOM() == other._toOM()
else:
return self._toOM() == other
def _toOM(self):
return om.OMSymbol(name=self.name, cd=self.cd, id=self.id, cdbase=self.cdbase)
lambdaOM = CDBaseHelper("http://www.python.org")["lambda"] # .lambda not allowed because it's a reserved word
def interpretAsOpenMath(x):
"""tries to convert a Python object into an OpenMath object
this is not a replacement for using a Converter for exporting Python objects
instead, it is used conveniently building OM objects in DSL embedded in Python
inparticular, it converts Python functions into OMBinding objects using lambdaOM as the binder"""
if hasattr(x, "_ishelper") and x._ishelper:
# wrapped things in this class -> unwrap
return x._toOM()
elif isinstance(x, om.OMAny):
# already OM
return x
elif isinstance(x, six.integer_types):
# integers -> OMI
return om.OMInteger(x)
elif isinstance(x, float):
# floats -> OMF
return om.OMFloat(x)
elif isinstance(x, six.string_types):
# strings -> OMSTR
return om.OMString(x)
elif isinstance(x, WrappedHelper):
# wrapper -> wrapped object
return x.toOM()
elif inspect.isfunction(x):
# function -> OMBIND(lambda,...)
# get all the parameters of the function
paramMap = inspect.signature(x).parameters
params = [v for k, v in six.iteritems(paramMap)]
# make sure that all of them are positional
posArgKinds = [inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD]
if not all([p.kind in posArgKinds for p in params]):
raise CannotInterpretAsOpenMath("no sequence arguments allowed")
# call the function with appropriate OMVariables
paramsOM = [om.OMVariable(name=p.name) for p in params]
bodyOM = interpretAsOpenMath(x(*paramsOM))
        return om.OMBinding(om.OMSymbol(name="lambda", cd="python", cdbase="http://python.org"), paramsOM, bodyOM)
else:
# fail
raise CannotInterpretAsOpenMath("unknown kind of object: " + str(x))
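# --- Hedged usage sketch ------------------------------------------------------
def _demo_interpret():
    """ Show the main conversions performed by interpretAsOpenMath. """
    as_integer = interpretAsOpenMath(3)        # becomes an om.OMInteger
    as_string = interpretAsOpenMath("three")   # becomes an om.OMString
    # A function with only positional parameters becomes an OMBinding whose
    # body is obtained by calling the function with OMVariables.
    as_binding = interpretAsOpenMath(lambda x: x)
    return as_integer, as_string, as_binding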
def convertAsOpenMath(term, converter):
""" Converts a term into OpenMath, using either a converter or the interpretAsOpenMath method """
# if we already have openmath, or have some of our magic helpers, use interpretAsOpenMath
if hasattr(term, "_ishelper") and term._ishelper or isinstance(term, om.OMAny):
return interpretAsOpenMath(term)
# next try to convert using the converter
if converter is not None:
try:
_converted = converter.to_openmath(term)
except Exception as e:
_converted = None
if isinstance(_converted, om.OMAny):
return _converted
# fallback to the openmath helper
return interpretAsOpenMath(term)
class CannotInterpretAsOpenMath(CannotConvertError):
"""thrown when an object can not be interpreted as OpenMath """
pass
__all__ = ["CDBaseHelper", "CDHelper", "WrappedHelper", "OMSymbol", "interpretAsOpenMath", "convertAsOpenMath", "CannotInterpretAsOpenMath"]
|
|
"""Module for testing date/time variables."""
import datetime
import time
class TestDateTimeVar(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.rawData = []
self.dataByKey = {}
for i in range(1, 11):
timeTuple = (2002, 12, 9, 0, 0, 0, 0, 0, -1)
timeInTicks = time.mktime(timeTuple) + i * 86400 + i * 8640
dateCol = cx_Oracle.TimestampFromTicks(int(timeInTicks))
if i % 2:
timeInTicks = time.mktime(timeTuple) + i * 86400 * 2 + \
i * 12960
nullableCol = cx_Oracle.TimestampFromTicks(int(timeInTicks))
else:
nullableCol = None
tuple = (i, dateCol, nullableCol)
self.rawData.append(tuple)
self.dataByKey[i] = tuple
def testBindDate(self):
"test binding in a date"
self.cursor.execute(u"""
select * from TestDates
where DateCol = :value""",
value = cx_Oracle.Timestamp(2002, 12, 13, 9, 36, 0))
self.failUnlessEqual(self.cursor.fetchall(), [self.dataByKey[4]])
def testBindDateTime(self):
"test binding in a Python 2.3 and higher date time"
self.cursor.execute(u"""
select * from TestDates
where DateCol = :value""",
value = datetime.datetime(2002, 12, 13, 9, 36, 0))
self.failUnlessEqual(self.cursor.fetchall(), [self.dataByKey[4]])
def testBindDateAfterString(self):
"test binding in a date after setting input sizes to a string"
self.cursor.setinputsizes(value = 15)
self.cursor.execute(u"""
select * from TestDates
where DateCol = :value""",
value = cx_Oracle.Timestamp(2002, 12, 14, 12, 0, 0))
self.failUnlessEqual(self.cursor.fetchall(), [self.dataByKey[5]])
def testBindNull(self):
"test binding in a null"
self.cursor.setinputsizes(value = cx_Oracle.DATETIME)
self.cursor.execute(u"""
select * from TestDates
where DateCol = :value""",
value = None)
self.failUnlessEqual(self.cursor.fetchall(), [])
def testBindDateArrayDirect(self):
"test binding in a date array"
returnValue = self.cursor.var(cx_Oracle.NUMBER)
array = [r[1] for r in self.rawData]
statement = u"""
begin
:returnValue := pkg_TestDateArrays.TestInArrays(
:startValue, :baseDate, :array);
end;"""
self.cursor.execute(statement,
returnValue = returnValue,
startValue = 5,
baseDate = cx_Oracle.Date(2002, 12, 12),
array = array)
self.failUnlessEqual(returnValue.getvalue(), 35.5)
array = array + array[:5]
self.cursor.execute(statement,
startValue = 7,
baseDate = cx_Oracle.Date(2002, 12, 13),
array = array)
self.failUnlessEqual(returnValue.getvalue(), 24.0)
def testBindDateArrayBySizes(self):
"test binding in a date array (with setinputsizes)"
returnValue = self.cursor.var(cx_Oracle.NUMBER)
self.cursor.setinputsizes(array = [cx_Oracle.DATETIME, 10])
array = [r[1] for r in self.rawData]
self.cursor.execute(u"""
begin
:returnValue := pkg_TestDateArrays.TestInArrays(
:startValue, :baseDate, :array);
end;""",
returnValue = returnValue,
startValue = 6,
baseDate = cx_Oracle.Date(2002, 12, 13),
array = array)
self.failUnlessEqual(returnValue.getvalue(), 26.5)
def testBindDateArrayByVar(self):
"test binding in a date array (with arrayvar)"
returnValue = self.cursor.var(cx_Oracle.NUMBER)
array = self.cursor.arrayvar(cx_Oracle.DATETIME, 10, 20)
array.setvalue(0, [r[1] for r in self.rawData])
self.cursor.execute(u"""
begin
:returnValue := pkg_TestDateArrays.TestInArrays(
:startValue, :baseDate, :array);
end;""",
returnValue = returnValue,
startValue = 7,
baseDate = cx_Oracle.Date(2002, 12, 14),
array = array)
self.failUnlessEqual(returnValue.getvalue(), 17.5)
def testBindInOutDateArrayByVar(self):
"test binding in/out a date array (with arrayvar)"
array = self.cursor.arrayvar(cx_Oracle.DATETIME, 10, 100)
originalData = [r[1] for r in self.rawData]
array.setvalue(0, originalData)
self.cursor.execute(u"""
begin
pkg_TestDateArrays.TestInOutArrays(:numElems, :array);
end;""",
numElems = 5,
array = array)
self.failUnlessEqual(array.getvalue(),
[ cx_Oracle.Timestamp(2002, 12, 17, 2, 24, 0),
cx_Oracle.Timestamp(2002, 12, 18, 4, 48, 0),
cx_Oracle.Timestamp(2002, 12, 19, 7, 12, 0),
cx_Oracle.Timestamp(2002, 12, 20, 9, 36, 0),
cx_Oracle.Timestamp(2002, 12, 21, 12, 0, 0) ] + \
originalData[5:])
def testBindOutDateArrayByVar(self):
"test binding out a date array (with arrayvar)"
array = self.cursor.arrayvar(cx_Oracle.DATETIME, 6, 100)
self.cursor.execute(u"""
begin
pkg_TestDateArrays.TestOutArrays(:numElems, :array);
end;""",
numElems = 6,
array = array)
self.failUnlessEqual(array.getvalue(),
[ cx_Oracle.Timestamp(2002, 12, 13, 4, 48, 0),
cx_Oracle.Timestamp(2002, 12, 14, 9, 36, 0),
cx_Oracle.Timestamp(2002, 12, 15, 14, 24, 0),
cx_Oracle.Timestamp(2002, 12, 16, 19, 12, 0),
cx_Oracle.Timestamp(2002, 12, 18, 0, 0, 0),
cx_Oracle.Timestamp(2002, 12, 19, 4, 48, 0) ])
def testBindOutSetInputSizes(self):
"test binding out with set input sizes defined"
vars = self.cursor.setinputsizes(value = cx_Oracle.DATETIME)
self.cursor.execute(u"""
begin
:value := to_date(20021209, 'YYYYMMDD');
end;""")
self.failUnlessEqual(vars["value"].getvalue(),
cx_Oracle.Timestamp(2002, 12, 9))
def testBindInOutSetInputSizes(self):
"test binding in/out with set input sizes defined"
vars = self.cursor.setinputsizes(value = cx_Oracle.DATETIME)
self.cursor.execute(u"""
begin
:value := :value + 5.25;
end;""",
value = cx_Oracle.Timestamp(2002, 12, 12, 10, 0, 0))
self.failUnlessEqual(vars["value"].getvalue(),
cx_Oracle.Timestamp(2002, 12, 17, 16, 0, 0))
def testBindOutVar(self):
"test binding out with cursor.var() method"
var = self.cursor.var(cx_Oracle.DATETIME)
self.cursor.execute(u"""
begin
:value := to_date('20021231 12:31:00',
'YYYYMMDD HH24:MI:SS');
end;""",
value = var)
self.failUnlessEqual(var.getvalue(),
cx_Oracle.Timestamp(2002, 12, 31, 12, 31, 0))
def testBindInOutVarDirectSet(self):
"test binding in/out with cursor.var() method"
var = self.cursor.var(cx_Oracle.DATETIME)
var.setvalue(0, cx_Oracle.Timestamp(2002, 12, 9, 6, 0, 0))
self.cursor.execute(u"""
begin
:value := :value + 5.25;
end;""",
value = var)
self.failUnlessEqual(var.getvalue(),
cx_Oracle.Timestamp(2002, 12, 14, 12, 0, 0))
def testCursorDescription(self):
"test cursor description is accurate"
self.cursor.execute(u"select * from TestDates")
self.failUnlessEqual(self.cursor.description,
[ (u'INTCOL', cx_Oracle.NUMBER, 10, 22, 9, 0, 0),
(u'DATECOL', cx_Oracle.DATETIME, 23, 7, 0, 0, 0),
(u'NULLABLECOL', cx_Oracle.DATETIME, 23, 7, 0, 0, 1) ])
def testFetchAll(self):
"test that fetching all of the data returns the correct results"
self.cursor.execute(u"select * From TestDates order by IntCol")
self.failUnlessEqual(self.cursor.fetchall(), self.rawData)
self.failUnlessEqual(self.cursor.fetchall(), [])
def testFetchMany(self):
"test that fetching data in chunks returns the correct results"
self.cursor.execute(u"select * From TestDates order by IntCol")
self.failUnlessEqual(self.cursor.fetchmany(3), self.rawData[0:3])
self.failUnlessEqual(self.cursor.fetchmany(2), self.rawData[3:5])
self.failUnlessEqual(self.cursor.fetchmany(4), self.rawData[5:9])
self.failUnlessEqual(self.cursor.fetchmany(3), self.rawData[9:])
self.failUnlessEqual(self.cursor.fetchmany(3), [])
def testFetchOne(self):
"test that fetching a single row returns the correct results"
self.cursor.execute(u"""
select *
from TestDates
where IntCol in (3, 4)
order by IntCol""")
self.failUnlessEqual(self.cursor.fetchone(), self.dataByKey[3])
self.failUnlessEqual(self.cursor.fetchone(), self.dataByKey[4])
self.failUnlessEqual(self.cursor.fetchone(), None)
|
|
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic EC2 Resource Tag / Filters and actions
These work for the whole family of resources associated
with ec2 (subnets, vpc, security-groups, volumes, instances,
snapshots).
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from concurrent.futures import as_completed
from datetime import datetime, timedelta
from dateutil.parser import parse
from dateutil.tz import tzutc
import itertools
from c7n.actions import BaseAction as Action, AutoTagUser
from c7n.filters import Filter, OPERATORS, FilterValidationError
from c7n import utils
DEFAULT_TAG = "maid_status"
universal_tag_retry = utils.get_retry((
'Throttled',
'RequestLimitExceeded',
'Client.RequestLimitExceeded'
))
def register_ec2_tags(filters, actions):
filters.register('marked-for-op', TagActionFilter)
filters.register('tag-count', TagCountFilter)
actions.register('auto-tag-user', AutoTagUser)
actions.register('mark-for-op', TagDelayedAction)
actions.register('tag-trim', TagTrim)
actions.register('mark', Tag)
actions.register('tag', Tag)
actions.register('unmark', RemoveTag)
actions.register('untag', RemoveTag)
actions.register('remove-tag', RemoveTag)
actions.register('rename-tag', RenameTag)
actions.register('normalize-tag', NormalizeTag)
def register_universal_tags(filters, actions):
filters.register('marked-for-op', TagActionFilter)
filters.register('tag-count', TagCountFilter)
actions.register('mark', UniversalTag)
actions.register('tag', UniversalTag)
actions.register('auto-tag-user', AutoTagUser)
actions.register('mark-for-op', UniversalTagDelayedAction)
actions.register('unmark', UniversalUntag)
actions.register('untag', UniversalUntag)
actions.register('remove-tag', UniversalUntag)
def universal_augment(self, resources):
client = utils.local_session(
self.session_factory).client('resourcegroupstaggingapi')
paginator = client.get_paginator('get_resources')
resource_type = self.get_model().service
if self.get_model().type:
resource_type += ":" + self.get_model().type
resource_tag_map_list = list(itertools.chain(
*[p['ResourceTagMappingList'] for p in paginator.paginate(
ResourceTypeFilters=[resource_type])]))
resource_tag_map = {r['ResourceARN']: r for r in resource_tag_map_list}
for r in resources:
arn = self.get_arns([r])[0]
t = resource_tag_map.get(arn)
if t:
r['Tags'] = t['Tags']
return resources
def _common_tag_processer(executor_factory, batch_size, concurrency,
process_resource_set, id_key, resources, tags,
log):
with executor_factory(max_workers=concurrency) as w:
        # Map each future back to its chunk so that error logging reports the
        # resources that actually failed, not just the last chunk submitted.
        futures = {}
        for resource_set in utils.chunks(resources, size=batch_size):
            futures[w.submit(
                process_resource_set, resource_set, tags)] = resource_set
        for f in as_completed(futures):
            if f.exception():
                log.error(
                    "Exception with tags: %s on resources: %s \n %s" % (
                        tags,
                        ", ".join([r[id_key] for r in futures[f]]),
                        f.exception()))
class TagTrim(Action):
"""Automatically remove tags from an ec2 resource.
    EC2 resources have a limit on the number of tags they can carry
    (50, matching ``max_tag_count`` below). To free up tag space on a
    set of resources, this action removes enough tags to make the
    desired amount of room while preserving a given set of tags.
.. code-block :: yaml
- policies:
- name: ec2-tag-trim
comment: |
Any instances with 8 or more tags get tags removed until
            they match the target tag count, in this case 7, so that
            we free up a tag slot for another usage.
resource: ec2
filters:
# Filter down to resources which already have 8 tags
# as we need space for 3 more, this also ensures that
# metrics reporting is correct for the policy.
type: value
key: "[length(Tags)][0]"
op: ge
value: 8
actions:
- type: tag-trim
space: 3
preserve:
- OwnerContact
- ASV
- CMDBEnvironment
- downtime
- custodian_status
"""
max_tag_count = 50
schema = utils.type_schema(
'tag-trim',
space={'type': 'integer'},
preserve={'type': 'array', 'items': {'type': 'string'}})
permissions = ('ec2:DeleteTags',)
def process(self, resources):
self.id_key = self.manager.get_model().id
self.preserve = set(self.data.get('preserve'))
self.space = self.data.get('space', 3)
with self.executor_factory(max_workers=3) as w:
list(w.map(self.process_resource, resources))
def process_resource(self, i):
# Can't really go in batch parallel without some heuristics
# without some more complex matching wrt to grouping resources
# by common tags populations.
tag_map = {
t['Key']: t['Value'] for t in i.get('Tags', [])
if not t['Key'].startswith('aws:')}
# Space == 0 means remove all but specified
if self.space and len(tag_map) + self.space <= self.max_tag_count:
return
keys = set(tag_map)
preserve = self.preserve.intersection(keys)
candidates = keys - self.preserve
if self.space:
# Free up slots to fit
remove = len(candidates) - (
self.max_tag_count - (self.space + len(preserve)))
candidates = list(sorted(candidates))[:remove]
if not candidates:
self.log.warning(
"Could not find any candidates to trim %s" % i[self.id_key])
return
self.process_tag_removal(i, candidates)
def process_tag_removal(self, resource, tags):
client = utils.local_session(
self.manager.session_factory).client('ec2')
self.manager.retry(
client.delete_tags,
Tags=[{'Key': c} for c in tags],
Resources=[resource[self.id_key]],
DryRun=self.manager.config.dryrun)
class TagActionFilter(Filter):
"""Filter resources for tag specified future action
Filters resources by a 'custodian_status' tag which specifies a future
date for an action.
The filter parses the tag values looking for an 'op@date'
    string. The date is parsed and compared to today's date; the
    filter succeeds if today's date is greater than or equal to the target date.
The optional 'skew' parameter provides for incrementing today's
date a number of days into the future. An example use case might
be sending a final notice email a few days before terminating an
instance, or snapshotting a volume prior to deletion.
.. code-block :: yaml
- policies:
- name: ec2-stop-marked
resource: ec2
filters:
- type: marked-for-op
# The default tag used is custodian_status
# but that is configurable
tag: custodian_status
op: stop
# Another optional tag is skew
actions:
- stop
"""
schema = utils.type_schema(
'marked-for-op',
tag={'type': 'string'},
skew={'type': 'number', 'minimum': 0},
op={'type': 'string'})
current_date = None
def validate(self):
op = self.data.get('op')
if self.manager and op not in self.manager.action_registry.keys():
raise FilterValidationError("Invalid marked-for-op op:%s" % op)
return self
def __call__(self, i):
tag = self.data.get('tag', DEFAULT_TAG)
op = self.data.get('op', 'stop')
skew = self.data.get('skew', 0)
v = None
for n in i.get('Tags', ()):
if n['Key'] == tag:
v = n['Value']
break
if v is None:
return False
if ':' not in v or '@' not in v:
return False
msg, tgt = v.rsplit(':', 1)
action, action_date_str = tgt.strip().split('@', 1)
if action != op:
return False
try:
action_date = parse(action_date_str)
        except Exception:
            self.log.warning("could not parse tag:%s value:%s on %s" % (
                tag, v, i['InstanceId']))
            return False
if self.current_date is None:
self.current_date = datetime.now()
return self.current_date >= (action_date - timedelta(skew))
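# Illustrative sketch (not part of the original module): parsing the 'msg: op@date'
# value that marked-for-op matches on, using only the standard library. The tag value
# is hypothetical but follows the format written by TagDelayedAction below.
def _example_marked_for_op_match(op='stop', skew=0):
    from datetime import datetime, timedelta
    value = 'Resource does not meet policy: stop@2024/05/01'
    if ':' not in value or '@' not in value:
        return False
    _msg, tgt = value.rsplit(':', 1)
    action, action_date_str = tgt.strip().split('@', 1)
    if action != op:
        return False
    action_date = datetime.strptime(action_date_str, '%Y/%m/%d')
    # skew moves "today" forward, e.g. to act a few days before the target date.
    return datetime.now() >= (action_date - timedelta(days=skew))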
class TagCountFilter(Filter):
"""Simplify tag counting..
ie. these two blocks are equivalent
.. code-block :: yaml
- filters:
- type: value
key: "[length(Tags)][0]"
op: gte
value: 8
- filters:
- type: tag-count
value: 8
"""
schema = utils.type_schema(
'tag-count',
count={'type': 'integer', 'minimum': 0},
op={'enum': list(OPERATORS.keys())})
def __call__(self, i):
count = self.data.get('count', 10)
op_name = self.data.get('op', 'gte')
op = OPERATORS.get(op_name)
tag_count = len([
t['Key'] for t in i.get('Tags', [])
if not t['Key'].startswith('aws:')])
return op(tag_count, count)
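# Illustrative sketch (not part of the original module): the tag-count comparison with
# aws:* tags excluded, using the operator module in place of the OPERATORS registry.
def _example_tag_count(count=2, op_name='ge'):
    import operator
    tags = [{'Key': 'aws:cloudformation:stack-name', 'Value': 's'},
            {'Key': 'App', 'Value': 'web'},
            {'Key': 'Env', 'Value': 'prod'}]
    tag_count = len([t['Key'] for t in tags
                     if not t['Key'].startswith('aws:')])
    return getattr(operator, op_name)(tag_count, count)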
class Tag(Action):
"""Tag an ec2 resource.
"""
batch_size = 25
concurrency = 2
schema = utils.type_schema(
'tag', aliases=('mark',),
tags={'type': 'object'},
key={'type': 'string'},
value={'type': 'string'},
tag={'type': 'string'},
)
permissions = ('ec2:CreateTags',)
def validate(self):
if self.data.get('key') and self.data.get('tag'):
raise FilterValidationError(
"Can't specify both key and tag, choose one")
return self
def process(self, resources):
self.id_key = self.manager.get_model().id
# Legacy
msg = self.data.get('msg')
msg = self.data.get('value') or msg
tag = self.data.get('tag', DEFAULT_TAG)
tag = self.data.get('key') or tag
# Support setting multiple tags in a single go with a mapping
tags = self.data.get('tags')
if tags is None:
tags = []
else:
tags = [{'Key': k, 'Value': v} for k, v in tags.items()]
if msg:
tags.append({'Key': tag, 'Value': msg})
batch_size = self.data.get('batch_size', self.batch_size)
_common_tag_processer(
self.executor_factory, batch_size, self.concurrency,
self.process_resource_set, self.id_key, resources, tags, self.log)
def process_resource_set(self, resource_set, tags):
client = utils.local_session(
self.manager.session_factory).client('ec2')
self.manager.retry(
client.create_tags,
Resources=[v[self.id_key] for v in resource_set],
Tags=tags,
DryRun=self.manager.config.dryrun)
class RemoveTag(Action):
"""Remove tags from ec2 resources.
"""
batch_size = 100
concurrency = 2
schema = utils.type_schema(
'untag', aliases=('unmark', 'remove-tag'),
tags={'type': 'array', 'items': {'type': 'string'}})
permissions = ('ec2:DeleteTags',)
def process(self, resources):
self.id_key = self.manager.get_model().id
tags = self.data.get('tags', [DEFAULT_TAG])
batch_size = self.data.get('batch_size', self.batch_size)
_common_tag_processer(
self.executor_factory, batch_size, self.concurrency,
self.process_resource_set, self.id_key, resources, tags, self.log)
def process_resource_set(self, vol_set, tag_keys):
client = utils.local_session(
self.manager.session_factory).client('ec2')
return self.manager.retry(
client.delete_tags,
Resources=[v[self.id_key] for v in vol_set],
            Tags=[{'Key': k} for k in tag_keys],
DryRun=self.manager.config.dryrun)
class RenameTag(Action):
""" Create a new tag with identical value & remove old tag
"""
schema = utils.type_schema(
'rename-tag',
old_key={'type': 'string'},
new_key={'type': 'string'})
permissions = ('ec2:CreateTags', 'ec2:DeleteTags')
tag_count_max = 50
def delete_tag(self, client, ids, key, value):
client.delete_tags(
Resources=ids,
Tags=[{'Key': key, 'Value': value}])
def create_tag(self, client, ids, key, value):
client.create_tags(
Resources=ids,
Tags=[{'Key': key, 'Value': value}])
def process_rename(self, tag_value, resource_set):
"""
Move source tag value to destination tag value
- Collect value from old tag
- Delete old tag
- Create new tag & assign stored value
"""
self.log.info("Renaming tag on %s instances" % (len(resource_set)))
old_key = self.data.get('old_key')
new_key = self.data.get('new_key')
c = utils.local_session(self.manager.session_factory).client('ec2')
# We have a preference to creating the new tag when possible first
resource_ids = [r[self.id_key] for r in resource_set if len(
r.get('Tags', [])) < self.tag_count_max]
if resource_ids:
self.create_tag(c, resource_ids, new_key, tag_value)
self.delete_tag(
c, [r[self.id_key] for r in resource_set], old_key, tag_value)
# For resources with 50 tags, we need to delete first and then create.
resource_ids = [r[self.id_key] for r in resource_set if len(
r.get('Tags', [])) > self.tag_count_max - 1]
if resource_ids:
self.create_tag(c, resource_ids, new_key, tag_value)
def create_set(self, instances):
old_key = self.data.get('old_key', None)
resource_set = {}
for r in instances:
tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}
if tags[old_key] not in resource_set:
resource_set[tags[old_key]] = []
resource_set[tags[old_key]].append(r)
return resource_set
    def filter_resources(self, resources):
        old_key = self.data.get('old_key', None)
        # Keep only resources that actually carry the old tag key.
        return [r for r in resources
                if old_key in {t['Key'] for t in r.get('Tags', [])}]
def process(self, resources):
count = len(resources)
resources = self.filter_resources(resources)
self.log.info(
"Filtered from %s resources to %s" % (count, len(resources)))
self.id_key = self.manager.get_model().id
resource_set = self.create_set(resources)
with self.executor_factory(max_workers=3) as w:
futures = []
for r in resource_set:
futures.append(
w.submit(self.process_rename, r, resource_set[r]))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception renaming tag set \n %s" % (
f.exception()))
return resources
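# Illustrative sketch (not part of the original module): the ordering RenameTag cares
# about. Below the 50-tag limit the new key is created before the old one is deleted,
# so the value is never lost; at the limit the old key has to go first to free a slot.
# Plain dicts stand in for the EC2 create_tags/delete_tags calls.
def _example_rename_order(tags, old_key, new_key, tag_count_max=50):
    tags = dict(tags)
    value = tags[old_key]
    if len(tags) < tag_count_max:
        tags[new_key] = value        # create first ...
        del tags[old_key]            # ... then delete
    else:
        del tags[old_key]            # delete first to free a slot ...
        tags[new_key] = value        # ... then create
    return tags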
class TagDelayedAction(Action):
"""Tag resources for future action.
.. code-block :: yaml
- policies:
- name: ec2-stop-marked
resource: ec2
filters:
- type: marked-for-op
# The default tag used is custodian_status
# but that is configurable
tag: custodian_status
op: stop
# Another optional tag is skew
actions:
- stop
"""
schema = utils.type_schema(
'mark-for-op',
tag={'type': 'string'},
msg={'type': 'string'},
days={'type': 'number', 'minimum': 0, 'exclusiveMinimum': True},
op={'type': 'string'})
permissions = ('ec2:CreateTags',)
batch_size = 200
concurrency = 2
default_template = 'Resource does not meet policy: {op}@{action_date}'
def validate(self):
op = self.data.get('op')
if self.manager and op not in self.manager.action_registry.keys():
raise FilterValidationError(
"mark-for-op specifies invalid op:%s" % op)
return self
def process(self, resources):
self.id_key = self.manager.get_model().id
# Move this to policy? / no resources bypasses actions?
if not len(resources):
return
msg_tmpl = self.data.get('msg', self.default_template)
op = self.data.get('op', 'stop')
tag = self.data.get('tag', DEFAULT_TAG)
date = self.data.get('days', 4)
n = datetime.now(tz=tzutc())
action_date = n + timedelta(days=date)
msg = msg_tmpl.format(
op=op, action_date=action_date.strftime('%Y/%m/%d'))
self.log.info("Tagging %d resources for %s on %s" % (
len(resources), op, action_date.strftime('%Y/%m/%d')))
tags = [{'Key': tag, 'Value': msg}]
batch_size = self.data.get('batch_size', self.batch_size)
_common_tag_processer(
self.executor_factory, batch_size, self.concurrency,
self.process_resource_set, self.id_key, resources, tags, self.log)
def process_resource_set(self, resource_set, tags):
client = utils.local_session(self.manager.session_factory).client('ec2')
return self.manager.retry(
client.create_tags,
Resources=[v[self.id_key] for v in resource_set],
Tags=tags,
DryRun=self.manager.config.dryrun)
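# Illustrative sketch (not part of the original module): how the mark-for-op tag value
# is assembled from the message template, the op and the action date (stdlib only).
def _example_mark_for_op_value(op='stop', days=4):
    from datetime import datetime, timedelta, timezone
    msg_tmpl = 'Resource does not meet policy: {op}@{action_date}'
    action_date = datetime.now(timezone.utc) + timedelta(days=days)
    return msg_tmpl.format(op=op, action_date=action_date.strftime('%Y/%m/%d'))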
class NormalizeTag(Action):
"""Transform the value of a tag.
Set the tag value to uppercase, title, lowercase, or strip text
from a tag key.
.. code-block :: yaml
policies:
- name: ec2-service-transform-lower
resource: ec2
comment: |
ec2-service-tag-value-to-lower
query:
- instance-state-name: running
filters:
- "tag:testing8882": present
actions:
- type: normalize-tag
key: lower_key
action: lower
- name: ec2-service-strip
resource: ec2
comment: |
ec2-service-tag-strip-blah
query:
- instance-state-name: running
filters:
- "tag:testing8882": present
actions:
- type: normalize-tag
key: strip_key
action: strip
value: blah
"""
schema = utils.type_schema(
'normalize-tag',
key={'type': 'string'},
action={'type': 'string',
'items': {
                    'enum': ['upper', 'lower', 'title', 'strip', 'replace']}},
value={'type': 'string'})
permissions = ('ec2:CreateTags',)
def create_tag(self, client, ids, key, value):
self.manager.retry(
client.create_tags,
Resources=ids,
Tags=[{'Key': key, 'Value': value}])
def process_transform(self, tag_value, resource_set):
"""
Transform tag value
- Collect value from tag
- Transform Tag value
- Assign new value for key
"""
self.log.info("Transforming tag value on %s instances" % (
len(resource_set)))
key = self.data.get('key')
c = utils.local_session(self.manager.session_factory).client('ec2')
self.create_tag(
c,
[r[self.id_key] for r in resource_set if len(
r.get('Tags', [])) < 50],
key, tag_value)
def create_set(self, instances):
key = self.data.get('key', None)
resource_set = {}
for r in instances:
tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}
if tags[key] not in resource_set:
resource_set[tags[key]] = []
resource_set[tags[key]].append(r)
return resource_set
    def filter_resources(self, resources):
        key = self.data.get('key', None)
        # Keep only resources that actually carry the tag key.
        return [r for r in resources
                if key in {t['Key'] for t in r.get('Tags', [])}]
def process(self, resources):
count = len(resources)
resources = self.filter_resources(resources)
self.log.info(
"Filtered from %s resources to %s" % (count, len(resources)))
self.id_key = self.manager.get_model().id
resource_set = self.create_set(resources)
with self.executor_factory(max_workers=3) as w:
futures = []
for r in resource_set:
action = self.data.get('action')
value = self.data.get('value')
new_value = False
if action == 'lower' and not r.islower():
new_value = r.lower()
elif action == 'upper' and not r.isupper():
new_value = r.upper()
elif action == 'title' and not r.istitle():
new_value = r.title()
elif action == 'strip' and value and value in r:
new_value = r.strip(value)
if new_value:
futures.append(
w.submit(self.process_transform, new_value, resource_set[r]))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception renaming tag set \n %s" % (
f.exception()))
return resources
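# Illustrative sketch (not part of the original module): the value transforms that
# normalize-tag dispatches on. The sample value and strip text are hypothetical; note
# that str.strip() removes a set of characters from both ends, mirroring the action.
def _example_normalize(value='Blah-Web-Prod', action='lower', strip_text='Blah-'):
    if action == 'lower' and not value.islower():
        return value.lower()
    if action == 'upper' and not value.isupper():
        return value.upper()
    if action == 'title' and not value.istitle():
        return value.title()
    if action == 'strip' and strip_text and strip_text in value:
        return value.strip(strip_text)
    return value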
class UniversalTag(Tag):
"""Applies one or more tags to the specified resources.
"""
batch_size = 20
permissions = ('resourcegroupstaggingapi:TagResources',)
def process(self, resources):
self.id_key = self.manager.get_model().id
# Legacy
msg = self.data.get('msg')
msg = self.data.get('value') or msg
tag = self.data.get('tag', DEFAULT_TAG)
tag = self.data.get('key') or tag
# Support setting multiple tags in a single go with a mapping
tags = self.data.get('tags', {})
if msg:
tags[tag] = msg
batch_size = self.data.get('batch_size', self.batch_size)
_common_tag_processer(
self.executor_factory, batch_size, self.concurrency,
self.process_resource_set, self.id_key, resources, tags, self.log)
def process_resource_set(self, resource_set, tags):
client = utils.local_session(
self.manager.session_factory).client('resourcegroupstaggingapi')
arns = self.manager.get_arns(resource_set)
response = universal_tag_retry(
client.tag_resources,
ResourceARNList=arns,
Tags=tags)
for f in response.get('FailedResourcesMap', ()):
raise Exception("Resource:{} ".format(f) +
"ErrorCode:{} ".format(
response['FailedResourcesMap'][f]['ErrorCode']) +
"StatusCode:{} ".format(
response['FailedResourcesMap'][f]['StatusCode']) +
"ErrorMessage:{}".format(
response['FailedResourcesMap'][f]['ErrorMessage']))
class UniversalUntag(RemoveTag):
"""Removes the specified tags from the specified resources.
"""
batch_size = 20
permissions = ('resourcegroupstaggingapi:UntagResources',)
def process_resource_set(self, resource_set, tag_keys):
client = utils.local_session(
self.manager.session_factory).client('resourcegroupstaggingapi')
arns = self.manager.get_arns(resource_set)
response = universal_tag_retry(
client.untag_resources,
ResourceARNList=arns,
TagKeys=tag_keys)
for f in response.get('FailedResourcesMap', ()):
raise Exception("Resource:{} ".format(f) +
"ErrorCode:{} ".format(
response['FailedResourcesMap'][f]['ErrorCode']) +
"StatusCode:{} ".format(
response['FailedResourcesMap'][f]['StatusCode']) +
"ErrorMessage:{}".format(
response['FailedResourcesMap'][f]['ErrorMessage']))
class UniversalTagDelayedAction(TagDelayedAction):
"""Tag resources for future action.
:example:
.. code-block :: yaml
policies:
- name: ec2-mark-stop
resource: ec2
filters:
- type: image-age
op: ge
days: 90
actions:
- type: mark-for-op
tag: custodian_cleanup
op: terminate
days: 4
"""
batch_size = 20
concurrency = 2
permissions = ('resourcegroupstaggingapi:TagResources',)
def process(self, resources):
self.id_key = self.manager.get_model().id
# Move this to policy? / no resources bypasses actions?
if not len(resources):
return
msg_tmpl = self.data.get('msg', self.default_template)
op = self.data.get('op', 'stop')
tag = self.data.get('tag', DEFAULT_TAG)
date = self.data.get('days', 4)
n = datetime.now(tz=tzutc())
action_date = n + timedelta(days=date)
msg = msg_tmpl.format(
op=op, action_date=action_date.strftime('%Y/%m/%d'))
self.log.info("Tagging %d resources for %s on %s" % (
len(resources), op, action_date.strftime('%Y/%m/%d')))
tags = {tag: msg}
batch_size = self.data.get('batch_size', self.batch_size)
_common_tag_processer(
self.executor_factory, batch_size, self.concurrency,
self.process_resource_set, self.id_key, resources, tags, self.log)
def process_resource_set(self, resource_set, tags):
client = utils.local_session(
self.manager.session_factory).client('resourcegroupstaggingapi')
arns = self.manager.get_arns(resource_set)
response = universal_tag_retry(
client.tag_resources,
ResourceARNList=arns,
Tags=tags)
for f in response.get('FailedResourcesMap', ()):
raise Exception("Resource:{} ".format(f) +
"ErrorCode:{} ".format(
response['FailedResourcesMap'][f]['ErrorCode']) +
"StatusCode:{} ".format(
response['FailedResourcesMap'][f]['StatusCode']) +
"ErrorMessage:{}".format(
response['FailedResourcesMap'][f]['ErrorMessage']))
|
|
from __future__ import division, print_function
# Multicut Pipeline implemented with luigi
# Multicut Problem Tasks
import luigi
import os
from .dataTasks import StackedRegionAdjacencyGraph
from .learningTasks import EdgeProbabilities
from .customTargets import HDF5DataTarget
from .defectHandlingTasks import ModifiedAdjacency, SkipEdgeLengths
from .pipelineParameter import PipelineParameter
from .tools import config_logger, run_decorator
import logging
import numpy as np
# import the proper nifty version
try:
import nifty
except ImportError:
try:
import nifty_with_cplex as nifty
except ImportError:
import nifty_with_gurobi as nifty
# init the workflow logger
workflow_logger = logging.getLogger(__name__)
config_logger(workflow_logger)
# get weights and uvids of the MC problem
class MulticutProblem(luigi.Task):
pathToSeg = luigi.Parameter()
# this can either contain a single path (classifier trained for xy - and z - edges jointly)
    # or two paths (classifier trained for xy - edges + classifier trained for z - edges separately)
pathsToClassifier = luigi.Parameter()
keyToSeg = luigi.Parameter(default='data')
def requires(self):
return_tasks = {"edge_probabilities": EdgeProbabilities(self.pathToSeg,
self.pathsToClassifier,
self.keyToSeg),
"rag": StackedRegionAdjacencyGraph(self.pathToSeg,
self.keyToSeg)}
# TODO these should also take the key to seg !
if PipelineParameter().defectPipeline:
assert False, "Defect mode is currently not supported !"
return_tasks['modified_adjacency'] = ModifiedAdjacency(self.pathToSeg)
return_tasks['skip_edge_lengths'] = SkipEdgeLengths(self.pathToSeg)
return return_tasks
@run_decorator
def run(self):
inp = self.input()
edge_costs = inp["edge_probabilities"].read()
assert edge_costs.ndim == 1
workflow_logger.info("MulticutProblem: loaded edge probs of len %i" % len(edge_costs))
if PipelineParameter().defectPipeline:
workflow_logger.info("MulticutProblem: computing MulticutProblem for defect correction pipeline.")
if inp['modified_adjacency'].read('has_defects'):
                self._modified_multicut_problem(edge_costs)
else:
self._standard_multicut_problem(edge_costs)
else:
workflow_logger.info("MulticutProblem: computing MulticutProblem for standard pipeline.")
self._standard_multicut_problem(edge_costs)
# TODO parallelise ?!
def _probabilities_to_costs(self, edge_costs):
inp = self.input()
# scale the probabilities
        # this is pretty arbitrary, it used to be 1. / n_trees, but this does not make that much sense for the sklearn impl
p_min = 0.001
p_max = 1. - p_min
edge_costs = (p_max - p_min) * edge_costs + p_min
beta = PipelineParameter().multicutBeta
# probabilities to energies, second term is boundary bias
edge_costs = np.log((1. - edge_costs) / edge_costs) + np.log((1. - beta) / beta)
workflow_logger.info(
"MulticutProblem: cost statistics before weighting: mean: %f, std: %f, min: %f, max: %f" % (
np.mean(edge_costs),
np.std(edge_costs),
edge_costs.min(),
edge_costs.max()
)
)
# weight edge costs
weighting_scheme = PipelineParameter().multicutWeightingScheme
weight = PipelineParameter().multicutWeight
edgeLens = inp['rag'].readKey('edgeLengths')
if PipelineParameter().defectPipeline:
if inp["modified_adjacency"].read("has_defects"):
skipLens = inp["skip_edge_lengths"].read()
delete_edges = inp["modified_adjacency"].read("delete_edges")
edgeLens = np.delete(edgeLens, delete_edges)
edgeLens = np.concatenate([edgeLens, skipLens])
workflow_logger.info("MulticutProblem: removed delete edges and added skipLens to edgeLens")
assert edgeLens.shape[0] == edge_costs.shape[0], str(edgeLens.shape[0]) + " , " + str(edge_costs.shape[0])
if weighting_scheme == "z":
workflow_logger.info("MulticutProblem: weighting edge costs with scheme z and weight " + str(weight))
edgeTransition = inp['rag'].readKey('totalNumberOfInSliceEdges')
z_max = float(np.max(edgeLens[edgeTransition:]))
# we only weight the z edges !
w = weight * edgeLens[edgeTransition:] / z_max
edge_costs[edgeTransition:] = np.multiply(w, edge_costs[edgeTransition:])
elif weighting_scheme == "xyz":
workflow_logger.info("MulticutProblem: weighting edge costs with scheme xyz and weight " + str(weight))
edgeTransition = inp['rag'].readKey('totalNumberOfInSliceEdges')
z_max = float(np.max(edgeLens[edgeTransition:]))
xy_max = float(np.max(edgeLens[:edgeTransition]))
w_z = weight * edgeLens[edgeTransition:] / z_max
w_xy = weight * edgeLens[:edgeTransition] / xy_max
edge_costs[edgeTransition:] = np.multiply(w_z, edge_costs[edgeTransition:])
edge_costs[:edgeTransition] = np.multiply(w_xy, edge_costs[:edgeTransition])
elif weighting_scheme == "all":
workflow_logger.info("MulticutProblem: weighting edge costs with scheme all and weight " + str(weight))
edge_max = float(np.max(edgeLens))
w = weight * edgeLens / edge_max
edge_costs = np.multiply(w, edge_costs)
else:
workflow_logger.info("MulticutProblem: using non-weighted edge costs")
if weighting_scheme in ("z", "xyz", "all"):
workflow_logger.info(
"MulticutProblem: cost statistics after weighting: mean: %f, std: %f, min: %f, max: %f" % (
np.mean(edge_costs),
np.std(edge_costs),
edge_costs.min(),
edge_costs.max()
)
)
return edge_costs
    def _modified_multicut_problem(self, edge_costs):
        inp = self.input()
        # get the plain graph for the multicut problem, modified for the defect pipeline
        modified_adjacency = inp['modified_adjacency']
g = nifty.graph.UndirectedGraph()
g.deserialize(modified_adjacency.read('modified_adjacency'))
# transform edge costs to probabilities
edge_costs = self._probabilities_to_costs(edge_costs)
# modify the edges costs by setting the ignore edges to be maximally repulsive
ignore_edges = modified_adjacency.read('ignore_edges')
if ignore_edges.size:
max_repulsive = 2 * edge_costs.min() # TODO min correct here !?!
edge_costs[ignore_edges] = max_repulsive
# TODO we might also want to weight down the skip-edges according to their range
# skip_ranges = modified_adjacency.read('skip_ranges')
# skip_edges_begin = rag.numberOfEdges
# assert edge_costs.shape[0] - skip_edges_begin == len(skip_ranges), '%i, %i' % (
# edge_costs.shape[0] - skip_edges_begin,
# len(skip_ranges)
# )
# edge_costs[skip_edges_begin:] /= skip_ranges
assert edge_costs.shape[0] == g.numberOfEdges, "%i, %i" % (edge_costs.shape[0], g.numberOfEdges)
assert np.isfinite(edge_costs.min()), str(edge_costs.min())
assert np.isfinite(edge_costs.max()), str(edge_costs.max())
out = self.output()
out.write(edge_costs, 'costs')
out.write(g.serialize(), 'graph')
out.write(g.numberOfNodes, 'number_of_nodes')
def _standard_multicut_problem(self, edge_costs):
inp = self.input()
# construct the plain graph for the multicut problem
uv_ids = inp['rag'].readKey('uvIds')
n_vars = uv_ids.max() + 1
assert n_vars == inp['rag'].readKey('numberOfNodes')
g = nifty.graph.UndirectedGraph(int(n_vars))
g.insertEdges(uv_ids)
# transform edge costs to probabilities
edge_costs = self._probabilities_to_costs(edge_costs)
assert edge_costs.shape[0] == uv_ids.shape[0]
assert np.isfinite(edge_costs.min()), str(edge_costs.min())
assert np.isfinite(edge_costs.max()), str(edge_costs.max())
# write concatenation of uvids and edge costs
out = self.output()
assert g.numberOfEdges == edge_costs.shape[0]
out.write(g.serialize(), "graph")
out.write(edge_costs, "costs")
out.write(g.numberOfNodes, 'number_of_nodes')
def output(self):
save_path = os.path.join(
PipelineParameter().cache,
"MulticutProblem_%s.h5" % (
"modified" if PipelineParameter().defectPipeline else "standard",
)
)
return HDF5DataTarget(save_path)
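# Illustrative sketch (not part of the original task): the probability-to-cost
# transform performed in MulticutProblem._probabilities_to_costs, on hypothetical
# edge probabilities. Requires only numpy.
def _example_probabilities_to_costs(beta=0.5, p_min=0.001):
    import numpy as np
    probs = np.array([0.05, 0.5, 0.95])   # hypothetical edge probabilities
    p_max = 1. - p_min
    scaled = (p_max - p_min) * probs + p_min
    # log-odds plus a boundary-bias term; beta != 0.5 biases towards merge or split
    return np.log((1. - scaled) / scaled) + np.log((1. - beta) / beta)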
|
|
import logging
import netaddr
from ryu.services.protocols.bgp.base import SUPPORTED_GLOBAL_RF
from ryu.services.protocols.bgp.model import OutgoingRoute
from ryu.services.protocols.bgp.peer import Peer
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_MULTI_EXIT_DISC
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_COMMUNITIES
from ryu.lib.packet.bgp import BGPPathAttributeCommunities
from ryu.lib.packet.bgp import RF_IPv4_UC
from ryu.lib.packet.bgp import RF_IPv6_UC
from ryu.lib.packet.bgp import RF_IPv4_VPN
from ryu.lib.packet.bgp import RF_IPv6_VPN
from ryu.lib.packet.bgp import RF_RTC_UC
from ryu.lib.packet.bgp import RouteTargetMembershipNLRI
from ryu.services.protocols.bgp.utils.bgp \
import clone_path_and_update_med_for_target_neighbor
LOG = logging.getLogger('bgpspeaker.core_managers.peer_manager')
class PeerManager(object):
def __init__(
self, core_service, neighbors_conf,
):
self._core_service = core_service
self._signal_bus = core_service.signal_bus
self._table_manager = core_service.table_manager
self._rt_manager = core_service.rt_manager
self._peers = {}
# Peer to RTFilter map
# Key: Peer instance
# Value: set of RTs that constitute RT filter for this peer
self._peer_to_rtfilter_map = {}
self._neighbors_conf = neighbors_conf
@property
def iterpeers(self):
return self._peers.itervalues()
def set_peer_to_rtfilter_map(self, new_map):
self._peer_to_rtfilter_map = new_map
def add_peer(self, neigh_conf, common_conf):
peer = Peer(common_conf, neigh_conf, self._core_service,
self._signal_bus, self)
self._peers[neigh_conf.ip_address] = peer
self._core_service.on_peer_added(peer)
def remove_peer(self, neigh_conf):
neigh_ip_address = neigh_conf.ip_address
peer = self._peers.get(neigh_ip_address)
peer.stop()
del self._peers[neigh_ip_address]
self._core_service.on_peer_removed(peer)
def get_by_addr(self, addr):
return self._peers.get(str(netaddr.IPAddress(addr)))
def on_peer_down(self, peer):
"""Peer down handler.
Cleans up the paths in global tables that was received from this peer.
"""
LOG.debug('Cleaning obsolete paths whose source/version: %s/%s' %
(peer.ip_address, peer.version_num))
# Launch clean-up for each global tables.
self._table_manager.clean_stale_routes(peer)
def _get_non_rtc_peers(self):
non_rtc_peer_list = set()
for peer in self._peers.itervalues():
if (peer.in_established() and
not peer.is_mpbgp_cap_valid(RF_RTC_UC)):
non_rtc_peer_list.add(peer)
return non_rtc_peer_list
def curr_peer_rtfilter(self, peer):
return self._peer_to_rtfilter_map.get(peer)
def get_peers_in_established(self):
"""Returns list of peers in established state."""
est_peers = []
for peer in self._peers.itervalues():
if peer.in_established:
est_peers.append(peer)
return est_peers
def resend_sent(self, route_family, peer):
"""For given `peer` re-send sent paths.
Parameters:
- `route-family`: (RouteFamily) of the sent paths to re-send
- `peer`: (Peer) peer for which we need to re-send sent paths
"""
if peer not in self._peers.values():
raise ValueError('Could not find given peer (%s)' % peer)
if route_family not in SUPPORTED_GLOBAL_RF:
raise ValueError(
'Given route family (%s) is not supported.' % route_family
)
# Iterate over the global table for given afi, safi and enqueue
# out-going routes.
table = self._table_manager.get_global_table_by_route_family(
route_family
)
for destination in table.itervalues():
            # Check if this destination's sent-routes include this peer,
            # i.e. check if this destination was advertised and enqueue
            # the path only if it was. If the current best-path has not been
            # advertised before, it might already have an OutgoingRoute queued
            # to be sent to the peer.
sent_routes = destination.sent_routes
if sent_routes is None or len(sent_routes) == 0:
continue
for sent_route in sent_routes:
if sent_route.sent_peer == peer:
# update med - if previously med was set per neighbor or
# wasn't set at all now it could have changed and we may
# need to set new value there
p = sent_route.path
if p.med_set_by_target_neighbor or p.get_pattr(
BGP_ATTR_TYPE_MULTI_EXIT_DISC) is None:
sent_route.path = \
clone_path_and_update_med_for_target_neighbor(
sent_route.path, peer.med
)
ogr = OutgoingRoute(sent_route.path,
for_route_refresh=True)
peer.enque_outgoing_msg(ogr)
def req_rr_to_non_rtc_peers(self, route_family):
"""Makes refresh request to all peers for given address family.
        Skips making a request to peers that have a valid RTC capability.
"""
assert route_family != RF_RTC_UC
for peer in self._peers.itervalues():
# First check if peer is in established state
if (peer.in_established and
# Check if peer has valid capability for given address
# family
peer.is_mbgp_cap_valid(route_family) and
# Check if peer has valid capability for RTC
not peer.is_mbgp_cap_valid(RF_RTC_UC)):
peer.request_route_refresh(route_family)
def make_route_refresh_request(self, peer_ip, *route_families):
"""Request route-refresh for peer with `peer_ip` for given
`route_families`.
Will make route-refresh request for a given `route_family` only if such
capability is supported and if peer is in ESTABLISHED state. Else, such
requests are ignored. Raises appropriate error in other cases. If
`peer_ip` is equal to 'all' makes refresh request to all valid peers.
"""
LOG.debug('Route refresh requested for peer %s and route families %s'
% (peer_ip, route_families))
if not SUPPORTED_GLOBAL_RF.intersection(route_families):
            raise ValueError('Given route family(s) %s is not supported.' %
                             (route_families,))
peer_list = []
# If route-refresh is requested for all peers.
if peer_ip == 'all':
peer_list.extend(self.get_peers_in_established())
else:
given_peer = self._peers.get(peer_ip)
if not given_peer:
raise ValueError('Invalid/unrecognized peer %s' % peer_ip)
if not given_peer.in_established:
                raise ValueError('Peer currently does not have an established'
                                 ' session.')
peer_list.append(given_peer)
# Make route refresh request to valid peers.
for peer in peer_list:
peer.request_route_refresh(*route_families)
return True
def comm_all_rt_nlris(self, peer):
"""Shares/communicates current best rt_nlri paths with this peers.
Can be used to send initial updates after we have established session
        with `peer` for which the RTC capability is valid. Takes into account
        the peer's RTC_AS setting and filters out all RT NLRIs whose origin AS
        does not match this setting.
"""
# First check if for this peer mpbgp-rtc is valid.
if not peer.is_mbgp_cap_valid(RF_RTC_UC):
return
neigh_conf = self._neighbors_conf.get_neighbor_conf(peer.ip_address)
peer_rtc_as = neigh_conf.rtc_as
# Iterate over all RT_NLRI destination communicate qualifying RT_NLRIs
rtc_table = self._table_manager.get_rtc_table()
for dest in rtc_table.itervalues():
best_path = dest.best_path
# Ignore a destination that currently does not have best path
if not best_path:
continue
# If this is a local path
if best_path.source is None:
# Check RT NLRI's origin AS matches peer RTC_AS setting
origin_as = best_path.nlri.origin_as
if origin_as == peer_rtc_as:
peer.communicate_path(best_path)
else:
# Communicate all remote RT NLRIs
peer.communicate_path(best_path)
# Also communicate EOR as per RFC
peer.enque_end_of_rib(RF_RTC_UC)
def comm_all_best_paths(self, peer):
"""Shares/communicates current best paths with this peers.
Can be used to send initial updates after we have established session
with `peer`.
"""
LOG.debug('Communicating current best path for all afi/safi except'
' 1/132')
# We will enqueue best path from all global destination.
for route_family, table in self._table_manager.iter:
if route_family == RF_RTC_UC:
continue
if peer.is_mbgp_cap_valid(route_family):
for dest in table.itervalues():
if dest.best_path:
peer.communicate_path(dest.best_path)
def comm_new_best_to_bgp_peers(self, new_best_path):
"""Communicates/enqueues given best path to be sent to all qualifying
bgp peers.
If this path came from iBGP peers, it is not sent to other iBGP peers.
If this path has community-attribute, and if settings for recognize-
well-know attributes is set, we do as per [RFC1997], and queue outgoing
route only to qualifying BGP peers.
"""
# Filter based on standard community
# If new best path has community attribute, it should be taken into
# account when sending UPDATE to peers.
comm_attr = new_best_path.get_pattr(BGP_ATTR_TYPE_COMMUNITIES)
if comm_attr:
comm_attr_na = comm_attr.has_comm_attr(
BGPPathAttributeCommunities.NO_ADVERTISE
)
            # If the NO_ADVERTISE attribute is present, we do not send the
            # UPDATE to any peers
if comm_attr_na:
LOG.debug('New best path has community attr. NO_ADVERTISE = %s'
'. Hence not advertising to any peer' % comm_attr_na)
return
qualified_peers = self._collect_peers_of_interest(
new_best_path
)
# Distribute new best-path to qualified peers.
for peer in qualified_peers:
peer.communicate_path(new_best_path)
def _collect_peers_of_interest(self, new_best_path):
"""Collect all peers that qualify for sharing a path with given RTs.
"""
path_rts = new_best_path.get_rts()
qualified_peers = set(self._peers.values())
# Filter out peers based on RTC_AS setting if path is for RT_NLRI
qualified_peers = self._rt_manager.filter_by_origin_as(
new_best_path, qualified_peers
)
# We continue to filter out qualified peer based on path RTs
# If new best path has RTs, we need to share this UPDATE with
# qualifying peers
if path_rts:
# We add Default_RTC_NLRI to path RTs so that we can send it to
# peers that have expressed interest in all paths
path_rts.append(RouteTargetMembershipNLRI.DEFAULT_RT)
# All peers that do not have RTC capability qualify
qualified_peers = set(self._get_non_rtc_peers())
# Peers that have RTC capability and have common RT with the path
# also qualify
peer_to_rtfilter_map = self._peer_to_rtfilter_map
for peer, rt_filter in peer_to_rtfilter_map.iteritems():
# Ignore Network Controller (its not a BGP peer)
if peer is None:
continue
if rt_filter is None:
qualified_peers.add(peer)
elif rt_filter.intersection(path_rts):
qualified_peers.add(peer)
return qualified_peers
def schedule_rr_to_non_rtc_peers(self):
for route_family in SUPPORTED_GLOBAL_RF:
# Since we are dealing with peers that do not support RTC,
# ignore this address family
if route_family == RF_RTC_UC:
continue
self.req_rr_to_non_rtc_peers(route_family)
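# Illustrative sketch (not part of the original module): the RT-based qualification
# performed in _collect_peers_of_interest, reduced to plain sets. Peer names and RTs
# are hypothetical strings; the real code works with Peer objects and RT filter sets.
def _example_rt_qualification():
    path_rts = {'65000:100', 'default-rtc'}        # RTs carried by the new best path
    peer_rtfilter_map = {
        'peer-a': None,                            # no filter -> interested in all
        'peer-b': {'65000:100'},                   # shares an RT with the path
        'peer-c': {'65000:200'},                   # no common RT -> not qualified
    }
    non_rtc_peers = {'peer-d'}                     # no RTC capability -> qualifies
    qualified = set(non_rtc_peers)
    for peer, rt_filter in peer_rtfilter_map.items():
        if rt_filter is None or rt_filter & path_rts:
            qualified.add(peer)
    return qualified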
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilites for `Model.compile`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.keras import losses as losses_mod
from tensorflow.python.keras import metrics as metrics_mod
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import util as tf_losses_utils
from tensorflow.python.util import nest
class Container(object):
"""Base Container class."""
def __init__(self, output_names=None):
self._output_names = output_names
def _build(self, y_pred):
if self._output_names is None:
# In Subclass API, output names like 'output_1' are used for
# `Metric` names.
self._output_names = create_pseudo_output_names(y_pred)
def _conform_to_outputs(self, outputs, struct):
"""Convenience method to conform `struct` to `outputs` structure.
Mappings performed:
(1) Map a dict to a list of outputs, using the output names.
(2) Fill missing keys in a dict w/ `None`s.
(3) Map a single item to all outputs.
Arguments:
outputs: Model predictions.
struct: Arbitrary nested structure (e.g. of labels, sample_weights,
losses, or metrics).
Returns:
Mapping of `struct` to `outputs` structure.
"""
struct = map_to_output_names(outputs, self._output_names, struct)
struct = map_missing_dict_keys(outputs, struct)
# Allow passing one object that applies to all outputs.
if not nest.is_sequence(struct) and nest.is_sequence(outputs):
struct = nest.map_structure(lambda _: struct, outputs)
return struct
def _maybe_broadcast_to_outputs(self, outputs, objects):
"""Determines if losses / metrics should be applied to all outputs.
NOTE: This method should only be called for Metrics / Losses, not for
y_true / sample_weight.
Arguments:
outputs: Model predictions.
objects: Arbitrary nested structure (e.g. of losses or metrics)
Returns:
Arbitrary nested structure of objects, maybe copied to each output.
Applies a Loss / Metric to all outputs.
"""
if not self._should_broadcast(objects):
return objects
# When there is more than one Model output, this is needed to keep
# each Metric / Loss separate. When there is only one Model output,
# the user-supplied object should be used.
should_copy_objects = len(nest.flatten(outputs)) > 1
def _broadcast_fn():
if should_copy_objects:
return nest.map_structure(self._copy_object, objects)
return objects
return nest.map_structure(lambda _: _broadcast_fn(), outputs)
def _should_broadcast(self, objects):
raise NotImplementedError
def _copy_object(self, obj):
raise NotImplementedError
class LossesContainer(Container):
"""A container class for losses passed to `Model.compile`."""
def __init__(self, losses, loss_weights=None, output_names=None):
super(LossesContainer, self).__init__(output_names=output_names)
# Keep user-supplied values untouched for recompiling and serialization.
self._user_losses = losses
self._user_loss_weights = loss_weights
self._losses = losses
self._loss_weights = loss_weights
self._per_output_metrics = None # Per-output losses become metrics.
self._loss_metric = metrics_mod.Mean(name='loss') # Total loss.
self._built = False
@property
def metrics(self):
"""Per-output loss metrics."""
if not self._built:
return []
per_output_metrics = [
metric_obj for metric_obj in nest.flatten(self._per_output_metrics)
if metric_obj is not None
]
return [self._loss_metric] + per_output_metrics
def _build(self, y_pred):
"""One-time setup of loss objects."""
super(LossesContainer, self)._build(y_pred)
self._losses = self._maybe_broadcast_to_outputs(y_pred, self._losses)
self._losses = self._conform_to_outputs(y_pred, self._losses)
self._losses = nest.map_structure(self._get_loss_object, self._losses)
self._losses = nest.flatten(self._losses)
self._loss_weights = self._maybe_broadcast_to_outputs(
y_pred, self._loss_weights)
self._loss_weights = self._conform_to_outputs(y_pred, self._loss_weights)
self._loss_weights = nest.flatten(self._loss_weights)
self._create_metrics()
self._built = True
def _create_metrics(self):
"""Creates per-output loss metrics, but only for multi-output Models."""
if len(self._output_names) == 1:
self._per_output_metrics = [None]
else:
self._per_output_metrics = []
for loss_obj, output_name in zip(self._losses, self._output_names):
if loss_obj is None:
self._per_output_metrics.append(None)
else:
self._per_output_metrics.append(
metrics_mod.Mean(output_name + '_loss'))
def __call__(self,
y_true,
y_pred,
sample_weight=None,
regularization_losses=None):
"""Computes the overall loss.
Arguments:
y_true: An arbitrary structure of Tensors representing the ground truth.
y_pred: An arbitrary structure of Tensors representing a Model's outputs.
sample_weight: An arbitrary structure of Tensors representing the
per-sample loss weights. If one Tensor is passed, it is used for all
losses. If multiple Tensors are passed, the structure should match
`y_pred`.
regularization_losses: Additional losses to be added to the total loss.
Returns:
Tuple of `(total_loss, per_output_loss_list)`
"""
y_true = self._conform_to_outputs(y_pred, y_true)
sample_weight = self._conform_to_outputs(y_pred, sample_weight)
if not self._built:
self._build(y_pred)
y_pred = nest.flatten(y_pred)
y_true = nest.flatten(y_true)
sample_weight = nest.flatten(sample_weight)
loss_values = [] # Used for gradient calculation.
loss_metric_values = [] # Used for loss metric calculation.
batch_dim = None
zip_args = (y_true, y_pred, sample_weight, self._losses, self._loss_weights,
self._per_output_metrics)
for y_t, y_p, sw, loss_obj, loss_weight, metric_obj in zip(*zip_args):
if y_t is None or loss_obj is None: # Ok to have no loss for an output.
continue
y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw)
sw = apply_mask(y_p, sw)
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
loss_metric_value = loss_value
# Correct for the `Mean` loss metrics counting each replica as a batch.
if loss_obj.reduction == losses_utils.ReductionV2.SUM:
loss_metric_value *= ds_context.get_strategy().num_replicas_in_sync
if batch_dim is None:
batch_dim = array_ops.shape(y_t)[0]
if metric_obj is not None:
metric_obj.update_state(loss_metric_value, sample_weight=batch_dim)
if loss_weight is not None:
loss_value *= loss_weight
loss_metric_value *= loss_weight
if (loss_obj.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE or
loss_obj.reduction == losses_utils.ReductionV2.AUTO):
loss_value = losses_utils.scale_loss_for_distribution(loss_value)
loss_values.append(loss_value)
loss_metric_values.append(loss_metric_value)
if regularization_losses:
regularization_losses = losses_utils.cast_losses_to_common_dtype(
regularization_losses)
reg_loss = math_ops.add_n(regularization_losses)
loss_metric_values.append(reg_loss)
loss_values.append(losses_utils.scale_loss_for_distribution(reg_loss))
if loss_values:
loss_metric_values = losses_utils.cast_losses_to_common_dtype(
loss_metric_values)
total_loss_metric_value = math_ops.add_n(loss_metric_values)
self._loss_metric.update_state(
total_loss_metric_value, sample_weight=batch_dim)
loss_values = losses_utils.cast_losses_to_common_dtype(loss_values)
total_loss = math_ops.add_n(loss_values)
return total_loss
else:
# Ok for a model to have no compiled loss.
return array_ops.zeros(shape=())
def _get_loss_object(self, loss):
"""Returns a `Loss` object.
Converts the user-supplied loss to a `Loss` object. Also allows
`SUM_OVER_BATCH_SIZE` reduction to be used for this loss.
Arguments:
loss: A string, function, or `Loss` object.
Returns:
A `Loss` object.
"""
if loss is None:
return None # Ok to have no loss for an output.
loss = losses_mod.get(loss)
if not isinstance(loss, losses_mod.Loss):
loss_name = loss.__name__
loss = losses_mod.LossFunctionWrapper(loss, name=loss_name)
loss._allow_sum_over_batch_size = True # pylint: disable=protected-access
return loss
def _should_broadcast(self, obj):
return not nest.is_sequence(obj)
def _copy_object(self, obj):
return obj # Losses don't need to be copied.
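# Illustrative sketch (not part of the original module): how per-output loss values and
# optional loss weights combine into the scalar total, using plain floats in place of
# Tensors. The values are hypothetical.
def _example_weighted_total_loss():
  per_output_losses = [0.8, 0.2]      # e.g. one value per model output
  loss_weights = [1.0, 0.5]           # a None entry would mean "unweighted"
  weighted = [l * (w if w is not None else 1.0)
              for l, w in zip(per_output_losses, loss_weights)]
  return sum(weighted)                # analogous to math_ops.add_n(loss_values)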
class MetricsContainer(Container):
"""A container class for metrics passed to `Model.compile`."""
def __init__(self, metrics=None, weighted_metrics=None, output_names=None):
super(MetricsContainer, self).__init__(output_names=output_names)
# Keep user-supplied values untouched for recompiling and serialization.
self._user_metrics = metrics
self._user_weighted_metrics = weighted_metrics
self._metrics = metrics
self._weighted_metrics = weighted_metrics
self._built = False
@property
def metrics(self):
"""Metrics created by this container."""
if not self._built:
return []
return self._metrics_in_order
def _build(self, y_pred, y_true):
"""One-time setup of metric objects."""
super(MetricsContainer, self)._build(y_pred)
self._metrics = self._maybe_broadcast_to_outputs(y_pred, self._metrics)
self._metrics = self._conform_to_outputs(y_pred, self._metrics)
self._weighted_metrics = self._maybe_broadcast_to_outputs(
y_pred, self._weighted_metrics)
self._weighted_metrics = self._conform_to_outputs(y_pred,
self._weighted_metrics)
# Standardize on tuple since `tf.data` turns lists into `Tensor`s.
y_pred = nest.list_to_tuple(y_pred)
y_true = nest.list_to_tuple(y_true)
self._metrics = nest.list_to_tuple(self._metrics)
self._weighted_metrics = nest.list_to_tuple(self._weighted_metrics)
# Convert to `Metric` objects, potentially disambiguating based on output
# properties.
self._metrics = nest.map_structure_up_to(y_pred, self._get_metric_objects,
self._metrics, y_true, y_pred)
self._weighted_metrics = nest.map_structure_up_to(y_pred,
self._get_metric_objects,
self._weighted_metrics,
y_true, y_pred)
self._metrics = nest.flatten_up_to(y_pred, self._metrics, check_types=False)
self._weighted_metrics = nest.flatten_up_to(
y_pred, self._weighted_metrics, check_types=False)
# Assumes metrics, weighted_metrics have been flattened up to outputs.
self._set_metric_names()
self._create_ordered_metrics()
self._built = True
def _set_metric_names(self):
"""Sets unique metric names."""
# For multi-output models, prepend the output name to the metric name.
# For weighted metrics, prepend "weighted_" if the name would be non-unique.
# pylint: disable=protected-access
metric_names = set()
is_multi_output = len(self._output_names) > 1
zip_args = (self._output_names, self._metrics, self._weighted_metrics)
for output_name, output_metrics, weighted_output_metrics in zip(*zip_args):
for m in output_metrics:
if m is None:
continue
if is_multi_output:
m._name = output_name + '_' + m._name
if m._name in metric_names:
raise ValueError('Found two metrics with the same name: {}'.format(
m._name))
metric_names.add(m._name)
for wm in weighted_output_metrics:
if wm is None:
continue
if is_multi_output:
if output_name + '_' + wm._name in metric_names:
wm._name = output_name + '_weighted_' + wm._name
else:
wm._name = output_name + '_' + wm._name
elif wm._name in metric_names:
wm._name = 'weighted_' + wm._name
if wm._name in metric_names:
raise ValueError('Found two metrics with the same name: {}'.format(
wm._name))
metric_names.add(wm._name)
# pylint: enable=protected-access
def _create_ordered_metrics(self):
"""Cache the flat order needed when returning metrics, for backwards compat."""
self._metrics_in_order = []
for output_metrics, output_weighted_metrics in zip(self._metrics,
self._weighted_metrics):
for m in nest.flatten(output_metrics):
if m is not None:
self._metrics_in_order.append(m)
for wm in nest.flatten(output_weighted_metrics):
if wm is not None:
self._metrics_in_order.append(wm)
def update_state(self, y_true, y_pred, sample_weight=None):
"""Updates the state of per-output metrics."""
y_true = self._conform_to_outputs(y_pred, y_true)
sample_weight = self._conform_to_outputs(y_pred, sample_weight)
if not self._built:
self._build(y_pred, y_true)
y_pred = nest.flatten(y_pred)
y_true = nest.flatten(y_true) if y_true is not None else []
sample_weight = nest.flatten(sample_weight)
zip_args = (y_true, y_pred, sample_weight, self._metrics,
self._weighted_metrics)
for y_t, y_p, sw, metric_objs, weighted_metric_objs in zip(*zip_args):
# Ok to have no metrics for an output.
if (y_t is None or (all(m is None for m in metric_objs) and
all(wm is None for wm in weighted_metric_objs))):
continue
y_t, y_p, sw = match_dtype_and_rank(y_t, y_p, sw)
sw = apply_mask(y_p, sw)
for metric_obj in metric_objs:
if metric_obj is None:
continue
metric_obj.update_state(y_t, y_p)
for weighted_metric_obj in weighted_metric_objs:
if weighted_metric_obj is None:
continue
weighted_metric_obj.update_state(y_t, y_p, sample_weight=sw)
def _get_metric_objects(self, metrics, y_t, y_p):
"""Convert user-supplied metrics to `Metric` objects."""
metrics = nest.flatten(metrics)
return [self._get_metric_object(m, y_t, y_p) for m in metrics]
def _get_metric_object(self, metric, y_t, y_p):
"""Converts user-supplied metric to a `Metric` object.
Arguments:
metric: A string, function, or `Metric` object.
y_t: Sample of label.
y_p: Sample of output.
Returns:
A `Metric` object.
"""
if metric is None:
return None # Ok to have no metric for an output.
# Convenience feature for selecting b/t binary, categorical,
# and sparse categorical.
if metric not in ['accuracy', 'acc', 'crossentropy', 'ce']:
metric_obj = metrics_mod.get(metric)
else:
y_t_rank = len(y_t.shape.as_list())
y_p_rank = len(y_p.shape.as_list())
y_t_last_dim = y_t.shape.as_list()[-1]
y_p_last_dim = y_p.shape.as_list()[-1]
is_binary = y_p_last_dim == 1
is_sparse_categorical = (
y_t_rank < y_p_rank or y_t_last_dim == 1 and y_p_last_dim > 1)
if metric in ['accuracy', 'acc']:
if is_binary:
metric_obj = metrics_mod.binary_accuracy
elif is_sparse_categorical:
metric_obj = metrics_mod.sparse_categorical_accuracy
else:
metric_obj = metrics_mod.categorical_accuracy
else:
if is_binary:
metric_obj = metrics_mod.binary_crossentropy
elif is_sparse_categorical:
metric_obj = metrics_mod.sparse_categorical_crossentropy
else:
metric_obj = metrics_mod.categorical_crossentropy
if not isinstance(metric_obj, metrics_mod.Metric):
if isinstance(metric, six.string_types):
metric_name = metric
elif hasattr(metric, 'name'):
metric_name = metric.name # TODO(omalleyt): Is this needed?
else:
# function was passed.
metric_name = metric.__name__
metric_obj = metrics_mod.MeanMetricWrapper(metric_obj, name=metric_name)
return metric_obj
def _should_broadcast(self, obj):
# e.g. 'mse'.
if not nest.is_sequence(obj):
return True
# e.g. ['mse'] or ['mse', 'mae'].
return (isinstance(obj, (list, tuple)) and
not any(nest.is_sequence(o) for o in obj))
def _copy_object(self, obj):
if isinstance(obj, metrics_mod.Metric):
return obj.__class__.from_config(obj.get_config())
return obj # Can be a function or `None`.
def create_pseudo_output_names(outputs):
"""Create pseudo output names for a subclassed Model."""
return _create_pseudo_names(outputs, prefix='output_')
def create_pseudo_input_names(inputs):
"""Create pseudo input names for a subclassed Model."""
return _create_pseudo_names(inputs, prefix='input_')
def _create_pseudo_names(tensors, prefix):
"""Creates pseudo {input | output} names for subclassed Models.
Warning: this function should only be used to define default
  names for `Metrics` and `SavedModel`. No other use cases should
rely on a `Model`'s input or output names.
Example with dict:
`{'a': [x1, x2], 'b': x3}` becomes:
`['a_1', 'a_2', 'b']`
Example with list:
`[x, y]` becomes:
`['output_1', 'output_2']`
Arguments:
tensors: `Model`'s outputs or inputs.
prefix: 'output_' for outputs, 'input_' for inputs.
Returns:
Flattened list of pseudo names.
"""
def one_index(ele):
# Start with "output_1" instead of "output_0".
if isinstance(ele, int):
return ele + 1
return ele
flat_paths = list(nest.yield_flat_paths(tensors))
flat_paths = nest.map_structure(one_index, flat_paths)
names = []
for path in flat_paths:
if not path:
name = prefix + '1' # Single output.
else:
name = '_'.join(str(p) for p in path)
if isinstance(path[0], int):
name = prefix + name
names.append(name)
return names
def map_to_output_names(y_pred, output_names, struct):
"""Maps a dict to a list using `output_names` as keys.
This is a convenience feature only. When a `Model`'s outputs
are a list, you can specify per-output losses and metrics as
a dict, where the keys are the output names. If you specify
per-output losses and metrics via the same structure as the
`Model`'s outputs (recommended), no mapping is performed.
For the Functional API, the output names are the names of the
last layer of each output. For the Subclass API, the output names
are determined by `create_pseudo_output_names` (For example:
`['output_1', 'output_2']` for a list of outputs).
This mapping preserves backwards compatibility for `compile` and
`fit`.
Arguments:
y_pred: Sample outputs of the Model, to determine if this convenience
feature should be applied (`struct` is returned unmodified if `y_pred`
isn't a flat list).
output_names: List. The names of the outputs of the Model.
struct: The structure to map.
Returns:
`struct` mapped to a list in same order as `output_names`.
"""
single_output = not nest.is_sequence(y_pred)
outputs_are_flat_list = (not single_output and
isinstance(y_pred, (list, tuple)) and
not any(nest.is_sequence(y_p) for y_p in y_pred))
if (single_output or outputs_are_flat_list) and isinstance(struct, dict):
output_names = output_names or create_pseudo_output_names(y_pred)
struct = copy.copy(struct)
new_struct = [struct.pop(name, None) for name in output_names]
if struct:
raise ValueError('Found unexpected keys that do not correspond '
'to any Model output: {}. Expected: {}'.format(
struct.keys(), output_names))
if len(new_struct) == 1:
return new_struct[0]
return new_struct
else:
return struct
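# Illustrative sketch (not part of the original module): the dict-to-list mapping that
# map_to_output_names performs, on plain Python values. The output names and metric
# specs below are hypothetical.
def _example_map_to_output_names():
  output_names = ['out_a', 'out_b']
  struct = {'out_b': 'mae', 'out_a': 'mse'}
  struct = dict(struct)
  new_struct = [struct.pop(name, None) for name in output_names]
  if struct:
    raise ValueError('Unexpected keys: {}'.format(list(struct)))
  return new_struct                   # ['mse', 'mae'], ordered like the outputs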
def map_missing_dict_keys(y_pred, struct):
"""Replaces missing dict keys in `struct` with `None` placeholders."""
if not isinstance(y_pred, dict) or not isinstance(struct, dict):
return struct
for k in y_pred.keys():
if k not in struct:
struct[k] = None
return struct
def match_dtype_and_rank(y_t, y_p, sw):
"""Match dtype and rank of predictions."""
if y_t.shape.rank == 1 and y_p.shape.rank == 2:
y_t = array_ops.expand_dims_v2(y_t, axis=-1)
if sw is not None:
if sw.shape.rank == 1 and y_p.shape.rank == 2:
sw = array_ops.expand_dims_v2(sw, axis=-1)
# Dtype.
y_t = math_ops.cast(y_t, y_p.dtype)
if sw is not None:
sw = math_ops.cast(sw, y_p.dtype)
return y_t, y_p, sw
def apply_mask(y_p, sw):
"""Applies any mask on predictions to sample weights."""
# Handle Keras mask on outputs.
mask = getattr(y_p, '_keras_mask', None)
if mask is not None:
mask = math_ops.cast(mask, y_p.dtype)
if sw is not None:
mask, _, sw = (
tf_losses_utils.squeeze_or_expand_dimensions(mask, sample_weight=sw))
sw *= mask
else:
sw = mask
return sw
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
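# Illustrative sketch (not part of the generated client): how the URL template above is
# filled once the path arguments have been serialized. Plain str.format stands in for
# _format_url_section; the identifiers are hypothetical.
def _example_fill_url_template():
    template = ('/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}'
                '/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices')
    return template.format(
        subscriptionId='00000000-0000-0000-0000-000000000000',
        resourceGroupName='my-rg',
        accountName='mystorageacct',
    )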
def build_set_service_properties_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-08-01"
file_services_name = "default"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"FileServicesName": _SERIALIZER.url("file_services_name", file_services_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_service_properties_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01"
file_services_name = "default"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"FileServicesName": _SERIALIZER.url("file_services_name", file_services_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class FileServicesOperations(object):
"""FileServicesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> "_models.FileServiceItems":
"""List all file services in storage accounts.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileServiceItems, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_08_01.models.FileServiceItems
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileServiceItems"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FileServiceItems', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices'} # type: ignore
@distributed_trace
def set_service_properties(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.FileServiceProperties",
**kwargs: Any
) -> "_models.FileServiceProperties":
"""Sets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The properties of file services in storage accounts, including CORS
(Cross-Origin Resource Sharing) rules.
:type parameters: ~azure.mgmt.storage.v2021_08_01.models.FileServiceProperties
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileServiceProperties, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_08_01.models.FileServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'FileServiceProperties')
request = build_set_service_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.set_service_properties.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FileServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}'} # type: ignore
@distributed_trace
def get_service_properties(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> "_models.FileServiceProperties":
"""Gets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileServiceProperties, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_08_01.models.FileServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_service_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self.get_service_properties.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FileServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}'} # type: ignore
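# Illustrative usage sketch (not part of the generated SDK above): these
# operations are normally reached through the versioned StorageManagementClient
# rather than by instantiating FileServicesOperations directly. The credential
# class and client wiring are assumptions based on the azure-identity and
# azure-mgmt-storage packages.
def _example_list_file_services(subscription_id, resource_group_name, account_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.storage.v2021_08_01 import StorageManagementClient
    client = StorageManagementClient(DefaultAzureCredential(), subscription_id)
    # client.file_services is an instance of the FileServicesOperations class above.
    return client.file_services.list(resource_group_name, account_name)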
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Any
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.compute.base import VolumeSnapshot
from libcloud.utils.py3 import httplib
__all__ = [
"API_HOST",
"VultrConnection",
"VultrException",
"VultrResponse",
"DEFAULT_API_VERSION",
"VultrResponseV2",
"VultrConnectionV2",
"VultrNetwork",
"VultrNodeSnapshot",
]
# Endpoint for the Vultr API
API_HOST = "api.vultr.com"
DEFAULT_API_VERSION = "2"
class VultrResponse(JsonResponse):
objects = None
error_dict = {} # type: Dict[str, str]
errors = None
ERROR_CODE_MAP = {
400: "Invalid API location. Check the URL that you are using.",
403: "Invalid or missing API key. Check that your API key is present"
+ " and matches your assigned key.",
405: "Invalid HTTP method. Check that the method (POST|GET) matches"
+ " what the documentation indicates.",
412: "Request failed. Check the response body for a more detailed"
+ " description.",
500: "Internal server error. Try again at a later time.",
503: "Rate limit hit. API requests are limited to an average of 1/s."
+ " Try your request again later.",
}
def __init__(self, response, connection):
self.errors = []
super(VultrResponse, self).__init__(response=response, connection=connection)
self.objects, self.errors = self.parse_body_and_errors()
if not self.success():
raise self._make_excp(self.errors[0])
def parse_body_and_errors(self):
"""
Returns JSON data in a python list.
"""
json_objects = []
errors = []
if self.status in self.ERROR_CODE_MAP:
self.error_dict["ERRORCODE"] = self.status
self.error_dict["ERRORMESSAGE"] = self.ERROR_CODE_MAP[self.status]
errors.append(self.error_dict)
js = super(VultrResponse, self).parse_body()
if isinstance(js, dict):
js = [js]
json_objects.append(js)
return (json_objects, errors)
def _make_excp(self, error):
"""
Convert API error to a VultrException instance
"""
return VultrException(error["ERRORCODE"], error["ERRORMESSAGE"])
def success(self):
return len(self.errors) == 0
class VultrConnection(ConnectionKey):
"""
A connection to the Vultr API
"""
host = API_HOST
responseCls = VultrResponse
def add_default_params(self, params):
"""
Returns default params such as api_key which is
        needed to perform an action. Returns a dictionary.
        Example: /v1/server/upgrade_plan?api_key=self.key
"""
params["api_key"] = self.key
return params
def add_default_headers(self, headers):
"""
Returns default headers such as content-type.
Returns a dictionary.
"""
headers["Content-Type"] = "application/x-www-form-urlencoded"
headers["Accept"] = "text/plain"
return headers
def set_path(self):
self.path = "/v/"
return self.path
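# Illustrative sketch (not part of libcloud): what the v1 defaults above produce
# for a given key. Constructing the connection does not open a socket, so this
# can be exercised offline; the key value is hypothetical.
def _example_v1_defaults(api_key="my-api-key"):
    conn = VultrConnection(api_key)
    params = conn.add_default_params({})    # {'api_key': 'my-api-key'}
    headers = conn.add_default_headers({})  # form-encoded body, plain-text Accept
    return params, headers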
class VultrResponseV2(JsonResponse):
valid_response_codes = [
httplib.OK,
httplib.CREATED,
httplib.ACCEPTED,
httplib.NO_CONTENT,
]
def parse_error(self):
"""
Parse the error body and raise the appropriate exception
"""
status = self.status
data = self.parse_body()
error_msg = data.get("error", "")
raise VultrException(code=status, message=error_msg)
def success(self):
"""Check the response for success
:return: ``bool`` indicating a successful request
"""
return self.status in self.valid_response_codes
class VultrConnectionV2(ConnectionKey):
"""
A connection to the Vultr API v2
"""
host = API_HOST
responseCls = VultrResponseV2
def add_default_headers(self, headers):
headers["Authorization"] = "Bearer %s" % (self.key)
headers["Content-Type"] = "application/json"
return headers
def add_default_params(self, params):
params["per_page"] = 500
return params
class VultrException(Exception):
"""
Error originating from the Vultr API
"""
def __init__(self, code, message):
self.code = code
self.message = message
self.args = (code, message)
def __str__(self):
return "(%u) %s" % (self.code, self.message)
def __repr__(self):
return "VultrException code %u '%s'" % (self.code, self.message)
class VultrNetwork:
"""
Represents information about a Vultr private network.
"""
def __init__(
self,
id: str,
cidr_block: str,
location: str,
extra: Optional[Dict[str, Any]] = None,
) -> None:
self.id = id
self.cidr_block = cidr_block
self.location = location
self.extra = extra or {}
def __repr__(self):
return "<Vultrnetwork: id=%s cidr_block=%s location=%s>" % (
self.id,
self.cidr_block,
self.location,
)
class VultrNodeSnapshot(VolumeSnapshot):
def __repr__(self):
return "<VultrNodeSnapshot id=%s size=%s driver=%s state=%s>" % (
self.id,
self.size,
self.driver.name,
self.state,
)
|
|
# =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for decode_proto op."""
import itertools
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.kernel_tests.proto import proto_op_test_base as test_base
from tensorflow.python.kernel_tests.proto import test_example_pb2
class DecodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
"""Base class for testing proto decoding ops."""
def __init__(self, decode_module, methodName='runTest'): # pylint: disable=invalid-name
"""DecodeProtoOpTestBase initializer.
Args:
decode_module: a module containing the `decode_proto_op` method
methodName: the name of the test method (same as for test.TestCase)
"""
super(DecodeProtoOpTestBase, self).__init__(methodName)
self._decode_module = decode_module
def _compareValues(self, fd, vs, evs):
"""Compare lists/arrays of field values."""
if len(vs) != len(evs):
self.fail('Field %s decoded %d outputs, expected %d' %
(fd.name, len(vs), len(evs)))
for i, ev in enumerate(evs):
# Special case fuzzy match for float32. TensorFlow seems to mess with
# MAX_FLT slightly and the test doesn't work otherwise.
# TODO(nix): ask on TF list about why MAX_FLT doesn't pass through.
if fd.cpp_type == fd.CPPTYPE_FLOAT:
# Numpy isclose() is better than assertIsClose() which uses an absolute
# value comparison.
self.assertTrue(
np.isclose(vs[i], ev), 'expected %r, actual %r' % (ev, vs[i]))
elif fd.cpp_type == fd.CPPTYPE_STRING:
# In Python3 string tensor values will be represented as bytes, so we
# reencode the proto values to match that.
self.assertEqual(vs[i], ev.encode('ascii'))
else:
# Doubles and other types pass through unscathed.
self.assertEqual(vs[i], ev)
def _compareProtos(self, batch_shape, sizes, fields, field_dict):
"""Compare protos of type TestValue.
Args:
batch_shape: the shape of the input tensor of serialized messages.
sizes: int matrix of repeat counts returned by decode_proto
fields: list of test_example_pb2.FieldSpec (types and expected values)
field_dict: map from field names to decoded numpy tensors of values
"""
# Check that expected values match.
for field in fields:
values = field_dict[field.name]
self.assertEqual(dtypes.as_dtype(values.dtype), field.dtype)
if 'ext_value' in field.name:
fd = test_example_pb2.PrimitiveValue()
else:
fd = field.value.DESCRIPTOR.fields_by_name[field.name]
# Values has the same shape as the input plus an extra
# dimension for repeats.
self.assertEqual(list(values.shape)[:-1], batch_shape)
# Nested messages are represented as TF strings, requiring
# some special handling.
if field.name == 'message_value' or 'ext_value' in field.name:
vs = []
for buf in values.flat:
msg = test_example_pb2.PrimitiveValue()
msg.ParseFromString(buf)
vs.append(msg)
if 'ext_value' in field.name:
evs = field.value.Extensions[test_example_pb2.ext_value]
else:
evs = getattr(field.value, field.name)
if len(vs) != len(evs):
self.fail('Field %s decoded %d outputs, expected %d' %
(fd.name, len(vs), len(evs)))
for v, ev in zip(vs, evs):
self.assertEqual(v, ev)
continue
tf_type_to_primitive_value_field = {
dtypes.bool:
'bool_value',
dtypes.float32:
'float_value',
dtypes.float64:
'double_value',
dtypes.int8:
'int8_value',
dtypes.int32:
'int32_value',
dtypes.int64:
'int64_value',
dtypes.string:
'string_value',
dtypes.uint8:
'uint8_value',
dtypes.uint32:
'uint32_value',
dtypes.uint64:
'uint64_value',
}
if field.name in ['enum_value', 'enum_value_with_default']:
tf_field_name = 'enum_value'
else:
tf_field_name = tf_type_to_primitive_value_field.get(field.dtype)
if tf_field_name is None:
self.fail('Unhandled tensorflow type %d' % field.dtype)
self._compareValues(fd, values.flat,
getattr(field.value, tf_field_name))
def _runDecodeProtoTests(self, fields, case_sizes, batch_shape, batch,
message_type, message_format, sanitize,
force_disordered=False):
"""Run decode tests on a batch of messages.
Args:
fields: list of test_example_pb2.FieldSpec (types and expected values)
case_sizes: expected sizes array
batch_shape: the shape of the input tensor of serialized messages
batch: list of serialized messages
message_type: descriptor name for messages
message_format: format of messages, 'text' or 'binary'
sanitize: whether to sanitize binary protobuf inputs
force_disordered: whether to force fields encoded out of order.
"""
if force_disordered:
# Exercise code path that handles out-of-order fields by prepending extra
# fields with tag numbers higher than any real field. Note that this won't
# work with sanitization because that forces reserialization using a
# trusted decoder and encoder.
assert not sanitize
extra_fields = test_example_pb2.ExtraFields()
extra_fields.string_value = 'IGNORE ME'
extra_fields.bool_value = False
extra_msg = extra_fields.SerializeToString()
batch = [extra_msg + msg for msg in batch]
# Numpy silently truncates the strings if you don't specify dtype=object.
batch = np.array(batch, dtype=object)
batch = np.reshape(batch, batch_shape)
field_names = [f.name for f in fields]
output_types = [f.dtype for f in fields]
with self.cached_session() as sess:
sizes, vtensor = self._decode_module.decode_proto(
batch,
message_type=message_type,
field_names=field_names,
output_types=output_types,
message_format=message_format,
sanitize=sanitize)
vlist = sess.run([sizes] + vtensor)
sizes = vlist[0]
# Values is a list of tensors, one for each field.
value_tensors = vlist[1:]
# Check that the repeat sizes are correct.
self.assertTrue(
np.all(np.array(sizes.shape) == batch_shape + [len(field_names)]))
# Check that the decoded sizes match the expected sizes.
self.assertEqual(len(sizes.flat), len(case_sizes))
self.assertTrue(
np.all(sizes.flat == np.array(
case_sizes, dtype=np.int32)))
field_dict = dict(zip(field_names, value_tensors))
self._compareProtos(batch_shape, sizes, fields, field_dict)
@parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
def testBinary(self, case):
batch = [value.SerializeToString() for value in case.values]
self._runDecodeProtoTests(
case.fields,
case.sizes,
list(case.shapes),
batch,
'tensorflow.contrib.proto.TestValue',
'binary',
sanitize=False)
@parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
def testBinaryDisordered(self, case):
batch = [value.SerializeToString() for value in case.values]
self._runDecodeProtoTests(
case.fields,
case.sizes,
list(case.shapes),
batch,
'tensorflow.contrib.proto.TestValue',
'binary',
sanitize=False,
force_disordered=True)
@parameterized.named_parameters(
*test_base.ProtoOpTestBase.named_parameters(extension=False))
def testPacked(self, case):
# Now try with the packed serialization.
#
# We test the packed representations by loading the same test case using
# PackedTestValue instead of TestValue. To do this we rely on the text
# format being the same for packed and unpacked fields, and reparse the
# test message using the packed version of the proto.
packed_batch = [
# Note: float_format='.17g' is necessary to ensure preservation of
# doubles and floats in text format.
text_format.Parse(
text_format.MessageToString(value, float_format='.17g'),
test_example_pb2.PackedTestValue()).SerializeToString()
for value in case.values
]
self._runDecodeProtoTests(
case.fields,
case.sizes,
list(case.shapes),
packed_batch,
'tensorflow.contrib.proto.PackedTestValue',
'binary',
sanitize=False)
@parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
def testText(self, case):
# Note: float_format='.17g' is necessary to ensure preservation of
# doubles and floats in text format.
text_batch = [
text_format.MessageToString(
value, float_format='.17g') for value in case.values
]
self._runDecodeProtoTests(
case.fields,
case.sizes,
list(case.shapes),
text_batch,
'tensorflow.contrib.proto.TestValue',
'text',
sanitize=False)
@parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
def testSanitizerGood(self, case):
batch = [value.SerializeToString() for value in case.values]
self._runDecodeProtoTests(
case.fields,
case.sizes,
list(case.shapes),
batch,
'tensorflow.contrib.proto.TestValue',
'binary',
sanitize=True)
@parameterized.parameters((False), (True))
def testCorruptProtobuf(self, sanitize):
corrupt_proto = 'This is not a binary protobuf'
# Numpy silently truncates the strings if you don't specify dtype=object.
batch = np.array(corrupt_proto, dtype=object)
msg_type = 'tensorflow.contrib.proto.TestCase'
field_names = ['sizes']
field_types = [dtypes.int32]
with self.assertRaisesRegexp(
errors.DataLossError, 'Unable to parse binary protobuf'
'|Failed to consume entire buffer'):
self.evaluate(
self._decode_module.decode_proto(
batch,
message_type=msg_type,
field_names=field_names,
output_types=field_types,
sanitize=sanitize))
def testOutOfOrderRepeated(self):
fragments = [
test_example_pb2.TestValue(double_value=[1.0]).SerializeToString(),
test_example_pb2.TestValue(
message_value=[test_example_pb2.PrimitiveValue(
string_value='abc')]).SerializeToString(),
test_example_pb2.TestValue(
message_value=[test_example_pb2.PrimitiveValue(
string_value='def')]).SerializeToString()
]
all_fields_to_parse = ['double_value', 'message_value']
field_types = {
'double_value': dtypes.double,
'message_value': dtypes.string,
}
# Test against all 3! permutations of fragments, and for each permutation
# test parsing all possible combination of 2 fields.
for indices in itertools.permutations(range(len(fragments))):
proto = b''.join(fragments[i] for i in indices)
for i in indices:
if i == 1:
expected_message_values = [
test_example_pb2.PrimitiveValue(
string_value='abc').SerializeToString(),
test_example_pb2.PrimitiveValue(
string_value='def').SerializeToString(),
]
break
if i == 2:
expected_message_values = [
test_example_pb2.PrimitiveValue(
string_value='def').SerializeToString(),
test_example_pb2.PrimitiveValue(
string_value='abc').SerializeToString(),
]
break
expected_field_values = {
'double_value': [[1.0]],
'message_value': [expected_message_values],
}
for num_fields_to_parse in range(len(all_fields_to_parse)):
for comb in itertools.combinations(
all_fields_to_parse, num_fields_to_parse):
parsed_values = self.evaluate(
self._decode_module.decode_proto(
[proto],
message_type='tensorflow.contrib.proto.TestValue',
field_names=comb,
output_types=[field_types[f] for f in comb],
sanitize=False)).values
self.assertLen(parsed_values, len(comb))
for field_name, parsed in zip(comb, parsed_values):
self.assertAllEqual(parsed, expected_field_values[field_name],
'perm: {}, comb: {}'.format(indices, comb))
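# Illustrative sketch (not exercised by the tests above): outside of these test
# harnesses the op is exposed as tf.io.decode_proto; the field selection below
# is hypothetical. sizes carries per-field repeat counts and values carries one
# tensor per requested field, mirroring what _runDecodeProtoTests unpacks.
def _example_decode_proto(serialized_batch):
    import tensorflow as tf
    sizes, values = tf.io.decode_proto(
        bytes=serialized_batch,
        message_type='tensorflow.contrib.proto.TestValue',
        field_names=['double_value'],
        output_types=[tf.float64])
    return sizes, values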
|
|
from panda3d.core import *
from panda3d.direct import *
from direct.showbase import DirectObject
from direct.directnotify import DirectNotifyGlobal
from toontown.launcher import DownloadForceAcknowledge
import string
import random
from toontown.toonbase import ToontownGlobals
from toontown.hood import ZoneUtil
class HoodMgr(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('HoodMgr')
ToontownCentralInitialDropPoints = (
[65.860, 142.571, 2.526, 790.259, 0, 0],
[57.473, 125.268, 2.526, 756.419, 0, 0],
[45.851, 119.799, 2.526, 752.187, 0, 0],
[32.971, 116.496, 2.526, 735.172, 0, 0],
[22.488, 116.245, 2.526, 719.057, 0, 0],
[35.136, 132.832, 2.526, 746.570, 0, 0]
)
ToontownCentralHQDropPoints = (
[12.915, -24.938, 4.025, 256.087, 0, 0],
[19.111, -16.684, 4.025, 230.295, 0, 0],
[44.362, -5.827, 4.025, 183.660, 0, 0],
[75.218, -62.574, 4.025, 69.765, 0, 0],
[64.095, -98.374, 2.525, 8.584, 0, 0]
)
ToontownCentralTunnelDropPoints = (
[43.086, -134.343, 2.525, 153.905, 0, 0],
[30.529, -132.724, 2.525, 174.786, 0, 0],
[-112.550, -16.980, -0.539, 55.739, 0, 0],
[-113.516, 5.141, -0.332, 86.807, 0, 0],
[-127.868, 19.685, 0.025, 124.113, 0, 0],
[51.805, 1.551, 4.025, -94.668, 0, 0]
)
dropPoints = {
ToontownGlobals.DonaldsDock: (
[-28.0, -2.5, 5.8, 120.0, 0.0, 0.0],
[-22, 13, 5.8, 155.6, 0.0, 0.0],
[67, 47, 5.7, 134.7, 0.0, 0.0],
[62, 19, 5.7, 97.0, 0.0, 0.0],
[66, -27, 5.7, 80.5, 0.0, 0.0],
[-114, -7, 5.7, -97.0, -0.0, 0.0],
[-108, 36, 5.7, -153.8, -0.0, 0.0],
[-116, -46, 5.7, -70.1, -0.0, 0.0],
[-63, -79, 5.7, -41.2, -0.0, 0.0],
[-2, -79, 5.7, 57.4, -0.0, 0.0],
[-38, -78, 5.7, 9.1, -0.0, 0.0]
),
ToontownGlobals.ToontownCentral: (
[29.852, -24.213, 4.025, -144.876, 0, 0],
[47.767, 8.561, 4.025, 84.711, 0, 0],
[15.510, 0.600, 4.025, 95.333, 0, 0],
[47.125, 131.393, 2.525, 45.557, 0, 0],
[-36.086, 72.705, 0.025, 89.292, 0, 0],
[-121.765, 3.269, 0.025, 253.862, 0, 0],
[-93.421, -5.947, -0.998, 259.667, 0, 0],
[-62.664, -10.416, 1.227, 259.667, 0, 0],
[-67.422, -89.830, 0.525, 2.817, 0, 0],
[24.879, -136.027, 2.525, -119.153, 0, 0],
[105.006, -128.606, 4.051, 39.054, 0, 0],
[85.233, -59.504, 4.025, 48.868, 0, 0],
[63.280, -34.097, 4.025, 47.924, 0, 0],
[80.324, 23.511, 4.025, 132.485, 0, 0],
[103.581, 39.608, 4.025, 464.251, 0, 0],
[10.075, 75.395, 2.525, 499.846, 0, 0]
),
ToontownGlobals.TheBrrrgh: (
[35, -32, 6.2, 138, 0.0, 0.0],
[26, -105, 6.2, -339, 0.0, 0.0],
[-29, -139, 6.2, -385, 0.0, 0.0],
[-79, -123, 6.2, -369, 0.0, 0.0],
[-114, -86, 3, -54, 0.0, 0.0],
[-136, 9, 6.2, -125, 0.0, 0.0],
[-75, 92, 6.2, -187, 0.0, 0.0],
[-7, 75, 6.2, -187, 0.0, 0.0],
[-106, -42, 8.6, -111, 0.0, 0.0],
[-116, -44, 8.3, -20, 0.0, 0.0]
),
ToontownGlobals.MinniesMelodyland: (
[86, 44, -13.5, 121.1, 0.0, 0.0],
[88, -8, -13.5, 91, 0, 0],
[92, -76, -13.5, 62.5, 0.0, 0.0],
[53, -112, 6.5, 65.8, 0.0, 0.0],
[-69, -71, 6.5, -67.2, 0.0, 0.0],
[-75, 21, 6.5, -100.9, 0.0, 0.0],
[-21, 72, 6.5, -129.5, 0.0, 0.0],
[56, 72, 6.5, 138.2, 0.0, 0.0],
[-41, 47, 6.5, -98.9, 0.0, 0.0]
),
ToontownGlobals.DaisyGardens: (
[-0.998, 9.147, 0.025, -5.663, 0, 0],
[47.207, 51.395, 0.025, -77.411, 0.0, 0.0],
[113.105, 120.739, 0.025, -33.388, 0.0, 0.0],
[119.150, 187.219, 0.025, 28.910, 0.0, 0.0],
[93.115, 240.878, 14.016, -38.148, 0.0, 0.0],
[71.400, 288.892, 14.025, 44.651, 0.0, 0.0],
[39.726, 329.260, 13.925, 125.246, 0.0, 0.0],
[-24.758, 307.053, 14.025, 119.294, 0.0, 0.0],
[-74.713, 273.668, 14.026, 133.668, 0.0, 0.0],
[-81.067, 229.671, 14.025, 181.866, 0.0, 0.0],
[-117.230, 185.184, 0.025, 163.659, 0.0, 0.0]
),
ToontownGlobals.DonaldsDreamland: (
[77, 91, 0.0, 124.4, 0.0, 0.0],
[29, 92, 0.0, -154.5, 0.0, 0.0],
[-28, 49, -16.4, -142.0, 0.0, 0.0],
[21, 40, -16.0, -65.1, 0.0, 0.0],
[48, 27, -15.4, -161.0, 0.0, 0.0],
[-2, -22, -15.2, -132.1, 0.0, 0.0],
[-92, -88, 0.0, -116.3, 0.0, 0.0],
[-56, -93, 0.0, -21.5, 0.0, 0.0],
[20, -88, 0.0, -123.4, 0.0, 0.0],
[76, -90, 0.0, 11.0, 0.0, 0.0]
),
ToontownGlobals.FunnyFarm: ( # TODO: Drop points!
[0, 0, 0, 0, 0, 0],
),
ToontownGlobals.GoofySpeedway: (
[-0.7, 62, 0.08, 182, 0, 0],
[-1, -30, 0.06, 183, 0, 0],
[-13, -120, 0, 307, 0, 0],
[16.4, -120, 0, 65, 0, 0],
[-0.5, -90, 0, 182, 0, 0],
[-30, -25, -0.373, 326, 0, 0],
[29, -17, -0.373, 32, 0, 0]
),
ToontownGlobals.GolfZone: (
[-49.6, 102, 0, 162, 0, 0],
[-22.8, 36.6, 0, 157.5, 0, 0],
[40, 51, 0, 185, 0, 0],
[48.3, 122.2, 0, 192, 0, 0],
[106.3, 69.2, 0, 133, 0, 0],
[-81.5, 47.2, 0, 183, 0, 0],
[-80.5, -84.2, 0, 284, 0, 0],
[73, -111, 0, 354, 0, 0]
),
ToontownGlobals.OutdoorZone: (
[-165.8, 108, 0.025, 252, 0, 0],
[21, 130, 0.16, 170, 0, 0],
[93, 78.5, 0.23, 112, 0, 0],
[79, -1.6, 0.75, 163, 0, 0],
[10, 33, 5.32, 130.379, 0, 0],
[-200, -42, 0.025, 317.543, 0, 0],
[-21, -65, 0.335, -18, 0, 0],
[23, 68.5, 4.51, -22.808, 0, 0]
),
ToontownGlobals.Tutorial: (
[130.9, -8.6, -1.3, 105.5, 0, 0],
),
ToontownGlobals.SellbotHQ: (
[56.910, -173.576, -7.037, 15.061, 0, 0],
[-53.105, -197.259, -4.812, 25.870, 0, 0],
[-103, -118, 0.367, 622.422, 0, 0],
[-5.361, -228.596, -10.817, -118.934, 0, 0],
[-8.2536, -175.53, -19.5944, -313.592, 0, 0],
[66.7811, -96.8434, 0.286679, -567.363, 0, 0]
),
ToontownGlobals.CashbotHQ: (
[102, -437, -23.439, 360, 0, 0],
[124, -437, -23.439, 360, 0, 0],
[110, -446, -23.439, 360, 0, 0],
[132, -446, -23.439, 360, 0, 0]
),
ToontownGlobals.LawbotHQ: (
[77.5, 129.13, -68.4, -166.6, 0, 0],
[-57.7, 80.75, -68.4, -139.2, 0, 0],
[203.3, 46.36, -68.4, -213.37, 0, 0],
[88.2, -336.52, -68.4, -720.4, 0, 0],
[232.77, -305.33, -68.4, -651, 0, 0],
[-20.16, -345.76, -68.4, -777.98, 0, 0]
)
}
hoodName2Id = {
'dd': ToontownGlobals.DonaldsDock,
'tt': ToontownGlobals.ToontownCentral,
'br': ToontownGlobals.TheBrrrgh,
'mm': ToontownGlobals.MinniesMelodyland,
'dg': ToontownGlobals.DaisyGardens,
'oz': ToontownGlobals.OutdoorZone,
'ff': ToontownGlobals.FunnyFarm,
'gs': ToontownGlobals.GoofySpeedway,
'dl': ToontownGlobals.DonaldsDreamland,
'bosshq': ToontownGlobals.BossbotHQ,
'sellhq': ToontownGlobals.SellbotHQ,
'cashhq': ToontownGlobals.CashbotHQ,
'lawhq': ToontownGlobals.LawbotHQ,
'gz': ToontownGlobals.GolfZone
}
hoodId2Name = {
ToontownGlobals.DonaldsDock: 'dd',
ToontownGlobals.ToontownCentral: 'tt',
ToontownGlobals.Tutorial: 'tt',
ToontownGlobals.TheBrrrgh: 'br',
ToontownGlobals.MinniesMelodyland: 'mm',
ToontownGlobals.DaisyGardens: 'dg',
ToontownGlobals.OutdoorZone: 'oz',
ToontownGlobals.FunnyFarm: 'ff',
ToontownGlobals.GoofySpeedway: 'gs',
ToontownGlobals.DonaldsDreamland: 'dl',
ToontownGlobals.BossbotHQ: 'bosshq',
ToontownGlobals.SellbotHQ: 'sellhq',
ToontownGlobals.CashbotHQ: 'cashhq',
ToontownGlobals.LawbotHQ: 'lawhq',
ToontownGlobals.GolfZone: 'gz'
}
DefaultDropPoint = [0] * 6
dbgDropMode = 0
currentDropPoint = 0
def __init__(self, cr):
self.cr = cr
def getDropPoint(self, dropPointList):
if self.dbgDropMode == 0:
return random.choice(dropPointList)
else:
droppnt = self.currentDropPoint % len(dropPointList)
self.currentDropPoint = (self.currentDropPoint + 1) % len(dropPointList)
return dropPointList[droppnt]
def getAvailableZones(self):
        if base.launcher is None:
            return (self.getZonesInPhase(4) + self.getZonesInPhase(6) +
                    self.getZonesInPhase(8) + self.getZonesInPhase(9) +
                    self.getZonesInPhase(10) + self.getZonesInPhase(11) +
                    self.getZonesInPhase(12) + self.getZonesInPhase(13))
        else:
            zones = []
            for phase in set(ToontownGlobals.phaseMap.values()):
                if base.launcher.getPhaseComplete(phase):
                    zones = zones + self.getZonesInPhase(phase)
            return zones
def getZonesInPhase(self, phase):
p = []
for i in ToontownGlobals.phaseMap.items():
if i[1] == phase:
p.append(i[0])
return p
def getPhaseFromHood(self, hoodId):
hoodId = ZoneUtil.getCanonicalHoodId(hoodId)
return ToontownGlobals.phaseMap[hoodId]
def getPlaygroundCenterFromId(self, hoodId):
dropPointList = self.dropPoints.get(hoodId, None)
if dropPointList:
return self.getDropPoint(dropPointList)
else:
self.notify.warning('getPlaygroundCenterFromId: No such hood name as: ' + str(hoodId))
return self.DefaultDropPoint
def getIdFromName(self, hoodName):
id = self.hoodName2Id.get(hoodName)
if id:
return id
else:
self.notify.error('No such hood name as: %s' % hoodName)
return None
def getNameFromId(self, hoodId):
name = self.hoodId2Name.get(hoodId)
if name:
return name
else:
self.notify.error('No such hood id as: %s' % hoodId)
return None
def getFullnameFromId(self, hoodId):
hoodId = ZoneUtil.getCanonicalZoneId(hoodId)
return ToontownGlobals.hoodNameMap[hoodId][-1]
def addLinkTunnelHooks(self, hoodPart, nodeList, currentZoneId):
tunnelOriginList = []
for i in nodeList:
linkTunnelNPC = i.findAllMatches('**/linktunnel*')
for p in range(linkTunnelNPC.getNumPaths()):
linkTunnel = linkTunnelNPC.getPath(p)
name = linkTunnel.getName()
nameParts = name.split('_')
hoodStr = nameParts[1]
zoneStr = nameParts[2]
hoodId = self.getIdFromName(hoodStr)
zoneId = int(zoneStr)
hoodId = ZoneUtil.getTrueZoneId(hoodId, currentZoneId)
zoneId = ZoneUtil.getTrueZoneId(zoneId, currentZoneId)
linkSphere = linkTunnel.find('**/tunnel_trigger')
if linkSphere.isEmpty():
linkSphere = linkTunnel.find('**/tunnel_sphere')
if not linkSphere.isEmpty():
cnode = linkSphere.node()
cnode.setName('tunnel_trigger_' + hoodStr + '_' + zoneStr)
cnode.setCollideMask(ToontownGlobals.WallBitmask | ToontownGlobals.GhostBitmask)
else:
linkSphere = linkTunnel.find('**/tunnel_trigger_' + hoodStr + '_' + zoneStr)
if linkSphere.isEmpty():
self.notify.error('tunnel_trigger not found')
tunnelOrigin = linkTunnel.find('**/tunnel_origin')
if tunnelOrigin.isEmpty():
self.notify.error('tunnel_origin not found')
tunnelOriginPlaceHolder = render.attachNewNode('toph_' + hoodStr + '_' + zoneStr)
tunnelOriginList.append(tunnelOriginPlaceHolder)
tunnelOriginPlaceHolder.setPos(tunnelOrigin.getPos(render))
tunnelOriginPlaceHolder.setHpr(tunnelOrigin.getHpr(render))
hood = base.localAvatar.cr.playGame.hood
if ZoneUtil.tutorialDict:
how = 'teleportIn'
tutorialFlag = 1
else:
how = 'tunnelIn'
tutorialFlag = 0
hoodPart.accept('enter' + linkSphere.getName(), hoodPart.handleEnterTunnel, [{'loader': ZoneUtil.getLoaderName(zoneId),
'where': ZoneUtil.getToonWhereName(zoneId),
'how': how,
'hoodId': hoodId,
'zoneId': zoneId,
'shardId': None,
'tunnelOrigin': tunnelOriginPlaceHolder,
'tutorial': tutorialFlag}])
return tunnelOriginList
def extractGroupName(self, groupFullName):
return groupFullName.split(':', 1)[0]
def makeLinkTunnelName(self, hoodId, currentZone):
return '**/toph_' + self.getNameFromId(hoodId) + '_' + str(currentZone)
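# Illustrative sketch (standalone, no Panda3D scene graph required): how
# addLinkTunnelHooks derives a hood id and zone id from a link tunnel node
# name of the form 'linktunnel_<hood>_<zone>...'. The node name here is
# hypothetical.
def exampleParseLinkTunnelName(name='linktunnel_tt_2000_DNARoot'):
    nameParts = name.split('_')
    hoodStr = nameParts[1]      # 'tt'
    zoneId = int(nameParts[2])  # 2000
    hoodId = HoodMgr.hoodName2Id[hoodStr]
    return hoodId, zoneId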
|
|
#!/usr/bin/env python
import argparse
import os
import stat
import sys
from six.moves import input
from clint.textui import puts
from will.utils import print_head
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__)))
sys.path.append(PROJECT_ROOT)
sys.path.append(os.getcwd())
parser = argparse.ArgumentParser()
parser.add_argument(
'--config-dist-only',
action='store_true',
help='Only output a config.py.dist.'
)
args = parser.parse_args()
requirements_txt = "will\n"
class EmptyObj(object):
pass
def cleaned(service_name):
    return service_name.lower().replace(".", "")
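# e.g. cleaned("Rocket.Chat") -> "rocketchat", matching the io_adapters module
# naming that enable_disable_service() below substitutes into the config template.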
def ask_user(question):
response = "?"
while response not in ["y", "n"]:
response = input("%s [y/n] " % question)
if response not in ["y", "n"]:
print("Please enter 'y' or 'n'.")
return response.startswith("y")
def enable_disable_service(service_name, source):
global requirements_txt
if ask_user(" Do you want to enable %s support?" % (service_name)):
source = source.replace("# will.backends.io_adapters.%s" % cleaned(service_name), "will.backends.io_adapters.%s" % cleaned(service_name))
req_path = os.path.join(os.path.join(PROJECT_ROOT, "..", "requirements"), "%s.txt" % cleaned(service_name))
print(req_path)
if os.path.exists(req_path):
with open(req_path, 'r') as f:
requirements_txt = "%s\n# %s\n%s" % (requirements_txt, service_name, f.read())
else:
source = source.replace("will.backends.io_adapters.%s" % cleaned(service_name), "# will.backends.io_adapters.%s" % cleaned(service_name))
return source
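# Illustrative sketch of the toggle above on a hypothetical config.py.dist line:
# answering "y" for "Slack" rewrites
#     # "will.backends.io_adapters.slack",
# to
#     "will.backends.io_adapters.slack",
# (declining performs the reverse replacement), and the matching
# requirements/<service>.txt, if present, is appended to requirements_txt.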
def main():
"""
Creates the following structure:
/plugins
__init__.py
hello.py
/templates
blank.html
.gitignore
run_will.py
requirements.txt
Procfile
README.md
"""
print_head()
puts("Welcome to the will project generator.")
puts("")
if args.config_dist_only:
print("Generating config.py.dist...")
else:
print("\nGenerating will scaffold...")
current_dir = os.getcwd()
plugins_dir = os.path.join(current_dir, "plugins")
templates_dir = os.path.join(current_dir, "templates")
if not args.config_dist_only:
print(" /plugins")
# Set up the directories
if not os.path.exists(plugins_dir):
os.makedirs(plugins_dir)
print(" __init__.py")
# Create the plugins __init__.py
with open(os.path.join(plugins_dir, "__init__.py"), 'w+') as f:
pass
print(" morning.py")
# Create the morning plugin
morning_file_path = os.path.join(plugins_dir, "morning.py")
if not os.path.exists(morning_file_path):
with open(morning_file_path, 'w+') as f:
f.write("""from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
class MorningPlugin(WillPlugin):
@respond_to("^good morning")
def good_morning(self, message):
self.reply("oh, g'morning!")
""")
print(" /templates")
if not os.path.exists(templates_dir):
os.makedirs(templates_dir)
print(" blank.html")
# Create the plugins __init__.py
with open(os.path.join(templates_dir, "blank.html"), 'w+') as f:
pass
print(" .gitignore")
# Create .gitignore, or at least add shelf.db
gitignore_path = os.path.join(current_dir, ".gitignore")
if not os.path.exists(gitignore_path):
with open(gitignore_path, 'w+') as f:
f.write("""*.py[cod]
pip-log.txt
shelf.db
""")
else:
append_ignore = False
with open(gitignore_path, "r+") as f:
if "shelf.db" not in f.read():
append_ignore = True
if append_ignore:
with open(gitignore_path, "a") as f:
f.write("\nshelf.db\n")
# Create run_will.py
print(" run_will.py")
run_will_path = os.path.join(current_dir, "run_will.py")
if not os.path.exists(run_will_path):
with open(run_will_path, 'w+') as f:
f.write("""#!/usr/bin/env python
from will.main import WillBot
if __name__ == '__main__':
bot = WillBot()
bot.bootstrap()
""")
# And make it executable
st = os.stat('run_will.py')
os.chmod("run_will.py", st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
# Create config.py
print(" config.py.dist")
config_path = os.path.join(current_dir, "config.py.dist")
if not os.path.exists(config_path) or ask_user("! config.py.dist exists. Overwrite it?"):
with open(os.path.join(PROJECT_ROOT, "config.py.dist"), "r") as source_f:
source = source_f.read()
# Ask on backends
print("\nWill supports a few different service backends. Let's set up the ones you want:\n")
source = enable_disable_service("Slack", source)
source = enable_disable_service("HipChat", source)
source = enable_disable_service("Rocket.Chat", source)
source = enable_disable_service("Shell", source)
with open(config_path, "w+") as f:
config = source
f.write(config)
if not args.config_dist_only:
print(" requirements.txt")
# Create requirements.txt
requirements_path = os.path.join(current_dir, "requirements.txt")
if not os.path.exists(requirements_path) or ask_user("! requirements.txt exists. Overwrite it?"):
with open(requirements_path, 'w+') as f:
f.write(requirements_txt)
print(" Procfile")
# Create Procfile
requirements_path = os.path.join(current_dir, "Procfile")
if not os.path.exists(requirements_path):
with open(requirements_path, 'w+') as f:
f.write("web: python run_will.py")
print(" README.md")
# Create the readme
readme_path = os.path.join(current_dir, "README.md")
if not os.path.exists(readme_path):
with open(readme_path, 'w+') as f:
f.write("""
This is our bot, a [will](https://github.com/skoczen/will) bot.
""")
print("\nDone.")
print("\n Your will is now ready to go. Run ./run_will.py to get started!")
else:
print("\nCreated a config.py.dist. Open it up to see what's new!\n")
if __name__ == '__main__':
main()
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:4320")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:4320")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a SPEC address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a SPEC address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
        print "\n---Passphrase changed---\n"
    except:
        print "\n---An error occurred---\n"
else:
print "Command not found or not supported"
|
|
import numpy as np
import pandas as pd
import six
from functools import wraps, partial
from memory_profiler import memory_usage
import sys, os, time
from collections import OrderedDict, Iterable, namedtuple
from sqlalchemy import (Table, Column, Integer, Float, String, Sequence, BLOB,
MetaData, ForeignKey, create_engine, select,
UniqueConstraint, text)
#from multiprocessing import Process
import matplotlib
import matplotlib.pyplot as plt
import platform
import cProfile
import subprocess, time
from operator import itemgetter, attrgetter
import operator
import types
import copy
import unittest
from .utils import *
bench_cnt = 0
def bench(f=None, name=None, **kw):
def bench_decorator_impl(f):
global bench_cnt
f.bench_info = dict(name=name, pos=bench_cnt, **kw)
bench_cnt += 1
@wraps(f)
def wrapped(inst, *args, **kwargs):
return f(inst, *args, **kwargs)
return wrapped
if f is None:
return bench_decorator_impl
return bench_decorator_impl(f)
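# Illustrative sketch (hypothetical subclass) of how @bench is meant to be used
# with the BenchmarkCase class defined below: each decorated method becomes a
# case, and corrected_by names another case whose measured cost is subtracted.
#
#   class MyBench(BenchmarkCase):
#       nb_step = 3
#       nb_repeat = 5
#
#       @bench(name="baseline")
#       def case_baseline(self):
#           pass
#
#       @bench(name="list_build", corrected_by="baseline")
#       def case_list_build(self):
#           data = [i * i for i in range(10000 * self.current_step)]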
class BenchMeta(type):
def __init__(cls, name, bases, attrs):
global bench_cnt
bench_cnt = 0
super(BenchMeta, cls).__init__(name, bases, attrs)
bench_list = []
for obj in attrs.values():
if hasattr(obj, 'bench_info'):
dict_ = getattr(obj, 'bench_info')
dict_['func'] = obj
if dict_['name'] is None:
dict_['name'] = obj.__name__
bench_list.append(dict_)
delattr(obj, 'bench_info')
cls._bench_list = sorted(bench_list, key=itemgetter('pos'))
#if cls._bench_list:
# cls.runTest = lambda self: cls.runBench(self)
class BenchmarkCase(six.with_metaclass(BenchMeta, unittest.TestCase)):
"""
    Each case should be placed in a method decorated by
    @bench(name=<unique_name>, corrected_by=<other_case_name>).
    When subclassing BenchmarkCase, you can set these attributes:
    * nb_step : number of steps; conditions (e.g. data size) should differ between two steps
    * nb_repeat : number of repetitions for each step, under identical conditions
    * description : human-readable description stored with the benchmark results
    * database_name : temporary file if not defined
    * with_memory_prof = True|False
    * with_time_prof = True|False
    * with_code_prof = True|False
"""
nb_step = 1
nb_repeat = 1
with_memory_prof = True
with_time_prof = True
with_code_prof = False
def __init__(self, methodName='runTest'):
# if methodName != 'runBench':
# raise ValueError('only runBench method is allowed here')
#print("methodName: ".format(methodName))
self._env = None
self._name = get_random_name("B")
self.nb_step = 1
self.nb_repeat = 1
self._current_step = 0
self.description = "Undefined description"
#self._input_proc = InputProc(repr_type=str, label="My label")
self._step_info = None
self._step_header = "Steps"
self.with_time_prof = True
self.with_memory_prof = True
self.with_code_prof = False
self._sql_id = None
self._cases = []
self._corrections = []
super(BenchmarkCase, self).__init__(methodName)
def set_step_info(self, info):
self._step_info = info
def set_step_header(self, header):
self._step_header = header
@property
def step_info(self):
return (self._step_info if self._step_info is not None
else str(self._current_step))
@property
def step_header(self):
return self._step_header
def setUp(self):
pass
def tearDown(self):
pass
def setUpStep(self, step):
pass
def tearDownStep(self, step):
pass
@property
def current_step(self):
return self._current_step
def dump(self):
lst_table=['bench_tbl', 'case_tbl','measurement_tbl']
for tbl in lst_table:
dump_table(tbl, self.env._db_name)
def save(self, db_name, name=None):
dict_ = {}
for case_name in self.case_names:
dict_[case_name] = self.to_df(case_name, corrected=False,
raw_header=True,
all_columns=True)
new_obj = copy.copy(self)
new_obj._env = BenchEnv(db_name=db_name)
if name is not None:
new_obj._name = name
new_obj._sql_id = None
new_obj.init_db_entry()
ins = new_obj.env.measurement_tbl.insert()
with new_obj.env.engine.connect() as conn:
for case_name, case_df in dict_.items():
headers = case_df.columns.values
for _, row in case_df.iterrows():
ins_val = dict(zip(headers, row))
case_id = new_obj.get_case_id(case_name)
ins_val['case_id'] = case_id
del ins_val['id']
conn.execute(ins, ins_val)
return new_obj
@staticmethod
def load(db_name, name):
bm = BenchmarkCase()
bm._env = BenchEnv(db_name=db_name)
bm._name = name
props = bm.load_bench_entry()
if not props:
return None
bm._sql_id = props['id']
bm._step_header = props['step_header']
return bm
def init_db_entry(self):
tbl = self.env._bench_tbl
ins = tbl.insert().values(name=self._name, description=self.description,
step_header=self._step_header,
py_version=platform.python_version(),
py_compiler=platform.python_compiler(),
py_platform=platform.platform(),
py_impl=platform.python_implementation(),
cpu_info=get_cpu_info(),
)
with self.env.engine.connect() as conn:
conn.execute(ins)
cls = type(self)
tbl = self.env._case_tbl
with self.env.engine.connect() as conn:
for dict_ in cls._bench_list:
name = dict_['name']
func = dict_['func']
corrected_by = dict_.get('corrected_by', None)
self._corrections.append((name, corrected_by))
ins = tbl.insert().values(name=name, bench_id=self.sql_id)
conn.execute(ins)
case_id = self.get_case_id(name)
runner = BenchRunner(func, case_id, corrected_by, self)
self._cases.append(runner)
self._update_corrections()
return self
@property
def sql_id(self):
if self._sql_id is not None:
return self._sql_id
tbl = self.env._bench_tbl
s = (select([tbl]).with_only_columns([tbl.c.id, tbl.c.name]).
where(tbl.c.name==self._name))
#http://www.rmunn.com/sqlalchemy-tutorial/tutorial.html
with self.env.engine.connect() as conn:
row = conn.execute(s).fetchone()
self._sql_id = row[0]
return self._sql_id
def load_bench_entry(self):
tbl = self.env._bench_tbl
s = (select([tbl]).
where(tbl.c.name==self._name))
with self.env.engine.connect() as conn:
row = conn.execute(s).fetchone()
if not row:
return None
return dict(row)
def _update_corrections(self):
for case, corr_by in self._corrections:
if corr_by is None:
continue
case_id = self.get_case_id(case)
corr_id = self.get_case_id(corr_by)
tbl = self.env._case_tbl
stmt = (tbl.update().where(tbl.c.id==case_id).
values(corrected_by=corr_id))
with self.env.engine.connect() as conn:
conn.execute(stmt)
def get_case_id(self, case):
tbl = self.env._case_tbl
s = (select([tbl]).with_only_columns([tbl.c.id]).
where(tbl.c.name==case).
where(tbl.c.bench_id==self.sql_id))
with self.env.engine.connect() as conn:
row = conn.execute(s).fetchone()
return row[0]
def get_case_corrector(self, case):
if isinstance(case, six.string_types):
case_id = self.get_case_id(case)
else:
case_id = case
tbl = self.env._case_tbl
s = (select([tbl]).with_only_columns([tbl.c.corrected_by]).
where(tbl.c.id==case_id))
with self.env.engine.connect() as conn:
row = conn.execute(s).fetchone()
return row[0]
@property
def correctors(self):
tbl = self.env._case_tbl
s = select([tbl]).with_only_columns([tbl.c.corrected_by])
with self.env.engine.connect() as conn:
rows = conn.execute(s).fetchall()
return [e[0] for e in rows if e[0] is not None]
def runBench(self):
self.init_db_entry()
for elt in self._cases:
self._current_step = 0
for step in range(1, self.nb_step+1):
try:
self._current_step += 1
self.setUpStep(step)
                except unittest.SkipTest:
                    # No unittest result object exists in this context, so a
                    # skipped step is simply not executed.
                    continue
except KeyboardInterrupt:
raise
except:
#unittest.result.addError(self, sys.exc_info())
raise
else:
elt.run(step)
try:
self.tearDownStep(step)
except KeyboardInterrupt:
raise
except:
#unittest.result.addError(self, sys.exc_info())
success = False
@property
def _col_dict(self):
return {'case': 'Case', 'corrected_by':'Corrected by', 'i_th':'Measure',
'mem_usage': 'Memory usage', 'elapsed_time': 'Elapsed time',
'sys_time': 'System time', 'user_time': 'User time',
'ld_avg_1':'Load avg.(-1s)',
'ld_avg_5':'Load avg.(-5s)','ld_avg_15':'Load avg.(-15s)',
'step_info': self._step_header}
@property
def case_names(self):
tbl = self.env._case_tbl
s = (select([tbl]).with_only_columns([tbl.c.name]).
where(tbl.c.bench_id==self.sql_id))
with self.env.engine.connect() as conn:
rows = conn.execute(s).fetchall()
return set([elt.name for elt in rows])
#df = pd.read_sql_query(s, conn)
#return set(df['name'].values)
def step_col_df(self, case_id):
projection = [text('step_info')]
tbl = self.env.measurement_tbl
s = (select([tbl]).with_only_columns(projection).
where(tbl.c.case_id==case_id).
order_by(tbl.c.id))
        with self.env.engine.connect() as conn:
            return pd.read_sql_query(s, conn)
def df_subtract(self, this, other, sub_cols, raw_header):
ret = []
for col in this.columns:
if col in sub_cols:
arr = this[col].values - other[col].values
else:
arr = this[col].values
key = col if raw_header else self._col_dict.get(col,col)
ret.append((key, arr))
return pd.DataFrame.from_items(ret)
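    # Illustration (hypothetical values): if a case measured
    # mem_usage=[120, 125] and its corrector measured mem_usage=[100, 101],
    # df_subtract yields mem_usage=[20, 24], while columns outside sub_cols
    # (e.g. 'i_th', 'step_info') are copied from the case unchanged.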
def pretty_header(self, df, raw_header):
if raw_header:
return df
header = [self._col_dict.get(col,col) for col in df.columns]
df.columns = header
return df
def to_df(self, case, with_mem=True, with_times=True,
with_step_info=True, corrected=True, raw_header=False, all_columns=False):
if isinstance(case, six.string_types):
case_id = self.get_case_id(case)
else:
case_id = case
        # 'step' is included so the grouped (mean/min/max) plot options can
        # aggregate measurements per step.
        projection = ['i_th', 'step']
        if with_step_info:
            projection.append('step_info')
if with_mem:
projection.append('mem_usage')
if with_times:
projection += ['elapsed_time', 'sys_time', 'user_time']
projection += ['ld_avg_1', 'ld_avg_5', 'ld_avg_15']
tbl = self.env.measurement_tbl
only_columns = [col for col in tbl.columns if col.name in projection]
s = (select([tbl]).
where(tbl.c.case_id==case_id).
order_by(tbl.c.id)) #.with_only_columns(only_columns)
if not all_columns:
s = s.with_only_columns(only_columns)
        with self.env.engine.connect() as conn:
            df = pd.read_sql_query(s, conn)  #, index_col=index_col)
if not corrected:
return self.pretty_header(df, raw_header)
corr = self.get_case_corrector(case_id)
if corr is None:
return self.pretty_header(df, raw_header)
corr_df = self.to_df(corr, corrected=False, raw_header=True)
return self.df_subtract(df, corr_df, ['mem_usage','elapsed_time',
'sys_time', 'user_time'],
raw_header)
def __getitem__(self, case):
return self.to_df(case, raw_header=True, all_columns=True)
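    # Usage sketch (hypothetical case name): bench['my_case'] is shorthand for
    # bench.to_df('my_case', raw_header=True, all_columns=True), i.e. the full
    # measurement table for that case as a pandas DataFrame.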
def plot(self, cases=None, x=None, y=None, corrected=True, plot_opt='all'):
if cases is None:
cases = self.case_names
elif isinstance(cases, six.string_types):
cases = set([cases])
else:
cases = set(cases)
if not cases.issubset(self.case_names):
            raise ValueError("Unknown case(s): {}".format(cases - self.case_names))
#df = self.to_df(raw_header=True)
from matplotlib.lines import Line2D
favorite_colors = ['red', 'blue', 'magenta', 'orange', 'grey',
'yellow', 'black']
colors = list(matplotlib.colors.cnames.keys())
customized_colors = (favorite_colors +
[c for c in colors if c not in favorite_colors])
Bplot = namedtuple('Bplot','key, title, ylabel')
mode = "corrected mode, show " if corrected else "raw mode, show "
mode += plot_opt
plots = [Bplot('mem_usage', '[{}] Memory usage ({})'.format(self._name, mode),
'Used memory (Mb)'),
Bplot('elapsed_time', '[{}] Elapsed time ({})'.format(self._name, mode),
'Time (ms)'),
Bplot('sys_time', '[{}] System time ({})'.format(self._name, mode),
'Time (ms)'),
Bplot('user_time', '[{}] User time ({})'.format(self._name, mode),
'Time (ms)'),]
correctors_ = self.correctors
for bp in plots:
for i, case in enumerate(cases):
if corrected and self.get_case_id(case) in correctors_:
continue
df = self.to_df(case=case, raw_header=True, corrected=corrected)
repeat = df['i_th'].values.max() + 1
if x is None:
x = range(1, len(self.step_col_df(self.get_case_id(case)))//repeat+1)
kw = {'label': case}
if plot_opt == 'all':
for r in range(repeat):
dfq = df.query('i_th=={}'.format(r))
y = dfq[bp.key].values
plt.plot(x, y, customized_colors[i], **kw)
kw = {}
elif plot_opt == 'mean':
y = df.groupby(['step'])[bp.key].mean().values
plt.plot(x, y, customized_colors[i], **kw)
elif plot_opt == 'min':
y = df.groupby(['step'])[bp.key].min().values
plt.plot(x, y, customized_colors[i], **kw)
elif plot_opt == 'max':
y = df.groupby(['step'])[bp.key].max().values
plt.plot(x, y, customized_colors[i], **kw)
plt.title(bp.title)
plt.ylabel(bp.ylabel)
plt.xlabel(self._step_header)
plt.legend()
plt.show()
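    # Usage sketch (hypothetical bench/case names):
    #   bench.plot()                             # every repeat of every case
    #   bench.plot('my_case', plot_opt='mean')   # per-step mean of one case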
def prof_stats(self, case, step='first', measurement=0):
tbl = self.env.measurement_tbl
df = self.to_df(case, with_mem=False, with_times=False,
with_step_info=True, corrected=False, raw_header=True, all_columns=True)
if step=='last' or step==-1:
step_ = df['step'].iloc[-1]
elif step=='first' or step==0:
step_ = df['step'].iloc[0]
else:
step_ = step
case_id = self.get_case_id(case)
stmt = (tbl.select().with_only_columns([tbl.c.prof]).
# values are casted to int here because
# np.int64 is not int in PY3!
where(tbl.c.case_id==int(case_id)).
where(tbl.c.i_th==int(measurement)).
where(tbl.c.step==int(step_))
)
with self.env.engine.connect() as conn:
row = conn.execute(stmt).fetchone()
# TODO: use a REAL tmp file
tmp_file_name = '/tmp/benchmarkit_out.prof'
with open(tmp_file_name, 'wb') as tmp_file:
tmp_file.write(row[0])
# snakeviz is launched this way for virtualenv/anaconda compatibility
c_opt = 'import sys, snakeviz.cli;sys.exit(snakeviz.cli.main())'
cmd_ = [sys.executable, '-c', c_opt, tmp_file_name]
p = subprocess.Popen(cmd_)
time.sleep(3)
p.terminate()
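    # Usage sketch (hypothetical case name): bench.prof_stats('my_case', step='last')
    # dumps the stored cProfile blob to a temporary file and opens it in
    # snakeviz for a few seconds before terminating the viewer process.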
@property
def name(self):
return self._name
@property
def env(self):
if self._env is None:
self._env = BenchEnv()
return self._env
@property
def loop_var_proc(self):
return self._loop_var_proc
@property
def time_flag(self):
return self.with_time_prof
@property
def mem_flag(self):
return self.with_memory_prof
@property
def prof_flag(self):
return self.with_code_prof
@property
def repeat(self):
return self.nb_repeat
def runTest(self):
self.runBench()
class BenchRunner(object):
def __init__(self, func, case_id, corrected_by, bench):
self._func = func #partial(func, bench)
self._case_id = case_id
self._corrected_by = corrected_by
self._bench = bench
self._args = None
self._kwargs = None
@property
def bench(self):
return self._bench
@property
def env(self):
return self._bench.env
def run_times(self, args, kwargs, step, i_th):
ut, st, cut, cst, et = os.times()
self._func(*args, **kwargs)
ut_, st_, cut_, cst_, et_ = os.times()
elapsed_time = et_ - et
        sys_time = st_ - st
user_time = ut_ - ut
ld_avg_1, ld_avg_5, ld_avg_15 = os.getloadavg()
## engine = create_engine('sqlite:///' + self.env.db_name, echo=True)
## metadata = MetaData()
## metadata.reflect(bind=engine)
## measurement_tbl = metadata.tables['measurement_tbl']
stmt = (self.env.measurement_tbl.update().
where(self.env.measurement_tbl.c.case_id==self._case_id).
where(self.env.measurement_tbl.c.i_th==i_th).
where(self.env.measurement_tbl.c.step==step).
values(
elapsed_time=elapsed_time,
sys_time=sys_time,
user_time=user_time,
ld_avg_1=ld_avg_1,
ld_avg_5=ld_avg_5,
ld_avg_15=ld_avg_15,
step_info=self.bench.step_info,
)
)
with self.env.engine.connect() as conn:
conn.execute(stmt)
def run_mem(self, args, kwargs, step, i_th):
mem = memory_usage((self._func, args, kwargs), max_usage=True)[0]
stmt = (self.env.measurement_tbl.update().
where(self.env.measurement_tbl.c.case_id==self._case_id).
where(self.env.measurement_tbl.c.i_th==i_th).
where(self.env.measurement_tbl.c.step==step).
values(
mem_usage=mem,
step_info=self.bench.step_info,
)
)
with self.env.engine.connect() as conn:
conn.execute(stmt)
def run_prof(self, args, kwargs, step, i_th):
def to_profile():
self._func(*args, **kwargs)
# TODO: use a REAL tmp file
tmp_file_name = '/tmp/benchmarkit.prof'
cProfile.runctx('to_profile()', globals(), locals(), tmp_file_name)
with open(tmp_file_name, 'rb') as tmp_file:
prof_blob = tmp_file.read()
stmt = (self.env.measurement_tbl.update().
where(self.env.measurement_tbl.c.case_id==self._case_id).
where(self.env.measurement_tbl.c.i_th==i_th).
where(self.env.measurement_tbl.c.step==step).
values(
prof=prof_blob,
step_info=self.bench.step_info,
)
)
with self.env.engine.connect() as conn:
conn.execute(stmt)
def run(self, t):
#args, kwargs = self.bench.input_proc.to_args(t)
args, kwargs = (self.bench,), {}
#values_ = self.bench.input_proc.to_dict(t)
#v = self.bench.input_proc.to_value(t)
step = self.bench.current_step
values_ = dict(case_id=self._case_id, step=step)
l_val = [dict(i_th=i) for i in six.moves.range(self.bench.repeat)]
#map(lambda d: d.update(values_), l_val)
for d in l_val:
d.update(values_)
ins = self.env.measurement_tbl.insert() #.values(**values_)
with self.env.engine.connect() as conn:
conn.execute(ins, l_val)
for i_th in six.moves.range(self.bench.repeat):
if self.bench.time_flag:
p = Process(target=BenchRunner.run_times, args=(self, args, kwargs, step, i_th))
p.start()
p.join()
#self.run_times(args, kwargs, v)
if self.bench.mem_flag:
p = Process(target=BenchRunner.run_mem, args=(self, args, kwargs, step, i_th))
p.start()
p.join()
if self.bench.prof_flag:
p = Process(target=BenchRunner.run_prof, args=(self, args, kwargs, step, i_th))
p.start()
p.join()
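# A minimal, self-contained sketch (not part of the library) of the timing
# technique used by BenchRunner.run_times: user, system and elapsed times are
# taken as deltas of os.times() around the measured call. The workload below
# is an arbitrary placeholder.
def _times_delta_sketch():
    def workload():
        return sum(i * i for i in range(100000))

    ut, st, cut, cst, et = os.times()
    workload()
    ut_, st_, cut_, cst_, et_ = os.times()
    return {'user_time': ut_ - ut,
            'sys_time': st_ - st,
            'elapsed_time': et_ - et}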
def banner(s, c='='):
hr = c*(len(s) + 2) + '\n'
s2 = ' ' + s + ' \n'
return hr + s2 + hr
def print_banner(s, c='='):
print(banner(s, c))
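# Example: banner('Results') returns (and print_banner prints)
#   =========
#    Results
#   =========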