from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
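# Illustrative invocation (the script filename is hypothetical, not given in this file):
#   python savecoin-cli.py getbalance
# The first command-line argument selects the RPC method; any further inputs are prompted for interactively.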
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a SaveCoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a SaveCoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print "\n---Passphrase changed---\n"
except:
print "\n---An error occurred---\n"
else:
print "Command not found or not supported"
|
|
import os
import unittest
from meshparser.exceptions import ParseError
from meshparser.vrmlparser.parser import VRMLParser
from meshparser.nodepare.pare import NodePare
file_path = os.path.dirname(os.path.realpath(__file__))
class ParserTestCase(unittest.TestCase):
def testExistence(self):
v = VRMLParser()
self.assertRaises(IOError, v.parse, 'file that doesnt exist')
def testParse1(self):
v = VRMLParser()
test_filename = os.path.join(file_path, 'data/Horse_1_1.wrl')
self.assertTrue(v.canParse(test_filename))
v.parse(test_filename)
def testParse2(self):
v = VRMLParser()
v.parse(os.path.join(file_path, 'data/Horse_1_2.wrl'))
self.assertEqual(0, len(v.getPoints()))
def testParse3(self):
v = VRMLParser()
v.parse(os.path.join(file_path, 'data/Horse_1_3.wrl'))
self.assertEqual(4572, len(v.getPoints()))
self.assertEqual(9120, len(v.getElements()))
def testOldFormat(self):
v = VRMLParser()
self.assertRaises(Exception, v.parse, os.path.join(file_path, 'data', 'old_format.wrl'))
def testString(self):
v = VRMLParser()
test_file = os.path.join(file_path, 'data', 'string.wrl')
v.parse(test_file)
data = v._data
self.assertIn('WorldInfo', data)
self.assertIn('title', data['WorldInfo'])
self.assertIn('info', data['WorldInfo'])
self.assertEqual("\" hash # in a \\\"quoted # string\\\"\"", data["WorldInfo"]["title"])
def testFStringSingle(self):
from meshparser.vrmlparser.parser import _FString
str1 = "\"A test string\""
line_elements = str1.split()
fs = _FString()
fs.parse(line_elements)
self.assertEqual("\"A test string\"", fs.getData())
def testFStringMulti(self):
from meshparser.vrmlparser.parser import _FString
str1 = "[ \"Another test string\",\n \"More testing string\" ]"
line_elements = str1.split()
fs = _FString()
fs.parse(line_elements)
string_data = fs.getData()
self.assertEqual("\"Another test string\"", string_data[0])
self.assertEqual("\"More testing string\"", string_data[1])
def testFStringMultiSplit(self):
from meshparser.vrmlparser.parser import _FString
str1 = "[ \"Another test string\","
str2 = "\"More testing string\" ]"
line_elements_1 = str1.split()
line_elements_2 = str2.split()
fs = _FString()
fs.parse(line_elements_1)
self.assertFalse(fs.isFinished())
fs.parse(line_elements_2)
self.assertTrue(fs.isFinished())
string_data = fs.getData()
self.assertEqual("\"Another test string\"", string_data[0])
self.assertEqual("\"More testing string\"", string_data[1])
def testFVec3fSingle1(self):
from meshparser.vrmlparser.parser import _FVec3f
str1 = '0.176164 0.303858 0.144138'
line_elements_1 = str1.split()
fv = _FVec3f()
fv.parse(line_elements_1)
values = fv.getData()
self.assertAlmostEqual(0.176164, values[0])
self.assertAlmostEqual(0.303858, values[1])
self.assertAlmostEqual(0.144138, values[2])
def testFVec3fSingle2(self):
from meshparser.vrmlparser.parser import _FVec3f
str1 = '-1.67149e-08 -8.78133e-08 3.14159'
line_elements_1 = str1.split()
fv = _FVec3f()
fv.parse(line_elements_1)
values = fv.getData()
self.assertAlmostEqual(-1.67149e-08, values[0])
self.assertAlmostEqual(-8.78133e-08, values[1])
self.assertAlmostEqual(3.14159, values[2])
self.assertTrue(fv.isFinished())
def testFNodeSimple(self):
from meshparser.vrmlparser.parser import _FNode
str1 = "Transform { }"
line_elements_1 = str1.split()
fn = _FNode()
fn.parse(line_elements_1)
values = fn.getData()
self.assertIn('Transform', values)
def testFNodeMulti(self):
from meshparser.vrmlparser.parser import _FNode
str1 = "[ Shape { appearance Appearance { material Material { " \
"ambientIntensity 0.2 diffuseColor 1.000 1.000 0.200 } } } ]"
line_elements_1 = str1.split()
fn = _FNode()
fn.parse(line_elements_1)
values = fn.getData()
self.assertEqual(1, len(values))
self.assertIn('Shape', values[0])
def testInRange(self):
from meshparser.vrmlparser.parser import _check_index_within_index_pairs
self.assertTrue(_check_index_within_index_pairs(3, [[2, 4]]))
self.assertTrue(_check_index_within_index_pairs(3, [[2, 4], [5, 8]]))
self.assertFalse(_check_index_within_index_pairs(1, [[2, 4]]))
self.assertFalse(_check_index_within_index_pairs(6, [[2, 4], [9, 15]]))
def testComment(self):
from meshparser.vrmlparser.parser import _remove_comment
test_file = os.path.join(file_path, 'data', 'string.wrl')
with open(test_file) as f:
lines = f.readlines()
for index, line in enumerate(lines):
no_comment_line = _remove_comment(line.strip())
if index == 4:
self.assertEqual("info [ \" # not a comment\" ]", no_comment_line)
elif index == 5:
self.assertEqual("", no_comment_line)
elif index == 7:
self.assertEqual("title \" hash # in a string\" ", no_comment_line)
elif index == 8:
self.assertEqual("title \" hash # in a \\\"quoted # string\\\"\" ", no_comment_line)
def testTransformBasic1(self):
from meshparser.vrmlparser.parser import _TransformNode
node = _TransformNode()
test_file = os.path.join(file_path, 'data', 'transform_test_1.wrl')
with open(test_file) as f:
lines = f.readlines()
while lines:
line = lines.pop(0)
line_elements = line.split()
node.parse(line_elements)
self.assertTrue(node.isFinished())
self.assertIn('children', node._data)
self.assertEqual(1, len(node._data['children']))
def testTransformBasic2(self):
from meshparser.vrmlparser.parser import _TransformNode
node = _TransformNode()
test_file = os.path.join(file_path, 'data', 'transform_test_2.wrl')
with open(test_file) as f:
lines = f.readlines()
while lines:
line = lines.pop(0)
line_elements = line.split()
node.parse(line_elements)
self.assertTrue(node.isFinished())
self.assertIn('children', node._data)
self.assertEqual(1, len(node._data['children']))
self.assertIn('Shape', node._data['children'][0])
self.assertIn('appearance', node._data['children'][0]['Shape'])
self.assertIn('Appearance', node._data['children'][0]['Shape']['appearance'])
def testTransformBasic3(self):
from meshparser.vrmlparser.parser import _TransformNode
node = _TransformNode()
test_file = os.path.join(file_path, 'data', 'transform_test_3.wrl')
with open(test_file) as f:
lines = f.readlines()
while lines:
line = lines.pop(0)
line_elements = line.split()
node.parse(line_elements)
self.assertTrue(node.isFinished())
self.assertIn('children', node._data)
self.assertEqual(1, len(node._data['children']))
self.assertIn('Shape', node._data['children'][0])
self.assertIn('appearance', node._data['children'][0]['Shape'])
self.assertIn('Appearance', node._data['children'][0]['Shape']['appearance'])
self.assertIn('material', node._data['children'][0]['Shape']['appearance']['Appearance'])
self.assertIn('Material', node._data['children'][0]['Shape']['appearance']['Appearance']['material'])
self.assertIn('emissiveColor', node._data['children'][0]['Shape']['appearance']['Appearance']['material']['Material'])
def testLab1(self):
v = VRMLParser()
test_file = os.path.join(file_path, 'data', 'lab1.wrl')
self.assertRaises(ParseError, v.parse, test_file)
def testDuplicatedPoints(self):
v = VRMLParser()
v.parse(os.path.join(file_path, 'data/Horse_1_4.wrl'))
points = v.getPoints()
self.assertEqual(33, len(points))
np = NodePare()
np.addPoints(points)
np.parePoints()
self.assertEqual(23, len(np.getParedPoints()))
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
|
"""
Long/Short Cross-Sectional Momentum
Author: Gilbert Wassermann
This algorithm creates traditional value factors and standardizes
them using a synthetic S&P500. It then uses a 130/30 strategy to trade.
https://www.math.nyu.edu/faculty/avellane/Lo13030.pdf
Please direct any questions, feedback, or corrections to [email protected]
The material on this website is provided for informational purposes only
and does not constitute an offer to sell, a solicitation to buy, or a
recommendation or endorsement for any security or strategy,
nor does it constitute an offer to provide investment advisory or other services by Quantopian.
In addition, the content of the website neither constitutes investment advice
nor offers any opinion with respect to the suitability of any security or any specific investment.
Quantopian makes no guarantees as to accuracy or completeness of the
views expressed in the website. The views are subject to change,
and may have become unreliable for various reasons,
including changes in market conditions or economic circumstances.
"""
import numpy as np
import pandas as pd
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import morningstar
from quantopian.pipeline.factors import CustomFactor
from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import SimpleMovingAverage, AverageDollarVolume
from quantopian.pipeline.filters.morningstar import IsPrimaryShare
from quantopian.pipeline.data import morningstar as mstar
# Custom Factor 1 : Dividend Yield
class Div_Yield(CustomFactor):
inputs = [morningstar.valuation_ratios.dividend_yield]
window_length = 1
def compute(self, today, assets, out, d_y):
out[:] = d_y[-1]
# Custom Factor 2 : P/B Ratio
class Price_to_Book(CustomFactor):
inputs = [morningstar.valuation_ratios.pb_ratio]
window_length = 1
def compute(self, today, assets, out, p_b_r):
out[:] = -p_b_r[-1]
# Custom Factor 3 : Price to Trailing 12 Month Sales
class Price_to_TTM_Sales(CustomFactor):
inputs = [morningstar.valuation_ratios.ps_ratio]
window_length = 1
def compute(self, today, assets, out, ps):
out[:] = -ps[-1]
# Custom Factor 4 : Price to Trailing 12 Month Cashflow
class Price_to_TTM_Cashflows(CustomFactor):
inputs = [morningstar.valuation_ratios.pcf_ratio]
window_length = 1
def compute(self, today, assets, out, pcf):
out[:] = -pcf[-1]
# This factor creates the synthetic S&P500
class SPY_proxy(CustomFactor):
inputs = [morningstar.valuation.market_cap]
window_length = 1
def compute(self, today, assets, out, mc):
out[:] = mc[-1]
# This pulls all necessary data in one step
def Data_Pull():
# create the pipeline for the data pull
Data_Pipe = Pipeline()
# create SPY proxy
Data_Pipe.add(SPY_proxy(), 'SPY Proxy')
# Div Yield
Data_Pipe.add(Div_Yield(), 'Dividend Yield')
# Price to Book
Data_Pipe.add(Price_to_Book(), 'Price to Book')
# Price / TTM Sales
Data_Pipe.add(Price_to_TTM_Sales(), 'Price / TTM Sales')
# Price / TTM Cashflows
Data_Pipe.add(Price_to_TTM_Cashflows(), 'Price / TTM Cashflow')
return Data_Pipe
# function to filter out unwanted values in the scores
def filter_fn(x):
if x <= -10:
x = -10.0
elif x >= 10:
x = 10.0
return x
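# For example, filter_fn(12.7) returns 10.0 and filter_fn(-3.2) returns -3.2 unchanged,
# so downstream z-scores are clamped to the interval [-10, 10].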
def standard_frame_compute(df):
"""
Standardizes the Pipeline API data pull
using the S&P500's means and standard deviations for
particular CustomFactors.
parameters
----------
df: pandas.DataFrame
full result of Data_Pull
returns
-------
numpy.array
standardized Data_Pull results
numpy.array
index of equities
"""
# basic clean of dataset to remove infinite values
df = df.replace([np.inf, -np.inf], np.nan)
df = df.dropna()
# need standardization params from synthetic S&P500
df_SPY = df.sort(columns='SPY Proxy', ascending=False)
# create separate dataframe for SPY
# to store standardization values
df_SPY = df_SPY.head(500)
# get dataframes into numpy array
df_SPY = df_SPY.as_matrix()
# store index values
index = df.index.values
# turn into a numpy array for speed
df = df.as_matrix()
# create an empty vector on which to add standardized values
df_standard = np.empty(df.shape[0])
for col_SPY, col_full in zip(df_SPY.T, df.T):
# summary stats for S&P500
mu = np.mean(col_SPY)
sigma = np.std(col_SPY)
col_standard = np.array(((col_full - mu) / sigma))
# create vectorized function (lambda equivalent)
fltr = np.vectorize(filter_fn)
col_standard = (fltr(col_standard))
# scale by the number of factor columns so the summed composite stays within range
col_standard = (col_standard / df.shape[1])
# attach calculated values as new row in df_standard
df_standard = np.vstack((df_standard, col_standard))
# get rid of first entry (empty scores)
df_standard = np.delete(df_standard,0,0)
return (df_standard, index)
def composite_score(df, index):
"""
Summarize standardized data in a single number.
parameters
----------
df: numpy.array
standardized results
index: numpy.array
index of equities
returns
-------
pandas.Series
series of summarized, ranked results
"""
# sum up transformed data
df_composite = df.sum(axis=0)
# put into a pandas dataframe and connect numbers
# to equities via reindexing
df_composite = pd.Series(data=df_composite,index=index)
# sort descending
df_composite.sort(ascending=False)
return df_composite
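# Illustrative toy example (made-up data, not from the algorithm):
#   df = np.array([[0.5, -0.2], [1.0, 0.3]])    # 2 standardized factors x 2 equities
#   composite_score(df, index=['AAA', 'BBB'])   # -> AAA 1.5, BBB 0.1, sorted descending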
def initialize(context):
# get data from pipeline
data_pull = Data_Pull()
attach_pipeline(data_pull,'Data')
# filter out bad stocks for universe
mask = filter_universe()
data_pull.set_screen(mask)
# set leverage ratios for longs and shorts
context.long_leverage = 1.3
context.short_leverage = -0.3
# at the start of each month, run the rebalancing function
schedule_function(rebalance, date_rules.month_start(), time_rules.market_open(minutes=30))
# clean untradeable securities daily
schedule_function(daily_clean,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_close(minutes=30))
# record variables
schedule_function(record_vars,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_close())
pass
# called before every day of trading
def before_trading_start(context, data):
# apply the logic to the data pull in order to get a ranked list of equities
context.output = pipeline_output('Data')
context.output, index = standard_frame_compute(context.output)
context.output = composite_score(context.output, index)
# create lists of stocks on which to go long and short
context.long_set = set(context.output.head(26).index)
context.short_set = set(context.output.tail(6).index)
# log long and short equities and their corresponding composite scores
def handle_data(context, data):
"""
print "LONG LIST"
log.info(context.long_set)
print "SHORT LIST"
log.info(context.short_set)
"""
pass
# called at the start of every month in order to rebalance the longs and shorts lists
def rebalance(context, data):
# calculate how much of each stock to buy or hold
long_pct = context.long_leverage / len(context.long_set)
short_pct = context.short_leverage / len(context.short_set)
# universe now contains just longs and shorts
context.security_set = set(context.long_set.union(context.short_set))
for stock in context.security_set:
if data.can_trade(stock):
if stock in context.long_set:
order_target_percent(stock, long_pct)
elif stock in context.short_set:
order_target_percent(stock, short_pct)
# close out stale positions
daily_clean(context, data)
# make sure all untradeable securities are sold off each day
def daily_clean(context, data):
for stock in context.portfolio.positions:
if stock not in context.security_set and data.can_trade(stock):
order_target_percent(stock, 0)
def record_vars(context, data):
# number of long and short positions. Even in minute mode, only the end-of-day
# leverage is plotted.
shorts = longs = 0
for position in context.portfolio.positions.itervalues():
if position.amount < 0:
shorts += 1
elif position.amount > 0:
longs += 1
record(leverage=context.account.leverage, short_count=shorts, long_count=longs,
exposure=context.account.net_leverage)
def filter_universe():
"""
9 filters:
1. common stock
2 & 3. not limited partnership - name and database check
4. database has fundamental data
5. not over the counter
6. not when issued
7. not depository receipts
8. primary share
9. high dollar volume
Check Scott's notebook for more details.
"""
common_stock = mstar.share_class_reference.security_type.latest.eq('ST00000001')
not_lp_name = ~mstar.company_reference.standard_name.latest.matches('.* L[\\. ]?P\.?$')
not_lp_balance_sheet = mstar.balance_sheet.limited_partnership.latest.isnull()
have_data = mstar.valuation.market_cap.latest.notnull()
not_otc = ~mstar.share_class_reference.exchange_id.latest.startswith('OTC')
not_wi = ~mstar.share_class_reference.symbol.latest.endswith('.WI')
not_depository = ~mstar.share_class_reference.is_depositary_receipt.latest
primary_share = IsPrimaryShare()
# Combine the above filters.
tradable_filter = (common_stock & not_lp_name & not_lp_balance_sheet &
have_data & not_otc & not_wi & not_depository & primary_share)
high_volume_tradable = (AverageDollarVolume(window_length=21,
mask=tradable_filter).percentile_between(70, 100))
screen = high_volume_tradable
return screen
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import hashlib
from os import remove, makedirs
from os.path import exists, isdir
from functools import partial
from types import FunctionType
import inspect
from ._decorator import experimental, deprecated
def make_sentinel(name):
return type(name, (object, ), {
'__repr__': lambda s: name,
'__str__': lambda s: name,
'__class__': None
})()
def find_sentinels(function, sentinel):
keys = []
function_spec = inspect.getargspec(function)
if function_spec.defaults is not None:
# Concept from http://stackoverflow.com/a/12627202/579416
keywords_start = -len(function_spec.defaults)
for key, default in zip(function_spec.args[keywords_start:],
function_spec.defaults):
if default is sentinel:
keys.append(key)
return keys
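# Minimal usage sketch (names are illustrative, not from this module):
#   MISSING = make_sentinel('MISSING')
#   def load(path, cache=MISSING, verify=True): ...
#   find_sentinels(load, MISSING)   # -> ['cache']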
class MiniRegistry(dict):
def __call__(self, name):
"""Act as a decorator to register functions with self"""
def decorator(func):
self[name] = func
return func
return decorator
def copy(self):
"""Useful for inheritance"""
return self.__class__(super(MiniRegistry, self).copy())
def formatted_listing(self):
"""Produce an RST list with descriptions."""
if len(self) == 0:
return "\tNone"
else:
return "\n".join(["\t%r\n\t %s" %
(name, self[name].__doc__.split("\n")[0])
for name in sorted(self)])
def interpolate(self, obj, name):
"""Inject the formatted listing in the second blank line of `name`."""
# Py2/3 compatible way of calling getattr(obj, name).__func__
f = getattr(obj, name).__get__(None, type(None))
if hasattr(f, 'func_code'):
f2 = FunctionType(f.func_code, f.func_globals, name=f.func_name,
argdefs=f.func_defaults, closure=f.func_closure)
else:
f2 = FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__, closure=f.__closure__)
# Conveniently the original docstring is on f2, not the new ones if
# inheritance is happening. I have no idea why.
t = f2.__doc__.split("\n\n")
t.insert(2, self.formatted_listing())
f2.__doc__ = "\n\n".join(t)
setattr(obj, name, f2)
def chunk_str(s, n, char):
"""Insert `char` character every `n` characters in string `s`.
Canonically pronounced "chunkster".
"""
# Modified from http://stackoverflow.com/a/312464/3776794
if n < 1:
raise ValueError(
"Cannot split string into chunks with n=%d. n must be >= 1." % n)
return char.join((s[i:i+n] for i in range(0, len(s), n)))
@experimental(as_of="0.4.0")
def cardinal_to_ordinal(n):
"""Return ordinal string version of cardinal int `n`.
Parameters
----------
n : int
Cardinal to convert to ordinal. Must be >= 0.
Returns
-------
str
Ordinal version of cardinal `n`.
Raises
------
ValueError
If `n` is less than 0.
Notes
-----
This function can be useful when writing human-readable error messages.
Examples
--------
>>> from skbio.util import cardinal_to_ordinal
>>> cardinal_to_ordinal(0)
'0th'
>>> cardinal_to_ordinal(1)
'1st'
>>> cardinal_to_ordinal(2)
'2nd'
>>> cardinal_to_ordinal(3)
'3rd'
"""
# Taken and modified from http://stackoverflow.com/a/20007730/3776794
# Originally from http://codegolf.stackexchange.com/a/4712 by Gareth
if n < 0:
raise ValueError("Cannot convert negative integer %d to ordinal "
"string." % n)
return "%d%s" % (n, "tsnrhtdd"[(n//10 % 10 != 1)*(n % 10 < 4)*n % 10::4])
@experimental(as_of="0.4.0")
def is_casava_v180_or_later(header_line):
"""Check if the header looks like it is Illumina software post-casava v1.8
Parameters
----------
header_line : bytes
A header line
Returns
-------
bool
``True`` if casava v1.8+, otherwise ``False``
Examples
--------
>>> from skbio.util import is_casava_v180_or_later
>>> print(is_casava_v180_or_later('@foo'))
False
>>> id_ = '@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
>>> print(is_casava_v180_or_later(id_))
True
"""
if not header_line.startswith(b'@'):
raise ValueError("Non-header line passed in!")
fields = header_line.split(b':')
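# A Casava 1.8+ header splits into 10 colon-delimited fields, with field index 7 holding the
# Y/N "is filtered" flag; earlier header formats have fewer fields.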
return len(fields) == 10 and fields[7] in b'YN'
@experimental(as_of="0.4.0")
def safe_md5(open_file, block_size=2 ** 20):
"""Computes an md5 sum without loading the file into memory
Parameters
----------
open_file : file object
open file handle to the archive to compute the checksum. It
must be open as a binary file
block_size : int, optional
size of the block taken per iteration
Returns
-------
md5 : md5 object from the hashlib module
object with the loaded file
Notes
-----
This method is based on the answers given in:
http://stackoverflow.com/a/1131255/379593
Examples
--------
>>> from StringIO import StringIO
>>> from skbio.util import safe_md5
>>> fd = StringIO("foo bar baz") # open file like object
>>> x = safe_md5(fd)
>>> x.hexdigest()
'ab07acbb1e496801937adfa772424bf7'
>>> fd.close()
"""
md5 = hashlib.md5()
data = True
while data:
data = open_file.read(block_size)
if data:
md5.update(data)
return md5
@experimental(as_of="0.4.0")
def remove_files(list_of_filepaths, error_on_missing=True):
"""Remove list of filepaths, optionally raising an error if any are missing
Parameters
----------
list_of_filepaths : list of strings
list with filepaths to remove
error_on_missing : bool, optional
whether or not the function should raise an ``OSError`` if a file is
not found
Raises
------
OSError
If a filepath in the list does not exist
Examples
--------
>>> from tempfile import NamedTemporaryFile
>>> from os.path import exists
>>> from skbio.util import remove_files
>>> h = NamedTemporaryFile(delete=False)
>>> exists(h.name) # it exists
True
>>> remove_files([h.name])
>>> exists(h.name) # and now it's gone
False
"""
missing = []
for fp in list_of_filepaths:
try:
remove(fp)
except OSError:
missing.append(fp)
if error_on_missing and missing:
raise OSError("Some filepaths were not accessible: %s" %
'\t'.join(missing))
@experimental(as_of="0.4.0")
def create_dir(dir_name, fail_on_exist=False, handle_errors_externally=False):
"""Create a directory safely and fail meaningfully
Parameters
----------
dir_name: string
name of directory to create
fail_on_exist: bool, optional
if true raise an error if ``dir_name`` already exists
handle_errors_externally: bool, optional
if True do not raise errors, but return failure codes. This allows one
to handle errors locally and, for example, hint the user at a
--force_overwrite option.
Returns
-------
return_value : int
These values are only returned if no error is raised:
- ``0``: directory was safely created
- ``1``: directory already existed
- ``2``: a file with the same name exists
- ``3``: any other unspecified ``OSError``
Notes
-----
Depending on how thorough we want to be we could add tests, e.g. for
testing actual write permission in an existing dir.
Examples
--------
>>> from skbio.util import create_dir
>>> from os.path import exists, join
>>> from tempfile import gettempdir
>>> from os import rmdir
>>> new_dir = join(gettempdir(), 'scikitbio')
>>> create_dir(new_dir)
0
>>> exists(new_dir)
True
>>> rmdir(new_dir)
"""
error_code_lookup = _get_create_dir_error_codes()
# pre-instantiate the error handler with the directory name and error-handling mode
ror = partial(_handle_error_codes, dir_name, handle_errors_externally)
if exists(dir_name):
if isdir(dir_name):
# dir is there
if fail_on_exist:
return ror(error_code_lookup['DIR_EXISTS'])
else:
return error_code_lookup['DIR_EXISTS']
else:
# must be file with same name
return ror(error_code_lookup['FILE_EXISTS'])
else:
# no dir there, try making it
try:
makedirs(dir_name)
except OSError:
return ror(error_code_lookup['OTHER_OS_ERROR'])
return error_code_lookup['NO_ERROR']
@experimental(as_of="0.4.0")
def find_duplicates(iterable):
"""Find duplicate elements in an iterable.
Parameters
----------
iterable : iterable
Iterable to be searched for duplicates (i.e., elements that are
repeated).
Returns
-------
set
Repeated elements in `iterable`.
"""
# modified from qiita.qiita_db.util.find_repeated
# https://github.com/biocore/qiita
# see licenses/qiita.txt
seen, repeated = set(), set()
for e in iterable:
if e in seen:
repeated.add(e)
else:
seen.add(e)
return repeated
flatten_deprecation_reason = (
"Solutions to this problem exist in the python standarnd library. "
"Please refer to the following links for good alternatives:\n"
"http://stackoverflow.com/a/952952/3639023\n"
"http://stackoverflow.com/a/406199/3639023")
@deprecated(as_of="0.2.3-dev", until="0.4.1",
reason=flatten_deprecation_reason)
def flatten(items):
"""Removes one level of nesting from items
Parameters
----------
items : iterable
list of items to flatten one level
Returns
-------
flattened_items : list
list of flattened items, items can be any sequence, but flatten always
returns a list.
Examples
--------
>>> from skbio.util import flatten
>>> h = [['a', 'b', 'c', 'd'], [1, 2, 3, 4, 5], ['x', 'y'], ['foo']]
>>> print(flatten(h))
['a', 'b', 'c', 'd', 1, 2, 3, 4, 5, 'x', 'y', 'foo']
"""
result = []
for i in items:
try:
result.extend(i)
except TypeError:
result.append(i)
return result
def _get_create_dir_error_codes():
return {'NO_ERROR': 0,
'DIR_EXISTS': 1,
'FILE_EXISTS': 2,
'OTHER_OS_ERROR': 3}
def _handle_error_codes(dir_name, suppress_errors=False,
error_code=None):
"""Wrapper function for error_handling.
dir_name: name of directory that raised the error
suppress_errors: if True return the error_code instead of raising, otherwise raise OSError
error_code: the code for the error
"""
error_code_lookup = _get_create_dir_error_codes()
if error_code is None:
error_code = error_code_lookup['NO_ERROR']
error_strings = \
{error_code_lookup['DIR_EXISTS']:
"Directory already exists: %s" % dir_name,
error_code_lookup['FILE_EXISTS']:
"File with same name exists: %s" % dir_name,
error_code_lookup['OTHER_OS_ERROR']:
"Could not create output directory: %s. " % dir_name +
"Check the permissions."}
if error_code == error_code_lookup['NO_ERROR']:
return error_code_lookup['NO_ERROR']
if suppress_errors:
return error_code
else:
raise OSError(error_strings[error_code])
|
|
#!/usr/bin/python
'''
File defining a python class for snowpit data
November 2016, Simon Filhol
'''
import numpy as np
import pandas as pd
import os
import snowpyt.CAAMLv6_xml as cxv6
from snowpyt.snowflake.sf_dict import snowflake_symbol_dict
import snowpyt.snowflake.sf_dict as sfd
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from matplotlib.ticker import MaxNLocator
path2snowflake = cxv6.__file__[:-14] + '/'
class layer(object):
def __init__(self):
self.dtop = None
self.dtop_unit = None
self.dbot = None
self.thickness = None
self.thickness_unit = None
self.grain_type1 = None
self.grain_type2 = None
self.grain_type3 = None
self.grainSize_unit = None
self.grainSize_mean = None
self.grainSize_max = None
self.hardness_ram = None
self.hardness_index = None
self.hardness = None
self.lwc = None
self.id = None
# # wrong syntax. Check how to have an automatic update of the following fields within the class:
# if (self.dtop is not None) and (self.thickness is not None):
# self.dbot = self.dtop - self.thickness
#
# if (self.dtop is not None) and (self.dbot is not None):
# self.thickness = self.dtop - self.dbot
# derive hardness code automatically
# def __str__(self):
# return "-----layer object-----\ndepthTop={}{}\nthickness={}{}\ngrainFormPrimary={}\ngrainFormSecondary={}\ngrainSize\n\tavg={}{}\n\tavgMax={}{}\nhardness={}\nlwc={}".format(
# self.dtop, self.dtop_unit, self.thickness, self.thickness_unit, self.grain_type1, self.grain_type2,
# self.grain_size_avg, self.grain_size_unit, self.grain_size_max, self.grain_size_unit, self.hardness,
# self.lwc)
class temperature_profile(object):
def __init__(self):
self.depth = []
self.depth_unit = None
self.temp = []
self.temp_unit = None
def __str__(self):
return "-----temperature profile-----\ndepth={} {}\ntemp={} {}".format(self.depth, self.depth_unit, self.temp,
self.temp_unit)
class density_profile(object):
def __init__(self):
self.depth = []
self.depth_unit = None
self.thickness = []
self.thickness_unit = None
self.density = []
self.density_unit = None
def __str__(self):
return "-----density profile-----\ndepth={} {}\nthickness={} {}\ndensity={} {}".format(self.depth,
self.depth_unit,
self.density_unit)
class sample_profile(object):
def __init__(self):
self.layer_top = []
self.layer_bot = []
self.depth_unit = None
self.names = []
self.values = []
self.values_units = None
class metadata(object):
def __init__(self):
self.date = None
self.time = None
self.operation = None
self.observer = None
self.profile_depth = None
self.profile_depth_unit = None
self.location_description = None
self.srsName = None
self.east = None
self.east_unit = None
self.north = None
self.north_unit = None
self.elevation = None
self.elevation_unit = None
self.sky_condition = None
self.precipitation = None
self.air_temperature = None
self.air_temperature_unit = None
self.windspeed = None
self.windspeed_unit = None
self.winddir=None
self.comments = None
def __str__(self):
return "-----metadata-----\ndate={}\noperation={}\nobserver={}\nprofile depth={} {}\nlocation description={}\nsrs name={}\nE={}\nN={}\nelevation={} {}\nsky condition={}\nprecipitation={}\nair temperature={} {}\nwindspeed={} {}\ncomments={}".format(
self.date, self.operation, self.observer, self.profile_depth, self.profile_depth_unit,
self.location_description, self.srsName, self.east, self.north, self.elevation, self.elevation_unit,
self.sky_condition, self.precipitation, self.air_temperature, self.air_temperature_unit, self.windspeed,
self.windspeed_unit, self.comments)
class Snowpit(object):
# try to modify the snowpit class to use metadata, layers and profiles as class objects
def __init__(self):
self.snowflakeDICT = snowflake_symbol_dict
self.caaml_file = None
self.sample_file = None
self.metadata = metadata()
self.temperature_profile = temperature_profile()
self.density_profile = density_profile()
self.sample_profile = sample_profile()
self.table = pd.DataFrame()
self.layers = None
self.units = None
self.layers_top = None
self.layers_bot = None
def _extract_layers(self, print2term=True):
# Function to reorganize layer data
self.layers_bot = np.zeros(self.layers.__len__()) * np.nan
self.layers_top = self.layers_bot * np.nan
self.layers_hardness_ram = self.layers_bot * np.nan
self.layers_hardness_index = self.layers_bot * np.nan
self.layers_grainSize_mean = self.layers_top * np.nan
self.layers_grainSize_max = self.layers_top * np.nan
self.layers_id = self.layers_top * np.nan
self.layers_grainType1 = np.empty(self.layers.__len__(), dtype=object)
self.layers_grainType2 = np.empty(self.layers.__len__(), dtype=object)
self.layers_grainType3 = np.empty(self.layers.__len__(), dtype=object)
for i, layer in enumerate(self.layers):
if print2term:
print('layer # ' + str(i))
print(layer.__dict__)
self.layers_bot[i] = layer.dbot
self.layers_top[i] = layer.dtop
self.layers_hardness_index[i] = sfd.hardness_dict.get(layer.hardness)
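# Convert hand-hardness index to an approximate ram resistance via the empirical
# power-law fit used below (coefficients as chosen by the original author).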
try:
self.layers_hardness_ram[i] = 19.3 * self.layers_hardness_index[i] ** 2.4
except:
print('WARNING: no hardness data')
self.layers_grainSize_mean[i] = layer.grainSize_mean
self.layers_grainSize_max[i] = layer.grainSize_max
self.layers_id[i] = layer.id
self.layers_grainType1[i] = layer.grain_type1
self.layers_grainType2[i] = layer.grain_type2
self.layers_grainType3[i] = layer.grain_type3
def import_caamlv6(self, print2term=True):
# Load metadata
self.metadata = cxv6.get_metadata(self.caaml_file, print2term=print2term)
# load temperature profile
self.temperature_profile = cxv6.get_temperature(self.caaml_file, print2term=print2term)
# load density profile
self.density_profile = cxv6.get_density(self.caaml_file, print2term=print2term)
# load layers
self.layers = cxv6.get_layers(self.caaml_file, print2term=print2term)
if self.layers is not None:
self._extract_layers(print2term=print2term)
def import_sample_csv(self, bar_plot=False):
'''
Function to import sample profiles.
:param bar_plot: plot sample profile as bar instead of line-scatter. Default is False
'''
self.sample_profile.df = pd.read_csv(self.sample_file)
# self.sample_profile.layer_top = self.sample_profile.df.height_top
# self.sample_profile.layer_bot = self.sample_profile.df.height_bot
self.sample_profile.names = self.sample_profile.df.columns[2:]
self.sample_profile.bar_plot = bar_plot
def plot(self, save=False, metadata=False, invert_depth=False, figsize=(8,4), dpi=150,
fig_fname='snowpit.png',  # output filename used when save=True; the default name is arbitrary
plot_order=['temperature', 'density', 'crystal size',
'stratigraphy', 'hardness',
'sample_name', 'dD', 'd18O', 'dXS']):
fig = plt.figure(figsize=figsize, dpi=dpi)
if metadata:
my_rowspan = 3
else:
my_rowspan = 4
# ===========================================================
# Automatically adjust summary plot based on data available
ncol = plot_order.__len__()
if ncol == 1:
ax1 = plt.subplot2grid((4, ncol), (0, ncol - 1), rowspan=my_rowspan)
self.axs_list = [ax1]
if ncol >= 2:
ax1 = plt.subplot2grid((4, ncol), (0, 0), rowspan=my_rowspan)
self.axs_list = []
self.axs_list.append(ax1)
for n in range(1, ncol):
ax = plt.subplot2grid((4, ncol), (0, n), rowspan=my_rowspan, sharey=ax1)
self.axs_list.append(ax)
print(self.axs_list)
def to_plot(plot_order=plot_order):
# function to plot plots based on the order indicated in plot_order
plots_dict = {'temperature': plot_temperature,
'density': plot_density,
'stratigraphy': plot_stratigraphy,
'hardness': plot_hardness,
'crystal size': plot_crystalSize,
'sample_name': plot_sample_names,
'dD': plot_dD,
'd18O': plot_d18O,
'dXS': plot_dXS}
for i, axs in enumerate(self.axs_list):
plots_dict.get(plot_order[i])(axs)
def add_grid(ax):
xlim = ax.get_xlim()
ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
np.repeat(xlim[1] - xlim[0], self.layers_top.__len__()), - (self.layers_bot - self.layers_top),
np.repeat(xlim[0], self.layers_top.__len__()),
alpha=0.5, edgecolor='m', linewidth=0.75, linestyle=':',zorder=20,fill=False)
def plot_isotope(ax,iso='dD', std=None):
if std is None:
std = iso + '_SD' #std column name default for data from FARLAB
# check that iso is properly defined
if iso not in ('dD', 'd18O', 'dXS'):
print('iso must be dD, d18O or dXS')
return
if ax is ax1:
ax.set_ylabel("Depth (cm)")
else:
plt.setp(ax.get_yticklabels(), visible=False)
ax.yaxis.tick_right()
color_dict = {'dD': '#1f77b4',
'd18O': '#1f77b4', 'dXS': '#d62728'}
title_dict = {'dD': "dD ($^{o}/_{oo}$)",
'd18O': "d18O ($^{o}/_{oo}$)",
'dXS': "d-excess ($^{o}/_{oo}$)"}
color=color_dict[iso]
#sample-type layer color in gray scale
col_vec=[]
hatch_vec=[]
symb_vec=[]
cat=self.sample_profile.df.ice_type
for let in cat:
if let=='S':
col_vec=np.append(col_vec,'None')
hatch_vec=np.append(hatch_vec,'')
symb_vec=np.append(symb_vec,'o')
if let=='I':
col_vec=np.append(col_vec,'0.7')
hatch_vec=np.append(hatch_vec,'.')
symb_vec=np.append(symb_vec,'sq')
if let=='M':
col_vec=np.append(col_vec,'0.9')
hatch_vec=np.append(hatch_vec,'\\')
symb_vec=np.append(symb_vec,'d')
#staircase step plot:
im = ax.step(np.append(self.sample_profile.df[iso].values[0], self.sample_profile.df[iso].values),
np.append(self.sample_profile.df.height_top.values,0), where='post', color=color)
#ax.set_title("dD ($^{o}/_{oo}$)")
xlim = ax.get_xlim()
# Mika: add error-bar in isotope
ax.barh(
self.sample_profile.df.height_top,2*self.sample_profile.df[std].values,
(self.sample_profile.df.height_bot-self.sample_profile.df.height_top),
(self.sample_profile.df[iso].values-self.sample_profile.df[std].values),
align='edge',edgecolor='k',linewidth=0,color=color,alpha=0.6,zorder=5)
# Mika: add isotope-sample-layer type - this needs the col_vec.
ax.barh(
self.sample_profile.df.height_top,np.diff(xlim),
(self.sample_profile.df.height_bot-self.sample_profile.df.height_top),xlim[0],
align='edge',edgecolor='k',color=col_vec,linewidth=0,zorder=2)
# Add grid following the layering
add_grid(ax)
# ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
# np.repeat(xlim[1] - xlim[0], self.layers_top.__len__()), - (self.layers_bot - self.layers_top),
# np.repeat(xlim[0], self.layers_top.__len__()),
# alpha=0.5, edgecolor='m', linewidth=0.75, linestyle=':',zorder=20,fill=False)
ax.set_title(title_dict[iso])
ax.set_xlim(xlim)
ax.grid(axis='x', linewidth=0.5, linestyle=':')
for tick in ax.get_xticklabels():
tick.set_rotation(45)
return im
def plot_dD(ax):
plot_isotope(ax,iso='dD')
def plot_d18O(ax):
plot_isotope(ax,iso='d18O')
def plot_dXS(ax):
plot_isotope(ax, iso='dXS')
def plot_density(ax):
if ax is ax1:
ax.set_ylabel("Depth (cm)")
else:
plt.setp(ax.get_yticklabels(), visible=False)
ax.yaxis.tick_right()
im = ax.plot(self.density_profile.density, self.density_profile.depth)
xlim = ax.get_xlim()
# Add grid following the layering
add_grid(ax)
# ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
# np.repeat(xlim[1] - xlim[0], self.layers_top.__len__()), - (self.layers_bot - self.layers_top),
# np.repeat(xlim[0], self.layers_top.__len__()),
# color='w', alpha=0.5, edgecolor='m', linewidth=0.75, linestyle=':')
ax.set_xlim(xlim)
ax.grid(axis='x', linewidth=0.5, linestyle=':')
ax.set_title("Density")
for tick in ax.get_xticklabels():
tick.set_rotation(45)
return im
def plot_temperature(ax):
if ax is ax1:
ax.set_ylabel("Depth (cm)")
else:
plt.setp(ax.get_yticklabels(), visible=False)
ax.yaxis.tick_right()
im = ax.plot(self.temperature_profile.temp, self.temperature_profile.depth)
xlim = ax.get_xlim()
# # Add grid following the layering
add_grid(ax)
# ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
# np.repeat(xlim[1] - xlim[0], self.layers_top.__len__()), - (self.layers_bot - self.layers_top),
# np.repeat(xlim[0], self.layers_top.__len__()),
# color='w', alpha=0.5, edgecolor='m', linewidth=0.75, linestyle=':')
ax.set_xlim(xlim)
ax.set_title("Temperature ($^\circ$C)")
ax.grid(axis='x', linestyle=':', linewidth=0.5)
for tick in ax.get_xticklabels():
tick.set_rotation(45)
return im
def plot_stratigraphy(ax):
if ax is ax1:
ax.set_ylabel("Depth (cm)")
else:
plt.setp(ax.get_yticklabels(), visible=False)
ax.yaxis.tick_right()
plt.setp(ax.get_xticklabels(), visible=False)
im2 = ax.barh(self.layers_bot-(self.layers_bot-self.layers_top)/2,
np.repeat(1, self.layers_top.__len__()), - (self.layers_bot - self.layers_top),
color=cm.Blues(self.layers_hardness_index / 6), edgecolor='k', linewidth=0.5)
#edgecolor='k', linewidth=0.5)
ax.set_xlim(0, 1)
# include sample name on pit face
# for i, sample in enumerate(self.sample_name):
# include snowflake symbols
for i, flake in enumerate(self.layers_grainType1.astype(str)):
if flake == 'nan':
flake = None
if flake != None:
if snowflake_symbol_dict.get(flake) != None:
im = plt.imread(path2snowflake + snowflake_symbol_dict.get(flake))
im[im == 0] = np.nan
imagebox = OffsetImage(im, zoom=.01)
if (self.layers_grainType2[i] is None) and (self.layers_grainType3[i] is None):
hloc = 0.5
elif (self.layers_grainType2[i] != None) and (self.layers_grainType3[i] is None):
hloc = 0.33
else:
hloc = 0.25
xy = [hloc,
((self.layers_top[i] - self.layers_bot[i]) / 2 + self.layers_bot[i])] # coordinates to position this image
ab = AnnotationBbox(imagebox, xy, xycoords='data', boxcoords='data', frameon=False)
ax.add_artist(ab)
else:
print('WARNING: [' + flake + '] is not a compatible snowflake type. Check spelling!')
for i, flake in enumerate(self.layers_grainType2.astype(str)):
if flake == 'nan':
flake = None
if flake is not None:
if snowflake_symbol_dict.get(flake) != None:
im = plt.imread(path2snowflake + snowflake_symbol_dict.get(flake))
im[im == 0] = np.nan
imagebox = OffsetImage(im, zoom=.01)
if (self.layers_grainType2[i] != None) and (self.layers_grainType3[i] is None):
hloc2 = 0.66
else:
hloc2 = 0.5
xy = [hloc2,
((self.layers_top[i] - self.layers_bot[i]) / 2 + self.layers_bot[i])] # coordinates to position this image
ab = AnnotationBbox(imagebox, xy, xycoords='data', boxcoords='data', frameon=False)
ax.add_artist(ab)
else:
print('WARNING: [' + flake + '] is not a compatible snowflake type. Check spelling!')
for i, flake in enumerate(self.layers_grainType3.astype(str)):
if flake == 'nan':
flake = None
if flake != None:
if snowflake_symbol_dict.get(flake) != None:
im = plt.imread(path2snowflake + snowflake_symbol_dict.get(flake))
im[im == 0] = np.nan
imagebox = OffsetImage(im, zoom=.01)
xy = [0.75,
((self.layers_top[i] - self.layers_bot[i]) / 2 + self.layers_bot[i])] # coordinates to position this image
ab = AnnotationBbox(imagebox, xy, xycoords='data', boxcoords='data', frameon=False)
ax.add_artist(ab)
else:
print('WARNING: [' + flake + '] is not a compatible snowflake type. Check spelling!')
ax.set_title("Stratigraphy")
return im2
def plot_hardness(ax):
plt.setp(ax.get_yticklabels(), visible=False)
# Add grid following the layering
im = ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2, self.layers_hardness_index,
self.layers_bot - self.layers_top, color=cm.Blues(self.layers_hardness_index / 6), edgecolor='k',
linewidth=0.5)
ax.set_xlim(0, 7)
ax.set_title("Hardness")
labels_ax = ['', 'Fist', '4F', '1F', 'P', 'K', 'I']
ax.set_xticklabels(labels_ax, rotation=45)
ax.xaxis.set_major_locator(MaxNLocator(integer=True, prune='upper'))
return im
def plot_crystalSize(ax):
if ax is ax1:
ax.set_ylabel("Depth (cm)")
else:
plt.setp(ax.get_yticklabels(), visible=False)
ax.yaxis.tick_right()
im = ax.barh(self.layers_bot-(self.layers_bot-self.layers_top)/2, self.layers_grainSize_max-self.layers_grainSize_mean, 1, self.layers_grainSize_mean)
xlim = ax.get_xlim()
add_grid(ax)
# ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
# np.repeat(xlim[1] - xlim[0], self.layers_top.__len__()), - (self.layers_bot - self.layers_top),
# np.repeat(xlim[0], self.layers_top.__len__()),
# color='w', alpha=0.2, edgecolor='k', linewidth=0.5, linestyle=':')
ax.xaxis.set_ticks([0, 0.1, 0.2, 0.5, 1, 1.5, 2, 3, 4, 5, 10, 15, 20, 25, 30, 35, 40])
ax.set_xlim(xlim)
ax.set_title("Crystal size (mm)")
ax.grid(axis='x', linewidth=0.5, linestyle=':')
for tick in ax.get_xticklabels():
tick.set_rotation(45)
return im
def plot_sample_names(ax):
# add here code for plotting column of sample names
ax.set_xlim([0,1])
for i, name in enumerate(self.sample_profile.sample_name.astype(str)):
if name != 'nan':
ax.text(0.5, self.sample_profile.depth[i], name,
bbox={'facecolor':'red', 'edgecolor':'none', 'alpha':0.5, 'pad':1},fontsize=5)
xlim = ax.get_xlim()
add_grid(ax)
# ax.barh(self.layers_bot - (self.layers_bot - self.layers_top) / 2,
# np.repeat(xlim[1] - xlim[0], self.layers_top.__len__()), - (self.layers_bot - self.layers_top),
# np.repeat(xlim[0], self.layers_top.__len__()),
# color='w', alpha=0.2, edgecolor='k', linewidth=0.5, linestyle=':')
ax.set_xlim(xlim)
ax.set_title("Sample Name")
plt.setp(ax.get_xticklabels(), visible=False)
if metadata:
metadata_text = "Date: " + p.metadata.date + '; Time [24hr]: ' + '\n' + \
"Observer: " + p.metadata.observer + '\n' + \
"Location description: " + p.metadata.location_description + '\n' + \
"East : " + str(p.metadata.east) + ' ' + \
"North: " + str(p.metadata.north) + ' ' + \
"Elevation: " + str(p.metadata.elevation) + ' ' + p.metadata.elevation_unit + '\n' + \
"Air temperature: " + str(p.metadata.air_temperature) + '$^{\circ}C$' '\n'
plt.figtext(0.08, 0.12 , metadata_text,
horizontalalignment='left',
verticalalignment='center', wrap=True, fontsize=4)
to_plot(plot_order)
if invert_depth:
fig.gca().invert_yaxis()
plt.tight_layout()
plt.subplots_adjust(wspace=0)
if save == True:
fig.savefig(fig_fname)
print('Figure saved as ' + fig_fname)
def print_metadata(self):
print('Not implemented [print_metadata()]')
def print_layers(self):
print('Not implemented [print_layers()]')
def calc_SWE(self, method='avg', ice_layer_density=680):
'''
Calculate SWE using one of three methods: 'avg', 'samples' or 'layers'.
:param method: 'avg', 'samples' or 'layers'. No default.
- 'avg' simply multiplies the mean of all density samples by the total snow depth
- 'samples' uses the density samples directly: the top and bottom layers run from the pit top/bottom
to the half-way points between samples 1;2 and N-1;N, all other layer horizons sit half-way
between samples, and each layer takes the density of the sample in its middle
- 'layers' assigns a density to each stratigraphic layer from the matching density sample(s). Ice layers
get a fixed density. If more than one sample matches, their average is used; if none match, the
nearest sample is used (or the nearest two, upper and lower, averaged)
:param ice_layer_density: constant density assigned to ice layers (an ice layer is detected when hand hardness index = knife = 6)
:return: SWE in [cm]
'''
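# Minimal usage sketch (file path is hypothetical):
#   pit = Snowpit()
#   pit.caaml_file = 'pit_20161101.caaml'
#   pit.import_caamlv6(print2term=False)
#   swe_cm = pit.calc_SWE(method='samples')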
if method == 'avg':
SWE=(self.density_profile.density.mean()*self.layers_top[0])/1000
if method == 'samples':
if self.layers is not None:
#make layer boundaries, horizons, at half-points between density samples
#make into pandas to use rolling mean, make back into numpy array
self.density_profile.layer_horz = pd.DataFrame(self.density_profile.depth).rolling(2,min_periods=2).mean().to_numpy()
#override first value of rolling mean, nan, with top max height of snowpit
self.density_profile.layer_horz[0] = self.layers_top[0]
# append the bottom of the snowpit (depth 0)
self.density_profile.layer_horz = np.append(self.density_profile.layer_horz,self.layers_bot[-1])
# calculate thicknesses:
self.density_profile.layer_thickness = abs(np.diff(self.density_profile.layer_horz))
SWE=(self.density_profile.layer_thickness*self.density_profile.density / 1000).sum()
else:
print('No layers: choose another method')
return
if method == 'layers':
"""
Correction to do: If the first density sample is below the bottom of the second layer then it fails
one logic could be: take height of upper density. Assign this density to any layer above unless flagged as ice. Do the opposite for bootom layers
10:30
then assign density o layers which have a density sample within their height range
10:30
and finally do interpolation for the one having no value after this
"""
if self.layers is not None:
def nearest(direction, lookin, lookfor):
if direction == 'up':
idx = np.where(lookin > lookfor)[0][-1]
elif direction == 'down':
idx = np.where(lookin < lookfor)[0][0]
else:
print('ERROR: You must provide a direction, up or down')
return idx
# get thickness of our strat-layers:
self.layers_thickness = self.layers_top-self.layers_bot
#initialize numpy array for our strat-densities, length of the strat-layers
self.layers_density = np.zeros(len(self.layers_top))
#get the density of strat-layers, with different conditions
for i in range(len(self.layers_top)):
#if ice layer, set arbitrary density:
if self.layers_hardness_index[i] == 6:
self.layers_density[i] = ice_layer_density
#if not ice, check if there are NO density samples within the strat-layer:
elif np.sum((self.density_profile.depth > self.layers_bot[i]) & (self.density_profile.depth < self.layers_top[i])) == 0:
#if yes:
#take care of first layer, bottom layer, since they both have only one of idxlower/idxupper
if i == 0:
self.layers_density[i] = self.density_profile.density[nearest('down',self.density_profile.depth,self.layers_bot[i])]
elif i == len(self.layers_top)-1:
self.layers_density[i]=self.density_profile.density[nearest('up',self.density_profile.depth,self.layers_top[i])]
#for all other layers, look both up and down:
else:
print(i)
idxupper = nearest('up',self.density_profile.depth,self.layers_top[i])
idxlower = nearest('down',self.density_profile.depth,self.layers_top[i])
self.layers_density[i] = self.density_profile.density[idxupper:idxlower+1].mean()
#if there ARE samples within the layer, take mean of those samples:
else:
self.layers_density[i]=self.density_profile.density[(self.density_profile.depth >= self.layers_bot[i]) & (self.density_profile.depth <= self.layers_top[i])].mean()
SWE = (self.layers_thickness * self.layers_density / 1000).sum()
else:
print('No layers: choose another method')
return
return SWE
#
#
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for distance utility functions."""
import tensorflow as tf
from poem.core import distance_utils
class DistanceUtilsTest(tf.test.TestCase):
def test_compute_l2_distances(self):
# Shape = [2, 1, 2, 2]
lhs_points = [[[[0.0, 1.0], [2.0, 3.0]]], [[[10.0, 11.0], [12.0, 13.0]]]]
rhs_points = [[[[0.0, 1.1], [2.3, 3.4]]], [[[10.4, 11.0], [12.4, 13.3]]]]
# Shape = [2, 1, 2]
distances = distance_utils.compute_l2_distances(lhs_points, rhs_points)
self.assertAllClose(distances, [[[0.1, 0.5]], [[0.4, 0.5]]])
def test_compute_l2_distances_keepdims(self):
# Shape = [2, 1, 2, 2]
lhs = [[[[0.0, 1.0], [2.0, 3.0]]], [[[10.0, 11.0], [12.0, 13.0]]]]
rhs = [[[[0.0, 1.1], [2.3, 3.4]]], [[[10.4, 11.0], [12.4, 13.3]]]]
    # Shape = [2, 1, 2, 1]
distances = distance_utils.compute_l2_distances(lhs, rhs, keepdims=True)
self.assertAllClose(distances, [[[[0.1], [0.5]]], [[[0.4], [0.5]]]])
def test_compute_squared_l2_distances(self):
# Shape = [2, 1, 2, 2]
lhs_points = [[[[0.0, 1.0], [2.0, 3.0]]], [[[10.0, 11.0], [12.0, 13.0]]]]
rhs_points = [[[[0.0, 1.1], [2.3, 3.4]]], [[[10.4, 11.0], [12.4, 13.3]]]]
# Shape = [2, 1, 2]
distances = distance_utils.compute_l2_distances(
lhs_points, rhs_points, squared=True)
self.assertAllClose(distances, [[[0.01, 0.25]], [[0.16, 0.25]]])
def test_compute_sigmoid_matching_probabilities(self):
inner_distances = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
matching_probabilities = (
distance_utils.compute_sigmoid_matching_probabilities(
inner_distances, a=0.01, b=1.0))
self.assertAllClose(matching_probabilities,
[[0.70617913, 0.704397395, 0.702607548],
[0.700809625, 0.69900366, 0.697189692]])
def test_compute_sigmoid_matching_distances(self):
inner_distances = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
matching_distances = distance_utils.compute_sigmoid_matching_distances(
inner_distances, a=0.01, b=1.0)
self.assertAllClose(matching_distances,
[[0.347886348, 0.350412601, 0.352956796],
[0.355519006, 0.3580993, 0.360697751]])
def test_compute_all_pair_squared_l2_distances(self):
# Shape = [2, 2, 2].
lhs = tf.constant([[[1.0, 2.0], [3.0, 4.0]], [[4.0, 3.0], [2.0, 1.0]]])
# Shape = [2, 3, 2].
rhs = tf.constant([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[6.0, 5.0], [4.0, 3.0], [2.0, 1.0]]])
# Shape = [2, 2, 3].
distances = distance_utils.compute_all_pair_l2_distances(
lhs, rhs, squared=True)
self.assertAllClose(distances, [[[0.0, 8.0, 32.0], [8.0, 0.0, 8.0]],
[[8.0, 0.0, 8.0], [32.0, 8.0, 0.0]]])
def test_compute_all_pair_l2_distances(self):
# Shape = [2, 2, 2].
lhs = tf.constant([[[1.0, 2.0], [3.0, 4.0]], [[4.0, 3.0], [2.0, 1.0]]])
# Shape = [2, 3, 2].
rhs = tf.constant([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[6.0, 5.0], [4.0, 3.0], [2.0, 1.0]]])
# Shape = [2, 2, 3].
distances = distance_utils.compute_all_pair_l2_distances(lhs, rhs)
self.assertAllClose(
distances,
[[[0.0, 2.828427125, 5.656854249], [2.828427125, 0.0, 2.828427125]],
[[2.828427125, 0.0, 2.828427125], [5.656854249, 2.828427125, 0.0]]])
def test_compute_gaussian_likelihoods(self):
# Shape = [2, 1, 1].
means = tf.constant([[[1.0]], [[2.0]]])
# Shape = [2, 1, 1].
stddevs = tf.constant([[[1.0]], [[2.0]]])
# Shape = [2, 3, 1].
samples = tf.constant([[[1.0], [2.0], [3.0]], [[2.0], [4.0], [6.0]]])
# Shape = [2, 3, 1].
likelihoods = distance_utils.compute_gaussian_likelihoods(
means, stddevs, samples)
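    # The expected values below equal 2 * (1 - Phi(k)) for
    # k = |sample - mean| / stddev = 0, 1, 2, i.e. two-sided Gaussian tail
    # probabilities, which appears to be what the function returns here.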
self.assertAllClose(
likelihoods, [[[1.0], [0.3173], [0.0455]], [[1.0], [0.3173], [0.0455]]],
atol=1e-4)
def test_compute_distance_matrix(self):
# Shape = [2, 1]
start_points = tf.constant([[1], [2]])
# Shape = [3, 1]
end_points = tf.constant([[3], [4], [5]])
distance_matrix = distance_utils.compute_distance_matrix(
start_points, end_points, distance_fn=tf.math.subtract)
self.assertAllEqual(distance_matrix,
[[[-2], [-3], [-4]], [[-1], [-2], [-3]]])
def test_compute_distance_matrix_with_both_masks(self):
# Shape = [2, 3, 1].
start_points = tf.constant([
[[1.0], [2.0], [3.0]],
[[4.0], [5.0], [6.0]],
])
# Shape = [3, 3, 1].
end_points = tf.constant([
[[11.0], [12.0], [13.0]],
[[14.0], [15.0], [16.0]],
[[17.0], [18.0], [19.0]],
])
# Shape = [2, 3].
start_point_masks = tf.constant([[1.0, 1.0, 1.0], [1.0, 1.0, 0.0]])
# Shape = [3, 3].
end_point_masks = tf.constant([[1.0, 0.0, 1.0], [1.0, 0.0, 0.0],
[1.0, 1.0, 1.0]])
def masked_add(lhs, rhs, masks):
masks = tf.expand_dims(masks, axis=-1)
return tf.math.reduce_sum((lhs + rhs) * masks, axis=[-2, -1])
# Shape = [2, 3].
distance_matrix = distance_utils.compute_distance_matrix(
start_points,
end_points,
distance_fn=masked_add,
start_point_masks=start_point_masks,
end_point_masks=end_point_masks)
self.assertAllClose(distance_matrix,
[[28.0, 15.0, 60.0], [15.0, 18.0, 44.0]])
def test_compute_distance_matrix_with_start_masks(self):
# Shape = [2, 3, 1].
start_points = tf.constant([
[[1.0], [2.0], [3.0]],
[[4.0], [5.0], [6.0]],
])
# Shape = [3, 3, 1].
end_points = tf.constant([
[[11.0], [12.0], [13.0]],
[[14.0], [15.0], [16.0]],
[[17.0], [18.0], [19.0]],
])
# Shape = [2, 3].
start_point_masks = tf.constant([[1.0, 1.0, 1.0], [1.0, 1.0, 0.0]])
def masked_add(lhs, rhs, masks):
masks = tf.expand_dims(masks, axis=-1)
return tf.math.reduce_sum((lhs + rhs) * masks, axis=[-2, -1])
# Shape = [2, 3].
distance_matrix = distance_utils.compute_distance_matrix(
start_points,
end_points,
distance_fn=masked_add,
start_point_masks=start_point_masks)
self.assertAllClose(distance_matrix,
[[42.0, 51.0, 60.0], [32.0, 38.0, 44.0]])
def test_compute_distance_matrix_with_end_masks(self):
# Shape = [2, 3, 1].
start_points = tf.constant([
[[1.0], [2.0], [3.0]],
[[4.0], [5.0], [6.0]],
])
# Shape = [3, 3, 1].
end_points = tf.constant([
[[11.0], [12.0], [13.0]],
[[14.0], [15.0], [16.0]],
[[17.0], [18.0], [19.0]],
])
# Shape = [3, 3].
end_point_masks = tf.constant([[1.0, 0.0, 1.0], [1.0, 0.0, 0.0],
[1.0, 1.0, 1.0]])
def masked_add(lhs, rhs, masks):
masks = tf.expand_dims(masks, axis=-1)
return tf.math.reduce_sum((lhs + rhs) * masks, axis=[-2, -1])
# Shape = [2, 3].
distance_matrix = distance_utils.compute_distance_matrix(
start_points,
end_points,
distance_fn=masked_add,
end_point_masks=end_point_masks)
self.assertAllClose(distance_matrix,
[[28.0, 15.0, 60.0], [34.0, 18.0, 69.0]])
def test_compute_gaussian_kl_divergence_unit_univariate(self):
lhs_means = tf.constant([[0.0]])
lhs_stddevs = tf.constant([[1.0]])
kl_divergence = distance_utils.compute_gaussian_kl_divergence(
lhs_means, lhs_stddevs, rhs_means=0.0, rhs_stddevs=1.0)
self.assertAllClose(kl_divergence, [0.0])
def test_compute_gaussian_kl_divergence_unit_multivariate_to_univariate(self):
lhs_means = tf.constant([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
lhs_stddevs = tf.constant([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
kl_divergence = distance_utils.compute_gaussian_kl_divergence(
lhs_means, lhs_stddevs, rhs_means=0.0, rhs_stddevs=1.0)
self.assertAllClose(kl_divergence, [0.0, 0.0])
def test_compute_gaussian_kl_divergence_multivariate_to_multivariate(self):
lhs_means = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
lhs_stddevs = tf.constant([[6.0, 5.0, 4.0], [3.0, 2.0, 1.0]])
rhs_means = tf.constant([[6.0, 5.0, 4.0], [3.0, 2.0, 1.0]])
rhs_stddevs = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
kl_divergence = distance_utils.compute_gaussian_kl_divergence(
lhs_means, lhs_stddevs, rhs_means=rhs_means, rhs_stddevs=rhs_stddevs)
self.assertAllClose(kl_divergence, [31.198712171, 2.429343385])
if __name__ == '__main__':
tf.test.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Logistic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
class Logistic(distribution.Distribution):
"""The Logistic distribution with location `loc` and `scale` parameters.
#### Mathematical details
  The cumulative distribution function of this distribution is:
```none
cdf(x; mu, sigma) = 1 / (1 + exp(-(x - mu) / sigma))
```
where `loc = mu` and `scale = sigma`.
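  For reference (consistent with the CDF above), the probability density
  function is:
  ```none
  pdf(x; mu, sigma) = exp(-(x - mu) / sigma) / (sigma * (1 + exp(-(x - mu) / sigma))**2)
  ```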
The Logistic distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Logistic(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
# Define a single scalar Logistic distribution.
dist = tf.contrib.distributions.Logistic(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Logistics.
# The first has mean 1 and scale 11, the second 2 and 22.
dist = tf.contrib.distributions.Logistic(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Logistics.
# Both have mean 1, but different scales.
dist = tf.contrib.distributions.Logistic(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Logistic"):
"""Construct Logistic distributions with mean and scale `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor, the means of the distribution(s).
scale: Floating point tensor, the scales of the distribution(s). Must
contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[loc, scale]) as ns:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
contrib_tensor_util.assert_same_float_dtype([self._loc, self._scale])
super(Logistic, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=ns)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(), self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
uniform = random_ops.random_uniform(
shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
dtype=self.dtype,
seed=seed)
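    # Note: log(u) - log1p(-u) = logit(u), the inverse CDF of the standard
    # logistic distribution, so this is inverse-transform sampling.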
sampled = math_ops.log(uniform) - math_ops.log1p(-1. * uniform)
return sampled * self.scale + self.loc
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_cdf(self, x):
    return -nn_ops.softplus(-self._z(x))
def _cdf(self, x):
return math_ops.sigmoid(self._z(x))
def _log_survival_function(self, x):
    return -nn_ops.softplus(self._z(x))
def _survival_function(self, x):
return math_ops.sigmoid(-self._z(x))
def _log_unnormalized_prob(self, x):
z = self._z(x)
return - z - 2. * nn_ops.softplus(-z)
def _log_normalization(self):
return math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast sigma.
scale = self.scale * array_ops.ones_like(self.loc)
return 2 + math_ops.log(scale)
def _mean(self):
return self.loc * array_ops.ones_like(self.scale)
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(3)
def _mode(self):
return self._mean()
def _z(self, x):
"""Standardize input `x` to a unit logistic."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
|
|
"""
Support for Z-Wave lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.zwave/
"""
import logging
# Because we do not compile openzwave on CI
# pylint: disable=import-error
from threading import Timer
from homeassistant.components.light import ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, \
ATTR_RGB_COLOR, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, \
SUPPORT_RGB_COLOR, DOMAIN, Light
from homeassistant.components import zwave
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.util.color import HASS_COLOR_MAX, HASS_COLOR_MIN, \
color_temperature_mired_to_kelvin, color_temperature_to_rgb, \
color_rgb_to_rgbw, color_rgbw_to_rgb
_LOGGER = logging.getLogger(__name__)
AEOTEC = 0x86
AEOTEC_ZW098_LED_BULB = 0x62
AEOTEC_ZW098_LED_BULB_LIGHT = (AEOTEC, AEOTEC_ZW098_LED_BULB)
COLOR_CHANNEL_WARM_WHITE = 0x01
COLOR_CHANNEL_COLD_WHITE = 0x02
COLOR_CHANNEL_RED = 0x04
COLOR_CHANNEL_GREEN = 0x08
COLOR_CHANNEL_BLUE = 0x10
WORKAROUND_ZW098 = 'zw098'
DEVICE_MAPPINGS = {
AEOTEC_ZW098_LED_BULB_LIGHT: WORKAROUND_ZW098
}
# Generate midpoint color temperatures for bulbs that have limited
# support for white light colors
TEMP_MID_HASS = (HASS_COLOR_MAX - HASS_COLOR_MIN) / 2 + HASS_COLOR_MIN
TEMP_WARM_HASS = (HASS_COLOR_MAX - HASS_COLOR_MIN) / 3 * 2 + HASS_COLOR_MIN
TEMP_COLD_HASS = (HASS_COLOR_MAX - HASS_COLOR_MIN) / 3 + HASS_COLOR_MIN
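# For illustration only (assuming, e.g., HASS_COLOR_MIN = 154 and
# HASS_COLOR_MAX = 500 mireds): TEMP_MID_HASS = 327, TEMP_WARM_HASS ~= 384.7
# and TEMP_COLD_HASS ~= 269.3.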
SUPPORT_ZWAVE_DIMMER = SUPPORT_BRIGHTNESS
SUPPORT_ZWAVE_COLOR = SUPPORT_BRIGHTNESS | SUPPORT_RGB_COLOR
SUPPORT_ZWAVE_COLORTEMP = (SUPPORT_BRIGHTNESS | SUPPORT_RGB_COLOR
| SUPPORT_COLOR_TEMP)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Find and add Z-Wave lights."""
if discovery_info is None or zwave.NETWORK is None:
return
node = zwave.NETWORK.nodes[discovery_info[zwave.const.ATTR_NODE_ID]]
value = node.values[discovery_info[zwave.const.ATTR_VALUE_ID]]
name = '{}.{}'.format(DOMAIN, zwave.object_id(value))
node_config = hass.data[zwave.DATA_DEVICE_CONFIG].get(name)
refresh = node_config.get(zwave.CONF_REFRESH_VALUE)
delay = node_config.get(zwave.CONF_REFRESH_DELAY)
_LOGGER.debug('name=%s node_config=%s CONF_REFRESH_VALUE=%s'
' CONF_REFRESH_DELAY=%s', name, node_config,
refresh, delay)
if value.command_class != zwave.const.COMMAND_CLASS_SWITCH_MULTILEVEL:
return
if value.type != zwave.const.TYPE_BYTE:
return
if value.genre != zwave.const.GENRE_USER:
return
value.set_change_verified(False)
if node.has_command_class(zwave.const.COMMAND_CLASS_SWITCH_COLOR):
add_devices([ZwaveColorLight(value, refresh, delay)])
else:
add_devices([ZwaveDimmer(value, refresh, delay)])
def brightness_state(value):
"""Return the brightness and state."""
if value.data > 0:
return (value.data / 99) * 255, STATE_ON
else:
return 0, STATE_OFF
class ZwaveDimmer(zwave.ZWaveDeviceEntity, Light):
"""Representation of a Z-Wave dimmer."""
def __init__(self, value, refresh, delay):
"""Initialize the light."""
zwave.ZWaveDeviceEntity.__init__(self, value, DOMAIN)
self._brightness = None
self._state = None
self._delay = delay
self._refresh_value = refresh
self._zw098 = None
# Enable appropriate workaround flags for our device
# Make sure that we have values for the key before converting to int
if (value.node.manufacturer_id.strip() and
value.node.product_id.strip()):
specific_sensor_key = (int(value.node.manufacturer_id, 16),
int(value.node.product_id, 16))
if specific_sensor_key in DEVICE_MAPPINGS:
if DEVICE_MAPPINGS[specific_sensor_key] == WORKAROUND_ZW098:
_LOGGER.debug("AEOTEC ZW098 workaround enabled")
self._zw098 = 1
self.update_properties()
# Used for value change event handling
self._refreshing = False
self._timer = None
_LOGGER.debug('self._refreshing=%s self.delay=%s',
self._refresh_value, self._delay)
def update_properties(self):
"""Update internal properties based on zwave values."""
# Brightness
self._brightness, self._state = brightness_state(self._value)
def value_changed(self, value):
"""Called when a value for this entity's node has changed."""
if self._refresh_value:
if self._refreshing:
self._refreshing = False
self.update_properties()
else:
def _refresh_value():
"""Used timer callback for delayed value refresh."""
self._refreshing = True
self._value.refresh()
if self._timer is not None and self._timer.isAlive():
self._timer.cancel()
self._timer = Timer(self._delay, _refresh_value)
self._timer.start()
self.schedule_update_ha_state()
else:
self.update_properties()
self.schedule_update_ha_state()
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def is_on(self):
"""Return true if device is on."""
return self._state == STATE_ON
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_ZWAVE_DIMMER
def turn_on(self, **kwargs):
"""Turn the device on."""
# Zwave multilevel switches use a range of [0, 99] to control
# brightness. Level 255 means to set it to previous value.
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
brightness = int((self._brightness / 255) * 99)
else:
brightness = 255
if self._value.node.set_dimmer(self._value.value_id, brightness):
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
if self._value.node.set_dimmer(self._value.value_id, 0):
self._state = STATE_OFF
def ct_to_rgb(temp):
"""Convert color temperature (mireds) to RGB."""
colorlist = list(
color_temperature_to_rgb(color_temperature_mired_to_kelvin(temp)))
return [int(val) for val in colorlist]
class ZwaveColorLight(ZwaveDimmer):
"""Representation of a Z-Wave color changing light."""
def __init__(self, value, refresh, delay):
"""Initialize the light."""
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
self._value_color = None
self._value_color_channels = None
self._color_channels = None
self._rgb = None
self._ct = None
super().__init__(value, refresh, delay)
# Create a listener so the color values can be linked to this entity
dispatcher.connect(
self._value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED)
self._get_color_values()
def _get_color_values(self):
"""Search for color values available on this node."""
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
_LOGGER.debug("Searching for zwave color values")
# Currently zwave nodes only exist with one color element per node.
if self._value_color is None:
for value_color in self._value.node.get_rgbbulbs().values():
self._value_color = value_color
if self._value_color_channels is None:
self._value_color_channels = self.get_value(
class_id=zwave.const.COMMAND_CLASS_SWITCH_COLOR,
genre=zwave.const.GENRE_SYSTEM, type=zwave.const.TYPE_INT)
if self._value_color and self._value_color_channels:
_LOGGER.debug("Zwave node color values found.")
dispatcher.disconnect(
self._value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED)
self.update_properties()
def _value_added(self, value):
"""Called when a value has been added to the network."""
if self._value.node != value.node:
return
# Check for the missing color values
self._get_color_values()
def update_properties(self):
"""Update internal properties based on zwave values."""
super().update_properties()
if self._value_color is None:
return
if self._value_color_channels is None:
return
# Color Channels
self._color_channels = self._value_color_channels.data
# Color Data String
data = self._value_color.data
# RGB is always present in the openzwave color data string.
self._rgb = [
int(data[1:3], 16),
int(data[3:5], 16),
int(data[5:7], 16)]
# Parse remaining color channels. Openzwave appends white channels
# that are present.
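        # Illustrative (made-up) example: data = '#FF800037' would give
        # rgb [255, 128, 0], and '37' (= 55) would be the warm-white byte if
        # that channel is reported as present.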
index = 7
# Warm white
if self._color_channels & COLOR_CHANNEL_WARM_WHITE:
warm_white = int(data[index:index+2], 16)
index += 2
else:
warm_white = 0
# Cold white
if self._color_channels & COLOR_CHANNEL_COLD_WHITE:
cold_white = int(data[index:index+2], 16)
index += 2
else:
cold_white = 0
# Color temperature. With the AEOTEC ZW098 bulb, only two color
# temperatures are supported. The warm and cold channel values
# indicate brightness for warm/cold color temperature.
if self._zw098:
if warm_white > 0:
self._ct = TEMP_WARM_HASS
self._rgb = ct_to_rgb(self._ct)
elif cold_white > 0:
self._ct = TEMP_COLD_HASS
self._rgb = ct_to_rgb(self._ct)
else:
# RGB color is being used. Just report midpoint.
self._ct = TEMP_MID_HASS
elif self._color_channels & COLOR_CHANNEL_WARM_WHITE:
self._rgb = list(color_rgbw_to_rgb(*self._rgb, w=warm_white))
elif self._color_channels & COLOR_CHANNEL_COLD_WHITE:
self._rgb = list(color_rgbw_to_rgb(*self._rgb, w=cold_white))
# If no rgb channels supported, report None.
if not (self._color_channels & COLOR_CHANNEL_RED or
self._color_channels & COLOR_CHANNEL_GREEN or
self._color_channels & COLOR_CHANNEL_BLUE):
self._rgb = None
@property
def rgb_color(self):
"""Return the rgb color."""
return self._rgb
@property
def color_temp(self):
"""Return the color temperature."""
return self._ct
def turn_on(self, **kwargs):
"""Turn the device on."""
rgbw = None
if ATTR_COLOR_TEMP in kwargs:
# Color temperature. With the AEOTEC ZW098 bulb, only two color
# temperatures are supported. The warm and cold channel values
# indicate brightness for warm/cold color temperature.
if self._zw098:
if kwargs[ATTR_COLOR_TEMP] > TEMP_MID_HASS:
self._ct = TEMP_WARM_HASS
rgbw = b'#000000FF00'
else:
self._ct = TEMP_COLD_HASS
rgbw = b'#00000000FF'
elif ATTR_RGB_COLOR in kwargs:
self._rgb = kwargs[ATTR_RGB_COLOR]
if (not self._zw098 and (
self._color_channels & COLOR_CHANNEL_WARM_WHITE or
self._color_channels & COLOR_CHANNEL_COLD_WHITE)):
rgbw = b'#'
for colorval in color_rgb_to_rgbw(*self._rgb):
rgbw += format(colorval, '02x').encode('utf-8')
rgbw += b'00'
else:
rgbw = b'#'
for colorval in self._rgb:
rgbw += format(colorval, '02x').encode('utf-8')
rgbw += b'0000'
if rgbw and self._value_color:
self._value_color.node.set_rgbw(self._value_color.value_id, rgbw)
super().turn_on(**kwargs)
@property
def supported_features(self):
"""Flag supported features."""
if self._zw098:
return SUPPORT_ZWAVE_COLORTEMP
else:
return SUPPORT_ZWAVE_COLOR
|
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import copy
import mock
import os
import yaml
from c7n.testing import TestUtils
from click.testing import CliRunner
from c7n_org import cli as org
ACCOUNTS_AWS_DEFAULT = yaml.safe_dump({
'accounts': [
{'name': 'dev',
'account_id': '112233445566',
'tags': ['red', 'black'],
'role': 'arn:aws:iam:{account_id}::/role/foobar'},
{'name': 'qa',
'account_id': '002244668899',
'tags': ['red', 'green'],
'role': 'arn:aws:iam:{account_id}::/role/foobar'},
],
}, default_flow_style=False)
ACCOUNTS_AZURE = {
'subscriptions': [{
'subscription_id': 'ea42f556-5106-4743-99b0-c129bfa71a47',
'name': 'devx',
}]
}
ACCOUNTS_GCP = {
'projects': [{
'project_id': 'custodian-1291',
'name': 'devy'
}],
}
POLICIES_AWS_DEFAULT = yaml.safe_dump({
'policies': [
{'name': 'compute',
'resource': 'aws.ec2',
'tags': ['red', 'green']},
{'name': 'serverless',
'resource': 'aws.lambda',
'tags': ['red', 'black']},
],
}, default_flow_style=False)
class OrgTest(TestUtils):
def setup_run_dir(self, accounts=None, policies=None):
root = self.get_temp_dir()
if accounts:
accounts = yaml.safe_dump(accounts, default_flow_style=False)
else:
accounts = ACCOUNTS_AWS_DEFAULT
with open(os.path.join(root, 'accounts.yml'), 'w') as fh:
fh.write(accounts)
if policies:
policies = yaml.safe_dump(policies, default_flow_style=False)
else:
policies = POLICIES_AWS_DEFAULT
with open(os.path.join(root, 'policies.yml'), 'w') as fh:
fh.write(policies)
cache_path = os.path.join(root, 'cache')
os.makedirs(cache_path)
return root
def test_validate_azure_provider(self):
run_dir = self.setup_run_dir(
accounts=ACCOUNTS_AZURE,
policies={'policies': [{
'name': 'vms',
'resource': 'azure.vm'}]
})
logger = mock.MagicMock()
run_account = mock.MagicMock()
run_account.return_value = ({}, True)
self.patch(org, 'logging', logger)
self.patch(org, 'run_account', run_account)
self.change_cwd(run_dir)
runner = CliRunner()
result = runner.invoke(
org.cli,
['run', '-c', 'accounts.yml', '-u', 'policies.yml',
'--debug', '-s', 'output', '--cache-path', 'cache'],
catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
def test_validate_gcp_provider(self):
run_dir = self.setup_run_dir(
accounts=ACCOUNTS_GCP,
policies={
'policies': [{
'resource': 'gcp.instance',
'name': 'instances'}]
})
logger = mock.MagicMock()
run_account = mock.MagicMock()
run_account.return_value = ({}, True)
self.patch(org, 'logging', logger)
self.patch(org, 'run_account', run_account)
self.change_cwd(run_dir)
runner = CliRunner()
result = runner.invoke(
org.cli,
['run', '-c', 'accounts.yml', '-u', 'policies.yml',
'--debug', '-s', 'output', '--cache-path', 'cache'],
catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
def test_cli_run_aws(self):
run_dir = self.setup_run_dir()
logger = mock.MagicMock()
run_account = mock.MagicMock()
run_account.return_value = (
{'compute': 24, 'serverless': 12}, True)
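        # With two accounts and the two default regions (see
        # test_resolve_regions), run_account appears to be invoked four times
        # per run, hence the 4x totals asserted on the log output below.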
self.patch(org, 'logging', logger)
self.patch(org, 'run_account', run_account)
self.change_cwd(run_dir)
log_output = self.capture_logging('c7n_org')
runner = CliRunner()
result = runner.invoke(
org.cli,
['run', '-c', 'accounts.yml', '-u', 'policies.yml',
'--debug', '-s', 'output', '--cache-path', 'cache',
'--metrics-uri', 'aws://'],
catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(
log_output.getvalue().strip(),
"Policy resource counts Counter({'compute': 96, 'serverless': 48})")
def test_filter_policies(self):
d = {'policies': [
{'name': 'find-ml',
'tags': ['bar:xyz', 'red', 'black'],
'resource': 'gcp.instance'},
{'name': 'find-serverless',
'resource': 'aws.lambda',
'tags': ['blue', 'red']}]}
t1 = copy.deepcopy(d)
org.filter_policies(t1, [], [], [], [])
self.assertEqual(
[n['name'] for n in t1['policies']],
['find-ml', 'find-serverless'])
t2 = copy.deepcopy(d)
org.filter_policies(t2, ['blue', 'red'], [], [], [])
self.assertEqual(
[n['name'] for n in t2['policies']], ['find-serverless'])
t3 = copy.deepcopy(d)
org.filter_policies(t3, [], ['find-ml'], [], [])
self.assertEqual(
[n['name'] for n in t3['policies']], ['find-ml'])
t4 = copy.deepcopy(d)
org.filter_policies(t4, [], [], 'gcp.instance', [])
self.assertEqual(
[n['name'] for n in t4['policies']], ['find-ml'])
def test_resolve_regions(self):
self.assertEqual(
org.resolve_regions(['us-west-2']),
['us-west-2'])
self.assertEqual(
org.resolve_regions([]),
('us-east-1', 'us-west-2'))
def test_filter_accounts(self):
d = {'accounts': [
{'name': 'dev',
'tags': ['blue', 'red']},
{'name': 'prod',
'tags': ['green', 'red']}]}
t1 = copy.deepcopy(d)
org.filter_accounts(t1, [], [], [])
self.assertEqual(
[a['name'] for a in t1['accounts']],
['dev', 'prod'])
t2 = copy.deepcopy(d)
org.filter_accounts(t2, [], [], ['prod'])
self.assertEqual(
[a['name'] for a in t2['accounts']],
['dev'])
t3 = copy.deepcopy(d)
org.filter_accounts(t3, [], ['dev'], [])
self.assertEqual(
[a['name'] for a in t3['accounts']],
['dev'])
t4 = copy.deepcopy(d)
org.filter_accounts(t4, ['red', 'blue'], [], [])
self.assertEqual(
[a['name'] for a in t4['accounts']],
['dev'])
|
|
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API over the nova service.
"""
from django.utils import http as utils_http
from django.views import generic
from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
@urls.register
class Keypairs(generic.View):
"""API for nova keypairs.
"""
url_regex = r'nova/keypairs/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of keypairs associated with the current logged-in
account.
The listing result is an object with property "items".
"""
result = api.nova.keypair_list(request)
return {'items': [u.to_dict() for u in result]}
@rest_utils.ajax(data_required=True)
def post(self, request):
"""Create a keypair.
Create a keypair using the parameters supplied in the POST
application/json object. The parameters are:
:param name: the name to give the keypair
:param public_key: (optional) a key to import
This returns the new keypair object on success.
"""
if 'public_key' in request.DATA:
new = api.nova.keypair_import(request, request.DATA['name'],
request.DATA['public_key'])
else:
new = api.nova.keypair_create(request, request.DATA['name'])
return rest_utils.CreatedResponse(
'/api/nova/keypairs/%s' % utils_http.urlquote(new.name),
new.to_dict()
)
@urls.register
class AvailabilityZones(generic.View):
"""API for nova availability zones.
"""
url_regex = r'nova/availzones/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of availability zones.
The following get parameters may be passed in the GET
request:
:param detailed: If this equals "true" then the result will
include more detail.
The listing result is an object with property "items".
"""
detailed = request.GET.get('detailed') == 'true'
result = api.nova.availability_zone_list(request, detailed)
return {'items': [u.to_dict() for u in result]}
@urls.register
class Limits(generic.View):
"""API for nova limits.
"""
url_regex = r'nova/limits/$'
@rest_utils.ajax()
def get(self, request):
"""Get an object describing the current project limits.
Note: the Horizon API doesn't support any other project (tenant) but
the underlying client does...
The following get parameters may be passed in the GET
request:
:param reserved: This may be set to "true" but it's not
clear what the result of that is.
The result is an object with limits as properties.
"""
reserved = request.GET.get('reserved') == 'true'
result = api.nova.tenant_absolute_limits(request, reserved)
return result
@urls.register
class Servers(generic.View):
"""API over all servers.
"""
url_regex = r'nova/servers/$'
_optional_create = [
'block_device_mapping', 'block_device_mapping_v2', 'nics', 'meta',
'availability_zone', 'instance_count', 'admin_pass', 'disk_config',
'config_drive'
]
@rest_utils.ajax(data_required=True)
def post(self, request):
"""Create a server.
Create a server using the parameters supplied in the POST
application/json object. The required parameters as specified by
the underlying novaclient are:
:param name: The new server name.
:param source_id: The ID of the image to use.
:param flavor_id: The ID of the flavor to use.
:param key_name: (optional extension) name of previously created
keypair to inject into the instance.
:param user_data: user data to pass to be exposed by the metadata
server this can be a file type object as well or a
string.
:param security_groups: An array of one or more objects with a "name"
attribute.
Other parameters are accepted as per the underlying novaclient:
"block_device_mapping", "block_device_mapping_v2", "nics", "meta",
"availability_zone", "instance_count", "admin_pass", "disk_config",
"config_drive"
This returns the new server object on success.
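        Example (illustrative values only) POST body:
        {"name": "web-1", "source_id": "<image-id>", "flavor_id": "<flavor-id>",
         "key_name": "mykey", "user_data": "", "security_groups": [{"name": "default"}]}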
"""
try:
args = (
request,
request.DATA['name'],
request.DATA['source_id'],
request.DATA['flavor_id'],
request.DATA['key_name'],
request.DATA['user_data'],
request.DATA['security_groups'],
)
except KeyError as e:
raise rest_utils.AjaxError(400, 'missing required parameter '
"'%s'" % e.args[0])
kw = {}
for name in self._optional_create:
if name in request.DATA:
kw[name] = request.DATA[name]
new = api.nova.server_create(*args, **kw)
return rest_utils.CreatedResponse(
'/api/nova/servers/%s' % utils_http.urlquote(new.id),
new.to_dict()
)
@urls.register
class Server(generic.View):
"""API for retrieving a single server
"""
url_regex = r'nova/servers/(?P<server_id>.+|default)$'
@rest_utils.ajax()
def get(self, request, server_id):
"""Get a specific server
http://localhost/api/nova/servers/1
"""
return api.nova.server_get(request, server_id).to_dict()
@urls.register
class Extensions(generic.View):
"""API for nova extensions.
"""
url_regex = r'nova/extensions/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of extensions.
The listing result is an object with property "items". Each item is
        an extension.
Example GET:
http://localhost/api/nova/extensions
"""
result = api.nova.list_extensions(request)
return {'items': [e.to_dict() for e in result]}
@urls.register
class Flavors(generic.View):
"""API for nova flavors.
"""
url_regex = r'nova/flavors/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of flavors.
The listing result is an object with property "items". Each item is
        a flavor. By default this will return the flavors for the user's
current project. If the user is admin, public flavors will also be
returned.
:param is_public: For a regular user, set to True to see all public
flavors. For an admin user, set to False to not see public flavors.
:param get_extras: Also retrieve the extra specs.
Example GET:
http://localhost/api/nova/flavors?is_public=true
"""
is_public = request.GET.get('is_public')
is_public = (is_public and is_public.lower() == 'true')
get_extras = request.GET.get('get_extras')
get_extras = bool(get_extras and get_extras.lower() == 'true')
flavors = api.nova.flavor_list(request, is_public=is_public,
get_extras=get_extras)
result = {'items': []}
for flavor in flavors:
d = flavor.to_dict()
if get_extras:
d['extras'] = flavor.extras
result['items'].append(d)
return result
@urls.register
class Flavor(generic.View):
"""API for retrieving a single flavor
"""
url_regex = r'nova/flavors/(?P<flavor_id>[^/]+)/$'
@rest_utils.ajax()
def get(self, request, flavor_id):
"""Get a specific flavor
:param get_extras: Also retrieve the extra specs.
Example GET:
http://localhost/api/nova/flavors/1
"""
get_extras = request.GET.get('get_extras')
get_extras = bool(get_extras and get_extras.lower() == 'true')
flavor = api.nova.flavor_get(request, flavor_id, get_extras=get_extras)
result = flavor.to_dict()
if get_extras:
result['extras'] = flavor.extras
return result
@urls.register
class FlavorExtraSpecs(generic.View):
"""API for managing flavor extra specs
"""
url_regex = r'nova/flavors/(?P<flavor_id>[^/]+)/extra-specs/$'
@rest_utils.ajax()
def get(self, request, flavor_id):
"""Get a specific flavor's extra specs
Example GET:
http://localhost/api/nova/flavors/1/extra-specs
"""
return api.nova.flavor_get_extras(request, flavor_id, raw=True)
@rest_utils.ajax(data_required=True)
def patch(self, request, flavor_id):
"""Update a specific flavor's extra specs.
This method returns HTTP 204 (no content) on success.
"""
if request.DATA.get('removed'):
api.nova.flavor_extra_delete(
request, flavor_id, request.DATA.get('removed')
)
api.nova.flavor_extra_set(
request, flavor_id, request.DATA['updated']
)
@urls.register
class AggregateExtraSpecs(generic.View):
"""API for managing aggregate extra specs
"""
url_regex = r'nova/aggregates/(?P<aggregate_id>[^/]+)/extra-specs/$'
@rest_utils.ajax()
def get(self, request, aggregate_id):
"""Get a specific aggregate's extra specs
Example GET:
http://localhost/api/nova/flavors/1/extra-specs
"""
return api.nova.aggregate_get(request, aggregate_id).metadata
@rest_utils.ajax(data_required=True)
def patch(self, request, aggregate_id):
"""Update a specific aggregate's extra specs.
This method returns HTTP 204 (no content) on success.
"""
updated = request.DATA['updated']
if request.DATA.get('removed'):
for name in request.DATA.get('removed'):
updated[name] = None
api.nova.aggregate_set_metadata(request, aggregate_id, updated)
|
|
#
#Copyright (c) 2012-2021, NVIDIA CORPORATION.
#SPDX-License-Identifier: Apache-2.0
import gevent
import gevent.pool
import gevent.monkey
gevent.monkey.patch_socket()
gevent.monkey.patch_ssl()
gevent.monkey.patch_time()
import os
import re
import sys
import time
import signal
import logging
import msgpack
import zmq.green as zmq
import ssbench
from ssbench.importer import random
import ssbench.swift_client as client
from ssbench.run_state import RunState
from ssbench.util import raise_file_descriptor_limit
from ssbench.util import is_ipv6
def _container_creator(storage_urls, token, container, policy=None):
put_headers = None if policy is None else {'x-storage-policy': policy}
storage_url = random.choice(storage_urls)
http_conn = client.http_connection(storage_url)
try:
client.head_container(storage_url, token, container,
http_conn=http_conn)
except client.ClientException:
client.put_container(storage_url, token, container,
headers=put_headers, http_conn=http_conn)
def _container_deleter(concurrency, storage_urls, token, container_info):
container_name = container_info['name']
logging.info('deleting %r (%d objs)', container_name,
container_info['count'])
storage_url = random.choice(storage_urls)
http_conn = client.http_connection(storage_url)
_, obj_list = client.get_container(
random.choice(storage_urls), token, container_name,
http_conn=http_conn)
pool = gevent.pool.Pool(concurrency)
for obj_name in [o['name'] for o in obj_list]:
pool.spawn(client.delete_object, random.choice(storage_urls), token,
container_name, obj_name)
pool.join()
client.delete_container(
random.choice(storage_urls), token, container_name,
http_conn=http_conn)
def _gen_cleanup_job(object_info):
return {
'type': ssbench.DELETE_OBJECT,
'container': object_info[0],
'name': object_info[1],
}
class Master(object):
DELETER_RE = '^%s_\d+_%s$'
def __init__(self, zmq_bind_ip=None, zmq_work_port=None,
zmq_results_port=11300, quiet=False, connect_timeout=None,
network_timeout=None):
if zmq_bind_ip is not None and zmq_work_port is not None:
work_endpoint = 'tcp://%s:%d' % (zmq_bind_ip, zmq_work_port)
results_endpoint = 'tcp://%s:%d' % (zmq_bind_ip, zmq_results_port)
ipv6 = is_ipv6(zmq_bind_ip)
self.context = zmq.Context()
self.work_push = self.context.socket(zmq.PUSH)
self.work_push.ipv6 = ipv6
self.work_push.bind(work_endpoint)
self.results_pull = self.context.socket(zmq.PULL)
self.results_pull.ipv6 = ipv6
self.results_pull.bind(results_endpoint)
self.connect_timeout = connect_timeout
self.network_timeout = network_timeout
self.quiet = quiet
def process_results_to(self, results_raw, processor, label='',
run_results=None):
results = msgpack.loads(results_raw, use_list=False)
result_count = 0
for result in results:
result_count += 1
logging.debug(
'RESULT: %13s %s/%-17s %s/%s %s',
result['type'], result['container'], result['name'],
'%7.4f' % result.get('first_byte_latency')
if result.get('first_byte_latency', None) else ' (none)',
'%7.4f' % result.get('last_byte_latency')
if result.get('last_byte_latency', None) else '(none) ',
result.get('trans_id', ''))
if label and not self.quiet:
if 'exception' in result:
sys.stderr.write('X')
elif result.get('first_byte_latency', None) is not None:
if result['first_byte_latency'] < 1:
sys.stderr.write('.')
elif result['first_byte_latency'] < 3:
sys.stderr.write('o')
elif result['first_byte_latency'] < 10:
sys.stderr.write('O')
else:
sys.stderr.write('*')
else:
if result['last_byte_latency'] < 1:
sys.stderr.write('_')
elif result['last_byte_latency'] < 3:
sys.stderr.write('|')
elif result['last_byte_latency'] < 10:
sys.stderr.write('^')
else:
sys.stderr.write('@')
sys.stderr.flush()
processor(result)
if run_results:
run_results.process_raw_results(results_raw)
return result_count
def do_a_run(self, concurrency, job_generator, result_processor,
auth_kwargs, mapper_fn=None, label='', noop=False,
batch_size=1, run_results=None):
if label and not self.quiet:
print >>sys.stderr, label + """
X work job raised an exception
. < 1s first-byte-latency
o < 3s first-byte-latency
O < 10s first-byte-latency
* >= 10s first-byte-latency
_ < 1s last-byte-latency (CREATE or UPDATE)
| < 3s last-byte-latency (CREATE or UPDATE)
^ < 10s last-byte-latency (CREATE or UPDATE)
@ >= 10s last-byte-latency (CREATE or UPDATE)
""".rstrip()
def _job_decorator(raw_job):
if mapper_fn is not None:
work_job = mapper_fn(raw_job)
if not work_job:
if noop:
work_job = raw_job
work_job['container'] = 'who_cares'
work_job['name'] = 'who_cares'
else:
logging.warning('Unable to fill in job %r', raw_job)
return None
else:
work_job = raw_job
work_job['auth_kwargs'] = auth_kwargs
work_job['connect_timeout'] = self.connect_timeout
work_job['network_timeout'] = self.network_timeout
return work_job
active = 0
for raw_job in job_generator:
work_job = _job_decorator(raw_job)
if not work_job:
continue
send_q = [work_job]
logging.debug('active: %d\tconcurrency: %d', active, concurrency)
if active >= concurrency:
result_jobs_raw = self.results_pull.recv()
result_count = self.process_results_to(
result_jobs_raw, result_processor, label=label,
run_results=run_results)
active -= result_count
while len(send_q) < min(batch_size, concurrency - active):
try:
work_job = _job_decorator(job_generator.next())
if not work_job:
continue
send_q.append(work_job)
except StopIteration:
break
self.work_push.send(msgpack.dumps(send_q))
active += len(send_q)
# NOTE: we'll never exit this loop with unsent contents in send_q
# Drain the results
logging.debug('All jobs sent; awaiting results...')
while active > 0:
logging.debug('Draining results: active = %d', active)
result_jobs_raw = self.results_pull.recv()
result_count = self.process_results_to(
result_jobs_raw, result_processor, label=label,
run_results=run_results)
active -= result_count
if label and not self.quiet:
sys.stderr.write('\n')
sys.stderr.flush()
def kill_workers(self, timeout=5):
"""
Send a suicide message to all workers, with some kind of timeout.
"""
logging.info('Killing workers, taking up to %d seconds.', int(timeout))
poller = zmq.Poller()
poller.register(self.results_pull, zmq.POLLIN)
while True:
# Seems to get stuck gevent-blocking in the work_push.send() after
# all the workers have died. Also, gevent.Timeout() doesn't seem
# to work here?!
signal.alarm(int(timeout))
self.work_push.send(msgpack.dumps([{'type': 'PING'}]))
socks = dict(poller.poll(timeout * 1500))
if self.results_pull in socks \
and socks[self.results_pull] == zmq.POLLIN:
result_packed = self.results_pull.recv()
result = msgpack.loads(result_packed, use_list=False)
logging.info('Heard from worker id=%d; sending SUICIDE',
result[0]['worker_id'])
self.work_push.send(msgpack.dumps([{'type': 'SUICIDE'}]))
gevent.sleep(0.1)
else:
break
signal.alarm(0)
def cleanup_containers(self, auth_kwargs, container_base, concurrency,
policy):
storage_urls, token = self._authenticate(auth_kwargs)
_, container_list = client.get_account(
random.choice(storage_urls), token)
our_container_re = re.compile(self.DELETER_RE % (container_base, policy))
start_time = time.time()
obj_count = 0
container_count = 0
pool = gevent.pool.Pool(concurrency)
for container_info in container_list:
# e.g. {'count': 41, 'bytes': 496485, 'name': 'doc'}
if our_container_re.match(container_info['name']):
pool.spawn(_container_deleter, concurrency, storage_urls,
token, container_info)
container_count += 1
obj_count += container_info['count']
else:
logging.debug('Ignoring non-ssbench container %r',
container_info['name'])
pool.join()
delta_t = time.time() - start_time
logging.info('Deleted %.1f containers/s, %.1f objs/s',
container_count / delta_t, obj_count / delta_t)
def _authenticate(self, auth_kwargs):
"""
Helper method to turn some auth_kwargs into a set of potential storage
URLs and a token.
"""
if auth_kwargs.get('token'):
logging.debug('Using token %s at one of %r',
auth_kwargs['token'], auth_kwargs['storage_urls'])
return auth_kwargs['storage_urls'], auth_kwargs['token']
logging.debug('Authenticating to %s with %s/%s',
auth_kwargs['auth_url'], auth_kwargs['user'],
auth_kwargs['key'])
storage_url, token = client.get_auth(**auth_kwargs)
if auth_kwargs['storage_urls']:
logging.debug('Overriding auth storage url %s with '
'one of %r', storage_url,
auth_kwargs['storage_urls'])
return auth_kwargs['storage_urls'], token
return [storage_url], token
def run_scenario(self, scenario, auth_kwargs, run_results, noop=False,
with_profiling=False, keep_objects=False, batch_size=1):
"""
Runs a CRUD scenario, given cluster parameters and a Scenario object.
:param scenario: Scenario object describing the benchmark run
:param auth_kwargs: All-you-can-eat dictionary of
authentication-related arguments.
:param run_results: RunResults objects for the run
:param noop: Run in no-op mode?
        :param with_profiling: Profile the run?
:param keep_objects: Keep uploaded objects instead of deleting them?
:param batch_size: Send this many bench jobs per packet to workers
        :returns: Collected result records from workers
"""
run_state = RunState()
logging.info(u'Starting scenario run for "%s"', scenario.name)
raise_file_descriptor_limit()
# Construct auth_kwargs appropriate for client.get_auth()
if auth_kwargs.get('token'):
auth_kwargs = {
'storage_urls': auth_kwargs['storage_urls'],
'token': auth_kwargs['token'],
}
# Ensure containers exist
if not noop:
storage_urls, c_token = self._authenticate(auth_kwargs)
logging.info('Ensuring %d containers (%s_*) exist; '
'concurrency=%d...',
len(scenario.containers), scenario.container_base,
scenario.container_concurrency)
pool = gevent.pool.Pool(scenario.container_concurrency)
for container in scenario.containers:
pool.spawn(_container_creator, storage_urls, c_token,
container, policy=scenario.policy)
pool.join()
# Enqueue initialization jobs
if not noop:
logging.info('Initializing cluster with stock data (up to %d '
'concurrent workers)', scenario.user_count)
self.do_a_run(scenario.user_count, scenario.initial_jobs(),
run_state.handle_initialization_result, auth_kwargs,
batch_size=batch_size)
logging.info('Starting benchmark run (up to %d concurrent '
'workers)', scenario.user_count)
if noop:
logging.info(' (not actually talking to Swift cluster!)')
if with_profiling:
import cProfile
prof = cProfile.Profile()
prof.enable()
self.do_a_run(scenario.user_count, scenario.bench_jobs(),
run_state.handle_run_result, auth_kwargs,
mapper_fn=run_state.fill_in_job,
label='Benchmark Run:', noop=noop, batch_size=batch_size,
run_results=run_results)
if with_profiling:
prof.disable()
prof_output_path = '/tmp/do_a_run.%d.prof' % os.getpid()
prof.dump_stats(prof_output_path)
logging.info('PROFILED main do_a_run to %s', prof_output_path)
if not noop and not keep_objects:
logging.info('Deleting population objects from cluster')
self.do_a_run(scenario.user_count,
run_state.cleanup_object_infos(),
lambda *_: None,
auth_kwargs, mapper_fn=_gen_cleanup_job,
batch_size=batch_size)
elif keep_objects:
logging.info('NOT deleting any objects due to -k/--keep-objects')
|
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
import fixtures
from oslo_config import cfg
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import manager as compute_manager
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import cast_as_call
from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_utils
from nova.tests.unit import fake_volume
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova import volume
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('default_flavor', 'nova.compute.flavors')
CONF.import_opt('use_ipv6', 'nova.netconf')
def get_fake_cache():
def _ip(ip, fixed=True, floats=None):
ip_dict = {'address': ip, 'type': 'fixed'}
if not fixed:
ip_dict['type'] = 'floating'
if fixed and floats:
ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
return ip_dict
info = [{'address': 'aa:bb:cc:dd:ee:ff',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip('192.168.0.3',
floats=['1.2.3.4',
'5.6.7.8']),
_ip('192.168.0.4')]}]}}]
if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
return info
def get_instances_with_cached_ips(orig_func, *args, **kwargs):
"""Kludge the cache into instance(s) without having to create DB
entries
"""
instances = orig_func(*args, **kwargs)
if isinstance(instances, list):
for instance in instances:
instance['info_cache'] = {'network_info': get_fake_cache()}
else:
instances['info_cache'] = {'network_info': get_fake_cache()}
return instances
class CinderCloudTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(CinderCloudTestCase, self).setUp()
ec2utils.reset_cache()
self.useFixture(fixtures.TempDir()).path
fake_utils.stub_out_utils_spawn_n(self.stubs)
self.flags(compute_driver='nova.virt.fake.FakeDriver',
volume_api_class='nova.tests.unit.fake_volume.API')
def fake_show(meh, context, id, **kwargs):
return {'id': id,
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine',
'image_state': 'available'}}
def fake_detail(_self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
fake.stub_out_image_service(self.stubs)
def dumb(*args, **kwargs):
pass
self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
fake_network.set_stub_network_methods(self.stubs)
# set up our cloud
self.cloud = cloud.CloudController()
self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
# Short-circuit the conductor service
self.flags(use_local=True, group='conductor')
# Stub out the notification service so we use the no-op serializer
# and avoid lazy-load traces with the wrap_exception decorator in
# the compute service.
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
# set up services
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.scheduler = self.start_service('scheduler')
self.network = self.start_service('network')
self.consoleauth = self.start_service('consoleauth')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
self.volume_api = volume.API()
self.volume_api.reset_fake_api(self.context)
self.stubs.Set(compute_manager.ComputeManager,
'_update_scheduler_instance_info', dumb)
self.stubs.Set(compute_manager.ComputeManager,
'_delete_scheduler_instance_info', dumb)
self.stubs.Set(compute_manager.ComputeManager,
'_sync_scheduler_instance_info', dumb)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
# make sure we can map ami-00000001/2 to a uuid in FakeImageService
db.s3_image_create(self.context,
'cedef40a-ed67-4d10-800e-17455edce175')
db.s3_image_create(self.context,
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
def tearDown(self):
self.volume_api.reset_fake_api(self.context)
super(CinderCloudTestCase, self).tearDown()
fake.FakeImageService_reset()
def _stub_instance_get_with_fixed_ips(self, func_name):
orig_func = getattr(self.cloud.compute_api, func_name)
def fake_get(*args, **kwargs):
return get_instances_with_cached_ips(orig_func, *args, **kwargs)
self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
def _create_key(self, name):
# NOTE(vish): create depends on pool, so just call helper directly
keypair_api = compute_api.KeypairAPI()
return keypair_api.create_key_pair(self.context, self.context.user_id,
name)
def test_describe_volumes(self):
# Makes sure describe_volumes works and filters results.
vol1 = self.cloud.create_volume(self.context,
size=1,
name='test-1',
description='test volume 1')
self.assertEqual(vol1['status'], 'available')
vol2 = self.cloud.create_volume(self.context,
size=1,
name='test-2',
description='test volume 2')
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
result = self.cloud.describe_volumes(self.context,
[vol1['volumeId']])
self.assertEqual(len(result['volumeSet']), 1)
self.assertEqual(vol1['volumeId'], result['volumeSet'][0]['volumeId'])
self.cloud.delete_volume(self.context, vol1['volumeId'])
self.cloud.delete_volume(self.context, vol2['volumeId'])
def test_format_volume_maps_status(self):
fake_volume = {'id': 1,
'status': 'creating',
'availability_zone': 'nova',
'volumeId': 'vol-0000000a',
'attachmentSet': [{}],
'snapshotId': None,
'created_at': '2013-04-18T06:03:35.025626',
'size': 1,
'mountpoint': None,
'attach_status': None}
self.assertEqual(self.cloud._format_volume(self.context,
fake_volume)['status'],
'creating')
fake_volume['status'] = 'attaching'
self.assertEqual(self.cloud._format_volume(self.context,
fake_volume)['status'],
'in-use')
fake_volume['status'] = 'detaching'
self.assertEqual(self.cloud._format_volume(self.context,
fake_volume)['status'],
'in-use')
fake_volume['status'] = 'banana'
self.assertEqual(self.cloud._format_volume(self.context,
fake_volume)['status'],
'banana')
def test_create_volume_in_availability_zone(self):
"""Makes sure create_volume works when we specify an availability
zone
"""
availability_zone = 'zone1:host1'
result = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
volume_id = result['volumeId']
availabilityZone = result['availabilityZone']
self.assertEqual(availabilityZone, availability_zone)
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 1)
self.assertEqual(result['volumeSet'][0]['volumeId'], volume_id)
self.assertEqual(result['volumeSet'][0]['availabilityZone'],
availabilityZone)
self.cloud.delete_volume(self.context, volume_id)
def test_create_volume_from_snapshot(self):
# Makes sure create_volume works when we specify a snapshot.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
snap = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap of vol %s'
% vol1['volumeId'])
vol2 = self.cloud.create_volume(self.context,
snapshot_id=snap['snapshotId'])
volume1_id = vol1['volumeId']
volume2_id = vol2['volumeId']
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
self.assertEqual(result['volumeSet'][1]['volumeId'], volume2_id)
self.cloud.delete_volume(self.context, volume2_id)
self.cloud.delete_snapshot(self.context, snap['snapshotId'])
self.cloud.delete_volume(self.context, volume1_id)
def test_volume_status_of_attaching_volume(self):
"""Test the volume's status in response when attaching a volume."""
vol1 = self.cloud.create_volume(self.context,
size=1,
name='test-ls',
description='test volume ls')
self.assertEqual('available', vol1['status'])
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
        # NOTE(ft): Since the fake attach action is very fast, we replace it
        # with an empty function so we can check EC2 API results at the
        # 'attaching' stage
self.stubs.Set(fake_volume.API, 'attach',
lambda *args, **kwargs: None)
resp = self.cloud.attach_volume(self.context,
vol1['volumeId'],
ec2_instance_id,
'/dev/sde')
self.assertEqual('attaching', resp['status'])
resp = self.cloud.describe_volumes(self.context, [vol1['volumeId']])
volume = resp['volumeSet'][0]
self.assertEqual('in-use', volume['status'])
self.assertThat({'status': 'attaching',
'volumeId': vol1['volumeId']},
matchers.IsSubDictOf(volume['attachmentSet'][0]))
def test_volume_status_of_detaching_volume(self):
"""Test the volume's status in response when detaching a volume."""
vol1 = self.cloud.create_volume(self.context,
size=1,
name='test-ls',
description='test volume ls')
self.assertEqual('available', vol1['status'])
vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/sdb',
'volume_id': vol1_uuid,
'delete_on_termination': True}]}
ec2_instance_id = self._run_instance(**kwargs)
        # NOTE(ft): Since the fake detach action is very fast, we replace it
        # with an empty function so we can check EC2 API results at the
        # 'detaching' stage
self.stubs.Set(fake_volume.API, 'detach',
lambda *args, **kwargs: None)
resp = self.cloud.detach_volume(self.context,
vol1['volumeId'])
self.assertEqual('detaching', resp['status'])
resp = self.cloud.describe_volumes(self.context, [vol1['volumeId']])
volume = resp['volumeSet'][0]
self.assertEqual('in-use', volume['status'])
self.assertThat({'status': 'detaching',
'volumeId': vol1['volumeId'],
'device': '/dev/sdb',
'instanceId': ec2_instance_id},
matchers.IsSubDictOf(volume['attachmentSet'][0]))
def test_describe_snapshots(self):
# Makes sure describe_snapshots works and filters results.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
snap1 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap1 of vol %s' %
vol1['volumeId'])
snap2 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap2 of vol %s' %
vol1['volumeId'])
result = self.cloud.describe_snapshots(self.context)
self.assertEqual(len(result['snapshotSet']), 2)
result = self.cloud.describe_snapshots(
self.context,
snapshot_id=[snap2['snapshotId']])
self.assertEqual(len(result['snapshotSet']), 1)
self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
self.cloud.delete_snapshot(self.context, snap2['snapshotId'])
self.cloud.delete_volume(self.context, vol1['volumeId'])
def test_format_snapshot_maps_status(self):
fake_snapshot = {'status': 'new',
'id': 1,
'volume_id': 1,
'created_at': 1353560191.08117,
'progress': 90,
'project_id': str(uuid.uuid4()),
'volume_size': 10000,
'display_description': 'desc'}
self.assertEqual(self.cloud._format_snapshot(self.context,
fake_snapshot)['status'],
'pending')
fake_snapshot['status'] = 'creating'
self.assertEqual(self.cloud._format_snapshot(self.context,
fake_snapshot)['status'],
'pending')
fake_snapshot['status'] = 'available'
self.assertEqual(self.cloud._format_snapshot(self.context,
fake_snapshot)['status'],
'completed')
fake_snapshot['status'] = 'active'
self.assertEqual(self.cloud._format_snapshot(self.context,
fake_snapshot)['status'],
'completed')
fake_snapshot['status'] = 'deleting'
self.assertEqual(self.cloud._format_snapshot(self.context,
fake_snapshot)['status'],
'pending')
fake_snapshot['status'] = 'deleted'
self.assertIsNone(self.cloud._format_snapshot(self.context,
fake_snapshot))
fake_snapshot['status'] = 'error'
self.assertEqual(self.cloud._format_snapshot(self.context,
fake_snapshot)['status'],
'error')
fake_snapshot['status'] = 'banana'
self.assertEqual(self.cloud._format_snapshot(self.context,
fake_snapshot)['status'],
'banana')
def test_create_snapshot(self):
# Makes sure create_snapshot works.
availability_zone = 'zone1:host1'
self.cloud.describe_snapshots(self.context)
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
snap1 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap1 of vol %s' %
vol1['volumeId'])
snapshot_id = snap1['snapshotId']
result = self.cloud.describe_snapshots(self.context)
self.assertEqual(len(result['snapshotSet']), 1)
self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
self.cloud.delete_volume(self.context, vol1['volumeId'])
def test_delete_snapshot(self):
# Makes sure delete_snapshot works.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
snap1 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap1 of vol %s' %
vol1['volumeId'])
snapshot_id = snap1['snapshotId']
result = self.cloud.delete_snapshot(self.context,
snapshot_id=snapshot_id)
self.assertTrue(result)
self.cloud.delete_volume(self.context, vol1['volumeId'])
def _block_device_mapping_create(self, instance_uuid, mappings):
volumes = []
for bdm in mappings:
db.block_device_mapping_create(self.context, bdm)
if 'volume_id' in bdm:
values = {'id': bdm['volume_id']}
for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'),
('snapshot_size', 'volume_size'),
('delete_on_termination',
'delete_on_termination')]:
if bdm_key in bdm:
values[vol_key] = bdm[bdm_key]
kwargs = {'name': 'bdmtest-volume',
'description': 'bdm test volume description',
'status': 'available',
'host': 'fake',
'size': 1,
'attach_status': 'detached',
'volume_id': values['id']}
vol = self.volume_api.create_with_kwargs(self.context,
**kwargs)
if 'snapshot_id' in values:
self.volume_api.create_snapshot(self.context,
vol['id'],
'snapshot-bdm',
'fake snap for bdm tests',
values['snapshot_id'])
self.volume_api.attach(self.context, vol['id'],
instance_uuid, bdm['device_name'])
volumes.append(vol)
return volumes
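    # NOTE: _setUpBlockDeviceMapping below builds instances whose mappings
    # cover snapshot-backed and volume-backed devices, both settings of
    # delete_on_termination, a no_device entry, and swap/ephemeral virtual
    # names, so the EC2 formatting code sees each case.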
def _setUpBlockDeviceMapping(self):
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
inst0 = db.instance_create(self.context,
{'image_ref': image_uuid,
'instance_type_id': 1,
'root_device_name': '/dev/sdb1',
'system_metadata': sys_meta})
inst1 = db.instance_create(self.context,
{'image_ref': image_uuid,
'instance_type_id': 1,
'root_device_name': '/dev/sdc1',
'system_metadata': sys_meta})
inst2 = db.instance_create(self.context,
{'image_ref': '',
'instance_type_id': 1,
'root_device_name': '/dev/vda',
'system_metadata': sys_meta})
instance0_uuid = inst0['uuid']
mappings0 = [
{'instance_uuid': instance0_uuid,
'device_name': '/dev/sdb1',
'snapshot_id': '1',
'volume_id': '2'},
{'instance_uuid': instance0_uuid,
'device_name': '/dev/sdb2',
'volume_id': '3',
'volume_size': 1},
{'instance_uuid': instance0_uuid,
'device_name': '/dev/sdb3',
'delete_on_termination': True,
'snapshot_id': '4',
'volume_id': '5'},
{'instance_uuid': instance0_uuid,
'device_name': '/dev/sdb4',
'delete_on_termination': False,
'snapshot_id': '6',
'volume_id': '7'},
{'instance_uuid': instance0_uuid,
'device_name': '/dev/sdb5',
'snapshot_id': '8',
'volume_id': '9',
'volume_size': 0},
{'instance_uuid': instance0_uuid,
'device_name': '/dev/sdb6',
'snapshot_id': '10',
'volume_id': '11',
'volume_size': 1},
{'instance_uuid': instance0_uuid,
'device_name': '/dev/sdb7',
'no_device': True},
{'instance_uuid': instance0_uuid,
'device_name': '/dev/sdb8',
'virtual_name': 'swap'},
{'instance_uuid': instance0_uuid,
'device_name': '/dev/sdb9',
'virtual_name': 'ephemeral3'}]
instance2_uuid = inst2['uuid']
mappings2 = [
{'instance_uuid': instance2_uuid,
'device_name': 'vda',
'snapshot_id': '1',
'volume_id': '21'}]
volumes0 = self._block_device_mapping_create(instance0_uuid, mappings0)
volumes2 = self._block_device_mapping_create(instance2_uuid, mappings2)
return ((inst0, inst1, inst2), (volumes0, [], volumes2))
def _tearDownBlockDeviceMapping(self, instances, volumes):
for vols in volumes:
for vol in vols:
self.volume_api.delete(self.context, vol['id'])
for instance in instances:
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid']):
db.block_device_mapping_destroy(self.context, bdm['id'])
db.instance_destroy(self.context, instance['uuid'])
_expected_instance_bdm0 = {
'instanceId': 'i-00000001',
'rootDeviceName': '/dev/sdb1',
'rootDeviceType': 'ebs'}
_expected_block_device_mapping0 = [
{'deviceName': '/dev/sdb1',
'ebs': {'status': 'attached',
'deleteOnTermination': False,
'volumeId': 'vol-00000002',
}},
{'deviceName': '/dev/sdb2',
'ebs': {'status': 'attached',
'deleteOnTermination': False,
'volumeId': 'vol-00000003',
}},
{'deviceName': '/dev/sdb3',
'ebs': {'status': 'attached',
'deleteOnTermination': True,
'volumeId': 'vol-00000005',
}},
{'deviceName': '/dev/sdb4',
'ebs': {'status': 'attached',
'deleteOnTermination': False,
'volumeId': 'vol-00000007',
}},
{'deviceName': '/dev/sdb5',
'ebs': {'status': 'attached',
'deleteOnTermination': False,
'volumeId': 'vol-00000009',
}},
{'deviceName': '/dev/sdb6',
'ebs': {'status': 'attached',
'deleteOnTermination': False,
'volumeId': 'vol-0000000b', }}]
# NOTE(yamahata): swap/ephemeral device case isn't supported yet.
_expected_instance_bdm1 = {
'instanceId': 'i-00000002',
'rootDeviceName': '/dev/sdc1',
'rootDeviceType': 'instance-store'}
_expected_instance_bdm2 = {
'instanceId': 'i-00000003',
'rootDeviceName': '/dev/vda',
'rootDeviceType': 'ebs'}
def test_format_instance_bdm(self):
(instances, volumes) = self._setUpBlockDeviceMapping()
result = {}
self.cloud._format_instance_bdm(self.context, instances[0]['uuid'],
'/dev/sdb1', result)
self.assertThat(
{'rootDeviceType': self._expected_instance_bdm0['rootDeviceType']},
matchers.IsSubDictOf(result))
self._assertEqualBlockDeviceMapping(
self._expected_block_device_mapping0, result['blockDeviceMapping'])
result = {}
self.cloud._format_instance_bdm(self.context, instances[1]['uuid'],
'/dev/sdc1', result)
self.assertThat(
{'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
matchers.IsSubDictOf(result))
self._tearDownBlockDeviceMapping(instances, volumes)
def _assertInstance(self, instance_id):
ec2_instance_id = ec2utils.id_to_ec2_id(instance_id)
result = self.cloud.describe_instances(self.context,
instance_id=[ec2_instance_id])
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 1)
result = result['instancesSet'][0]
self.assertEqual(result['instanceId'], ec2_instance_id)
return result
def _assertEqualBlockDeviceMapping(self, expected, result):
self.assertEqual(len(expected), len(result))
for x in expected:
found = False
for y in result:
if x['deviceName'] == y['deviceName']:
self.assertThat(x, matchers.IsSubDictOf(y))
found = True
break
self.assertTrue(found)
def test_describe_instances_bdm(self):
"""Make sure describe_instances works with root_device_name and
block device mappings
"""
(instances, volumes) = self._setUpBlockDeviceMapping()
result = self._assertInstance(instances[0]['id'])
self.assertThat(
self._expected_instance_bdm0,
matchers.IsSubDictOf(result))
self._assertEqualBlockDeviceMapping(
self._expected_block_device_mapping0, result['blockDeviceMapping'])
result = self._assertInstance(instances[1]['id'])
self.assertThat(
self._expected_instance_bdm1,
matchers.IsSubDictOf(result))
result = self._assertInstance(instances[2]['id'])
self.assertThat(
self._expected_instance_bdm2,
matchers.IsSubDictOf(result))
self._tearDownBlockDeviceMapping(instances, volumes)
def test_format_instance_bdm_while_attaching_volume(self):
        # NOTE(ft): Since the instance BDM is deleted immediately by the
        # detach operation, no test for the 'detaching' stage is required
vol = self.cloud.create_volume(self.context, size=1)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
        # NOTE(ft): Since the fake attach action is very fast, we replace it
        # with an empty function so we can check EC2 API results at the
        # 'attaching' stage
self.stubs.Set(fake_volume.API, 'attach',
lambda *args, **kwargs: None)
self.cloud.attach_volume(self.context, vol['volumeId'],
ec2_instance_id, '/dev/sde')
resp = self.cloud.describe_instances(self.context,
instance_id=['ami-1'])
resp = resp['reservationSet'][0]
self.assertEqual(1, len(resp['instancesSet']))
inst = resp['instancesSet'][0]
self.assertThat({'deviceName': '/dev/sde',
'ebs': {'deleteOnTermination': False,
'status': 'attaching',
'volumeId': vol['volumeId']}},
matchers.IsSubDictOf(inst['blockDeviceMapping'][0]))
def _setUpImageSet(self, create_volumes_and_snapshots=False):
self.flags(max_local_block_devices=-1)
mappings1 = [
{'device': '/dev/sda1', 'virtual': 'root'},
{'device': 'sdb0', 'virtual': 'ephemeral0'},
{'device': 'sdb1', 'virtual': 'ephemeral1'},
{'device': 'sdb2', 'virtual': 'ephemeral2'},
{'device': 'sdb3', 'virtual': 'ephemeral3'},
{'device': 'sdb4', 'virtual': 'ephemeral4'},
{'device': 'sdc0', 'virtual': 'swap'},
{'device': 'sdc1', 'virtual': 'swap'},
{'device': 'sdc2', 'virtual': 'swap'},
{'device': 'sdc3', 'virtual': 'swap'},
{'device': 'sdc4', 'virtual': 'swap'}]
block_device_mapping1 = [
{'device_name': '/dev/sdb1', 'snapshot_id': 1234567},
{'device_name': '/dev/sdb2', 'volume_id': 1234567},
{'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
{'device_name': '/dev/sdb4', 'no_device': True},
{'device_name': '/dev/sdc1', 'snapshot_id': 12345678},
{'device_name': '/dev/sdc2', 'volume_id': 12345678},
{'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
{'device_name': '/dev/sdc4', 'no_device': True}]
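        # NOTE: 'mappings' above uses the short 'device'/'virtual' format,
        # while 'block_device_mapping' uses the full per-device format
        # (snapshot_id, volume_id, virtual_name, no_device); both become
        # image properties below.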
image1 = {
'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine',
'image_state': 'available',
'mappings': mappings1,
'block_device_mapping': block_device_mapping1,
}
}
mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
block_device_mapping2 = [{'device_name': '/dev/sdb1',
'snapshot_id': 1234567}]
image2 = {
'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fake_name',
'status': 'active',
'properties': {
'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'type': 'machine',
'root_device_name': '/dev/sdb1',
'mappings': mappings2,
'block_device_mapping': block_device_mapping2}}
def fake_show(meh, context, image_id, **kwargs):
_images = [copy.deepcopy(image1), copy.deepcopy(image2)]
for i in _images:
if str(i['id']) == str(image_id):
return i
raise exception.ImageNotFound(image_id=image_id)
def fake_detail(meh, context):
return [copy.deepcopy(image1), copy.deepcopy(image2)]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
volumes = []
snapshots = []
if create_volumes_and_snapshots:
for bdm in block_device_mapping1:
if 'volume_id' in bdm:
vol = self._volume_create(bdm['volume_id'])
volumes.append(vol['id'])
if 'snapshot_id' in bdm:
kwargs = {'volume_id': 76543210,
'volume_size': 1,
'name': 'test-snap',
'description': 'test snap desc',
'snap_id': bdm['snapshot_id'],
'status': 'available'}
snap = self.volume_api.create_snapshot_with_kwargs(
self.context, **kwargs)
snapshots.append(snap['id'])
return (volumes, snapshots)
def _assertImageSet(self, result, root_device_type, root_device_name):
self.assertEqual(1, len(result['imagesSet']))
result = result['imagesSet'][0]
self.assertIn('rootDeviceType', result)
self.assertEqual(result['rootDeviceType'], root_device_type)
self.assertIn('rootDeviceName', result)
self.assertEqual(result['rootDeviceName'], root_device_name)
self.assertIn('blockDeviceMapping', result)
return result
_expected_root_device_name1 = '/dev/sda1'
    # NOTE(yamahata): noDevice doesn't make sense when returning a mapping;
    #                 it only makes sense when the user overrides an
    #                 existing mapping.
_expected_bdms1 = [
{'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
{'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
'snap-00053977'}},
{'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
'vol-00053977'}},
{'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
{'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
{'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
'snap-00bc614e'}},
{'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
'vol-00bc614e'}},
{'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
]
_expected_root_device_name2 = '/dev/sdb1'
_expected_bdms2 = [{'deviceName': '/dev/sdb1',
'ebs': {'snapshotId': 'snap-00053977'}}]
def _run_instance(self, **kwargs):
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
def _restart_compute_service(self, periodic_interval_max=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
if periodic_interval_max:
self.compute = self.start_service(
'compute', periodic_interval_max=periodic_interval_max)
else:
self.compute = self.start_service('compute')
def _volume_create(self, volume_id=None):
kwargs = {'name': 'test-volume',
'description': 'test volume description',
'status': 'available',
'host': 'fake',
'size': 1,
'attach_status': 'detached'}
if volume_id:
kwargs['volume_id'] = volume_id
return self.volume_api.create_with_kwargs(self.context, **kwargs)
def _assert_volume_attached(self, vol, instance_uuid, mountpoint):
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
def _assert_volume_detached(self, vol):
self.assertIsNone(vol['instance_uuid'])
self.assertIsNone(vol['mountpoint'])
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
def test_stop_start_with_volume(self):
# Make sure run instance with block device mapping works.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
vol2 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
        # enforce that periodic tasks run frequently to avoid waiting for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/sdb',
'volume_id': vol1_uuid,
'delete_on_termination': False},
{'device_name': '/dev/sdc',
'volume_id': vol2_uuid,
'delete_on_termination': True},
]}
ec2_instance_id = self._run_instance(**kwargs)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
ec2_instance_id)
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
self.assertEqual(len(vols), 2)
for vol in vols:
self.assertIn(str(vol['id']), [str(vol1_uuid), str(vol2_uuid)])
if str(vol['id']) == str(vol1_uuid):
self.volume_api.attach(self.context, vol['id'],
instance_uuid, '/dev/sdb')
elif str(vol['id']) == str(vol2_uuid):
self.volume_api.attach(self.context, vol['id'],
instance_uuid, '/dev/sdc')
vol = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
vol = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
self.assertTrue(result)
vol = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
vol = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
self.cloud.start_instances(self.context, [ec2_instance_id])
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
self.assertEqual(len(vols), 2)
for vol in vols:
self.assertIn(str(vol['id']), [str(vol1_uuid), str(vol2_uuid)])
self.assertIn(vol['mountpoint'], ['/dev/sdb', '/dev/sdc'])
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
        # Terminate the instance; vol2 should still exist afterwards.
self.cloud.terminate_instances(self.context, [ec2_instance_id])
admin_ctxt = context.get_admin_context(read_deleted="no")
vol = self.volume_api.get(admin_ctxt, vol2_uuid)
self.assertFalse(vol['deleted'])
self.cloud.delete_volume(self.context, vol1['volumeId'])
self._restart_compute_service()
def test_stop_with_attached_volume(self):
# Make sure attach info is reflected to block device mapping.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
vol2 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
        # enforce that periodic tasks run frequently to avoid waiting for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/sdb',
'volume_id': vol1_uuid,
'delete_on_termination': True}]}
ec2_instance_id = self._run_instance(**kwargs)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
ec2_instance_id)
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
self.assertEqual(len(vols), 1)
for vol in vols:
self.assertEqual(vol['id'], vol1_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
vol = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_detached(vol)
inst_obj = objects.Instance.get_by_uuid(self.context, instance_uuid)
self.cloud.compute_api.attach_volume(self.context,
inst_obj,
volume_id=vol2_uuid,
device='/dev/sdc')
vol1 = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_attached(vol1, instance_uuid, '/dev/sdb')
vol2 = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_attached(vol2, instance_uuid, '/dev/sdc')
self.cloud.compute_api.detach_volume(self.context,
inst_obj, vol1)
vol1 = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_detached(vol1)
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
self.assertTrue(result)
vol2 = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_attached(vol2, instance_uuid, '/dev/sdc')
self.cloud.start_instances(self.context, [ec2_instance_id])
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
self.assertEqual(len(vols), 1)
self._assert_volume_detached(vol1)
vol1 = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_detached(vol1)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
def _create_snapshot(self, ec2_volume_id):
result = self.cloud.create_snapshot(self.context,
volume_id=ec2_volume_id)
return result['snapshotId']
def test_run_with_snapshot(self):
# Makes sure run/stop/start instance with snapshot works.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
snap1 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap of vol %s' %
vol1['volumeId'])
snap1_uuid = ec2utils.ec2_snap_id_to_uuid(snap1['snapshotId'])
snap2 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-2',
description='test snap of vol %s' %
vol1['volumeId'])
snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'snapshot_id': snap1_uuid,
'delete_on_termination': False, },
{'device_name': '/dev/vdc',
'snapshot_id': snap2_uuid,
'delete_on_termination': True}]}
ec2_instance_id = self._run_instance(**kwargs)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
ec2_instance_id)
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
self.assertEqual(len(vols), 2)
vol1_id = None
vol2_id = None
for vol in vols:
snapshot_uuid = vol['snapshot_id']
if snapshot_uuid == snap1_uuid:
vol1_id = vol['id']
mountpoint = '/dev/vdb'
elif snapshot_uuid == snap2_uuid:
vol2_id = vol['id']
mountpoint = '/dev/vdc'
else:
self.fail()
self._assert_volume_attached(vol, instance_uuid, mountpoint)
# Just make sure we found them
self.assertTrue(vol1_id)
self.assertTrue(vol2_id)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
admin_ctxt = context.get_admin_context(read_deleted="no")
vol = self.volume_api.get(admin_ctxt, vol1_id)
self._assert_volume_detached(vol)
self.assertFalse(vol['deleted'])
def test_create_image(self):
# Make sure that CreateImage works.
        # enforce that periodic tasks run frequently to avoid waiting for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
self._restart_compute_service()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import inspect
import types
import warnings
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
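# Public wrapper methods on `Distribution` whose docstrings `_DistributionMeta`
# augments with any extra documentation attached to the corresponding
# underscore-prefixed implementations in subclasses.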
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape", "get_batch_shape", "event_shape", "get_event_shape",
"sample_n", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
"survival_function", "entropy", "mean", "variance", "std", "mode"]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create
# a non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
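# Example (illustrative): given old_str = "Do X.\n\nArgs:\n  a: An arg." and
# append_str = "Extra note.", _update_docstring returns the original text with
# "  Extra note." (indented two spaces) inserted just before the "Args:" line;
# if no "Args:" section exists, the indented note is simply appended at the end.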
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
  (e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian).
### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
  docstring. This is implemented as a simple decorator to avoid the Python
  linter complaining about missing Args/Returns/Raises sections in the
  partial docstrings.
### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = (n,) + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.get_event_shape()
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape
# Sampling returns a sample per distribution. `samples` has shape
# (5, 2, 2), which is (n,) + batch_shape + event_shape, where n=5,
# batch_shape=(2, 2), and event_shape=().
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape (2, 2) as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is (2, 2), one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `a` and `b`, and does not have well-defined mode if
`a < 1` or `b < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
is_continuous,
is_reparameterized,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
is_continuous: Python boolean. If `True` this
`Distribution` is continuous over its supported domain.
is_reparameterized: Python boolean. If `True` this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution.
validate_args: Python boolean. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python boolean. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
parameters: Python dictionary of parameters used to instantiate this
`Distribution`.
graph_parents: Python list of graph prerequisites of this `Distribution`.
name: A name for this distribution. Default: subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not contrib_framework.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
parameters = parameters or {}
self._dtype = dtype
self._is_continuous = is_continuous
self._is_reparameterized = is_reparameterized
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters
self._graph_parents = graph_parents
self._name = name or type(self).__name__
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
Subclasses should override static method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. TensorShape) shapes.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
return self._parameters
@property
def is_continuous(self):
return self._is_continuous
@property
def is_reparameterized(self):
return self._is_reparameterized
@property
def allow_nan_stats(self):
"""Python boolean describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance
of a Cauchy distribution is infinity. However, sometimes the
statistic is undefined, e.g., if a distribution's pdf does not achieve a
maximum within the support of the distribution, the mode is undefined.
If the mean is undefined, then by definition the variance is undefined.
E.g. the mean for Student's T for df = 1 is undefined (no clear way to say
it is either + or - infinity), so the variance = E[(X - mean)^2] is also
undefined.
Returns:
allow_nan_stats: Python boolean.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python boolean indicated possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
    Note: the copied distribution may continue to depend on the original
    initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
      distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
# Python3 leaks "__class__" into `locals()` so we remove if present.
# TODO(b/32376812): Remove this pop.
parameters.pop("__class__", None)
return type(self)(**parameters)
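  # Illustrative usage (hypothetical subclass): `dist.copy(mu=0.)` rebuilds the
  # distribution from `dict(dist.parameters, mu=0.)`, i.e. every original
  # constructor argument is reused except the overridden `mu`.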
def _batch_shape(self):
raise NotImplementedError("batch_shape is not implemented")
def batch_shape(self, name="batch_shape"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.get_batch_shape().is_fully_defined():
return ops.convert_to_tensor(self.get_batch_shape().as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape()
def _get_batch_shape(self):
return tensor_shape.TensorShape(None)
def get_batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
Same meaning as `batch_shape`. May be only partially defined.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return self._get_batch_shape()
def _event_shape(self):
raise NotImplementedError("event_shape is not implemented")
def event_shape(self, name="event_shape"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.get_event_shape().is_fully_defined():
return ops.convert_to_tensor(self.get_event_shape().as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape()
def _get_event_shape(self):
return tensor_shape.TensorShape(None)
def get_event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `event_shape`. May be only partially defined.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return self._get_event_shape()
@property
def is_scalar_event(self):
"""Indicates that `event_shape==[]`."""
return ops.convert_to_tensor(
self._is_scalar_helper(self.get_event_shape, self.event_shape),
name="is_scalar_event")
@property
def is_scalar_batch(self):
"""Indicates that `batch_shape==[]`."""
return ops.convert_to_tensor(
self._is_scalar_helper(self.get_batch_shape, self.batch_shape),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def sample(self, sample_shape=(), seed=None, name="sample",
**condition_kwargs):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
if sample_shape.get_shape().ndims == 0:
return self.sample_n(sample_shape, seed, **condition_kwargs)
sample_shape, total = self._expand_sample_shape(sample_shape)
samples = self.sample_n(total, seed, **condition_kwargs)
output_shape = array_ops.concat_v2(
[sample_shape, array_ops.slice(array_ops.shape(samples), [1], [-1])],
0)
output = array_ops.reshape(samples, output_shape)
output.set_shape(tensor_util.constant_value_as_shape(
sample_shape).concatenate(samples.get_shape()[1:]))
return output
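  # For example (illustrative shapes): with `sample_shape=[5, 4]`,
  # `batch_shape=[3]` and `event_shape=[2]`, `sample` draws 20 samples via
  # `sample_n` and reshapes the result to a Tensor of shape [5, 4, 3, 2].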
def sample_n(self, n, seed=None, name="sample_n", **condition_kwargs):
"""Generate `n` samples.
Args:
n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
observations to sample.
seed: Python integer seed for RNG
name: name to give to the op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
samples: a `Tensor` with a prepended dimension (n,).
Raises:
TypeError: if `n` is not an integer type.
"""
warnings.warn("Please use `sample` instead of `sample_n`. `sample_n` "
"will be deprecated in December 2016.",
PendingDeprecationWarning)
with self._name_scope(name, values=[n]):
n = ops.convert_to_tensor(n, name="n")
if not n.dtype.is_integer:
raise TypeError("n.dtype=%s is not an integer type" % n.dtype)
x = self._sample_n(n, seed, **condition_kwargs)
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(n))
batch_ndims = self.get_batch_shape().ndims
event_ndims = self.get_event_shape().ndims
if batch_ndims is not None and event_ndims is not None:
inferred_shape = sample_shape.concatenate(
self.get_batch_shape().concatenate(
self.get_event_shape()))
x.set_shape(inferred_shape)
elif x.get_shape().ndims is not None and x.get_shape().ndims > 0:
x.get_shape()[0].merge_with(sample_shape[0])
if batch_ndims is not None and batch_ndims > 0:
x.get_shape()[1:1+batch_ndims].merge_with(self.get_batch_shape())
if event_ndims is not None and event_ndims > 0:
x.get_shape()[-event_ndims:].merge_with(self.get_event_shape())
return x
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def log_prob(self, value, name="log_prob", **condition_kwargs):
"""Log probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob", **condition_kwargs):
"""Probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def log_cdf(self, value, name="log_cdf", **condition_kwargs):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def cdf(self, value, name="cdf", **condition_kwargs):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def log_survival_function(self, value, name="log_survival_function",
**condition_kwargs):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(1. - self.cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def survival_function(self, value, name="survival_function",
**condition_kwargs):
"""Survival function.
Given random variable `X`, the survival function is defined:
```
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
      `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **condition_kwargs)
except NotImplementedError:
raise original_exception
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance."""
with self._name_scope(name):
return self._variance()
def _std(self):
raise NotImplementedError("std is not implemented")
def std(self, name="std"):
"""Standard deviation."""
with self._name_scope(name):
return self._std()
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
def log_pdf(self, value, name="log_pdf", **condition_kwargs):
"""Log probability density function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if not `is_continuous`.
"""
warnings.warn("Please use `log_prob` instead of `log_pdf`. `log_pdf` "
"will be deprecated in December 2016.",
PendingDeprecationWarning)
if not self.is_continuous:
raise TypeError("log_pdf is undefined for non-continuous distributions.")
return self.log_prob(value, name=name, **condition_kwargs)
def pdf(self, value, name="pdf", **condition_kwargs):
"""Probability density function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if not `is_continuous`.
"""
warnings.warn("Please use `prob` instead of `pdf`. `pdf` will be "
"deprecated in December 2016.",
PendingDeprecationWarning)
if not self.is_continuous:
raise TypeError("pdf is undefined for non-continuous distributions.")
return self.prob(value, name, **condition_kwargs)
def log_pmf(self, value, name="log_pmf", **condition_kwargs):
"""Log probability mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if `is_continuous`.
"""
warnings.warn("Please use `log_prob` instead of `log_pmf`. `log_pmf` will "
"be deprecated in December 2016.",
PendingDeprecationWarning)
if self.is_continuous:
raise TypeError("log_pmf is undefined for continuous distributions.")
return self.log_prob(value, name=name, **condition_kwargs)
def pmf(self, value, name="pmf", **condition_kwargs):
"""Probability mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if `is_continuous`.
"""
warnings.warn("Please use `prob` instead of `pmf`. `pmf` will be "
"deprecated in December 2016.",
PendingDeprecationWarning)
if self.is_continuous:
raise TypeError("pmf is undefined for continuous distributions.")
return self.prob(value, name=name, **condition_kwargs)
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
(values or []) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape(self, sample_shape):
"""Helper to `sample` which ensures sample_shape is 1D."""
sample_shape_static_val = tensor_util.constant_value(sample_shape)
ndims = sample_shape.get_shape().ndims
if sample_shape_static_val is None:
if ndims is None or not sample_shape.get_shape().is_fully_defined():
ndims = array_ops.rank(sample_shape)
expanded_shape = distribution_util.pick_vector(
math_ops.equal(ndims, 0),
np.array((1,), dtype=dtypes.int32.as_numpy_dtype()),
array_ops.shape(sample_shape))
sample_shape = array_ops.reshape(sample_shape, expanded_shape)
total = math_ops.reduce_prod(sample_shape) # reduce_prod([]) == 1
else:
if ndims is None:
raise ValueError(
"Shouldn't be here; ndims cannot be none when we have a "
"tf.constant shape.")
if ndims == 0:
sample_shape_static_val = np.reshape(sample_shape_static_val, [1])
sample_shape = ops.convert_to_tensor(
sample_shape_static_val,
dtype=dtypes.int32,
name="sample_shape")
total = np.prod(sample_shape_static_val,
dtype=dtypes.int32.as_numpy_dtype())
return sample_shape, total
def _is_scalar_helper(self, static_shape_fn, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape_fn().ndims is not None:
return static_shape_fn().ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape()[0].value is not None):
# If the static_shape_fn is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
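# --- Hedged illustration (not part of the class above) -------------------------
# A minimal standalone mirror of the `_expand_sample_shape` contract, written
# with plain numpy so it can be read in isolation: a scalar `sample_shape` n is
# promoted to the 1-D shape [n], and the total sample count is the product of
# its entries (the product of an empty shape is 1). The helper name
# `_expand_sample_shape_reference` is illustrative only.
def _expand_sample_shape_reference(sample_shape):
  import numpy as _np  # local import keeps the sketch self-contained
  shape = _np.asarray(sample_shape, dtype=_np.int32)
  if shape.ndim == 0:
    # A scalar n means "draw n samples": promote it to the 1-D shape [n].
    shape = shape.reshape([1])
  total = int(_np.prod(shape, dtype=_np.int32))  # prod of an empty shape is 1
  return shape, total
# _expand_sample_shape_reference(3)   -> (array([3], dtype=int32), 3)
# _expand_sample_shape_reference([])  -> (array([], dtype=int32), 1)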
|
|
# Copyright 2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import range
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
from nnabla.testing import assert_allclose
def initialize_grad(parameters):
for param in parameters.values():
param.grad.zero()
@pytest.mark.parametrize("seed", [313])
def test_graph_logreg(seed):
rng = np.random.RandomState(seed)
x = nn.Variable([2, 3, 4], need_grad=True)
w1 = nn.Variable([12, 5], need_grad=True)
w2 = nn.Variable([12, 5], need_grad=True)
b1 = nn.Variable([5], need_grad=True)
b2 = nn.Variable([5], need_grad=True)
t = nn.Variable([2, 1])
x.d = rng.randn(*x.shape)
w1.d = rng.randn(*w1.shape)
w2.d = rng.randn(*w2.shape)
b1.d = rng.randn(*b1.shape)
b2.d = rng.randn(*b2.shape)
t.d = rng.randint(0, 5, size=t.shape)
nn.set_default_context(nn.Context())
    # Forwardprop by definition
z1 = F.affine(x, w1, b1, 1)
z2 = F.affine(x, w2, b2, 1)
l1 = F.softmax_cross_entropy(z1, t, 1)
L1 = F.mean(l1)
l2 = F.softmax_cross_entropy(z2, t, 1)
L2 = F.mean(l2)
nn.forward_all([L1, L2])
# Backprop for z1
# Diff should be initialized since they are always accumulated
x.g = 0
w1.g = 0
b1.g = 0
L1.backward(clear_buffer=True)
inputs = [x, w1, b1]
from nbla_test_utils import \
compute_analytical_and_numerical_grad_graph as grads
agrad, ngrad = grads(L1, inputs, 1e-3, False)
assert_allclose(ngrad, agrad, atol=1e-2)
# Backprop for z2
# Diff should be initialized since they are always accumulated
x.g = 0
w2.g = 0
b2.g = 0
L2.backward(clear_buffer=True)
inputs = [x, w2, b2]
from nbla_test_utils import \
compute_analytical_and_numerical_grad_graph as grads
agrad, ngrad = grads(L2, inputs, 1e-3, False)
assert_allclose(ngrad, agrad, atol=1e-2)
@pytest.mark.parametrize("seed", [311])
@pytest.mark.parametrize("model", ["mlp", "recurrent", "convolution"])
def test_graph_model(model, seed):
np.random.seed(313)
rng = np.random.RandomState(seed)
x = nn.Variable([2, 3, 4, 4], need_grad=True)
t = nn.Variable([2, 1])
x.d = rng.randn(*x.shape)
t.d = rng.randint(0, 5, size=t.shape)
nn.set_default_context(nn.Context())
    # Forwardprop by definition
nn.clear_parameters()
if model == "mlp":
with nn.parameter_scope('fc1'):
z = PF.affine(x, 3)
z2 = F.relu(z, inplace=True)
with nn.parameter_scope('fc2'):
z3 = PF.affine(z2, 5)
z4 = PF.affine(z2, 5)
elif model == "recurrent":
with nn.parameter_scope('fc1'):
z = PF.affine(x, 4)
z2 = F.relu(z, inplace=True)
h = z2
for _ in range(2):
with nn.parameter_scope('fc2'):
h = PF.affine(h, 4)
h = F.relu(h, inplace=True)
with nn.parameter_scope('fc3'):
z3 = PF.affine(h, 5)
z4 = PF.affine(h, 5)
elif model == "convolution":
with nn.parameter_scope('conv1'):
z = PF.convolution(x, 3, (2, 2))
z2 = F.relu(z, inplace=True)
with nn.parameter_scope('fc2'):
z3 = PF.affine(z2, 5)
z4 = PF.affine(z2, 5)
else:
raise ValueError()
l1 = F.softmax_cross_entropy(z3, t, 1)
L1 = F.mean(l1)
l2 = F.softmax_cross_entropy(z4, t, 1)
L2 = F.mean(l2)
# Forwardprop
nn.forward_all([L1, L2])
parameters = nn.get_parameters()
# Backprop for L1
# Diff should be initialized since they are always accumulated
x.grad.zero()
initialize_grad(parameters)
L1.backward(clear_buffer=True)
inputs = [x] + list(parameters.values())
from nbla_test_utils import \
compute_analytical_and_numerical_grad_graph as grads
agrad, ngrad = grads(L1, inputs, 1e-3, False)
assert_allclose(ngrad, agrad, atol=1.05e-2)
# Backprop for L2
# Diff should be initialized since they are always accumulated
x.grad.zero()
initialize_grad(parameters)
L2.backward(clear_buffer=True)
inputs = [x] + list(parameters.values())
from nbla_test_utils import \
compute_analytical_and_numerical_grad_graph as grads
agrad, ngrad = grads(L2, inputs, 1e-3, False)
assert_allclose(ngrad, agrad, atol=1.05e-2)
@pytest.mark.parametrize("seed", [311])
def test_graph_unlink_backward(seed):
rng = np.random.RandomState(seed)
x0 = nn.Variable([2, 4], need_grad=True)
x1 = nn.Variable([2, 4], need_grad=True)
x0.d = rng.randn(*x0.shape)
x1.d = rng.randn(*x1.shape)
x0.grad.zero()
x1.grad.zero()
with nn.parameter_scope("fc0"):
h0 = PF.affine(x0, 2)
h0.need_grad = False
with nn.parameter_scope("fc1"):
h1 = PF.affine(x1, 2)
h = h0 + h1
with nn.parameter_scope("fc"):
y1 = PF.affine(h, 1)
y2 = PF.affine(h, 1)
nn.forward_all([y1, y2])
y1.backward(clear_buffer=True)
assert np.all(x0.g == 0)
assert not np.all(x1.g == 0)
y2.backward(clear_buffer=True)
assert np.all(x0.g == 0)
assert not np.all(x1.g == 0)
@pytest.mark.parametrize("seed", [311])
def test_graph_clear_buffer(seed):
np.random.seed(313)
rng = np.random.RandomState(seed)
x = nn.Variable([2, 3, 4, 4])
t = nn.Variable([2, 1])
x.d = rng.randn(*x.shape)
t.d = rng.randint(0, 5, size=t.shape)
# Network definition
nn.set_default_context(nn.Context())
nn.clear_parameters()
x1 = x + 1
x2 = x1 - 1
with nn.parameter_scope('conv1'):
z = PF.convolution(x2, 3, (2, 2))
z2 = F.relu(z, inplace=True)
with nn.parameter_scope('fc2'):
z3 = PF.affine(z2, 5)
z4 = PF.affine(z2, 5)
l1 = F.softmax_cross_entropy(z3, t, 1)
L1 = F.mean(l1)
l2 = F.softmax_cross_entropy(z4, t, 1)
L2 = F.mean(l2)
# Forwardprop
import tempfile
import os
tmpd = tempfile.mkdtemp()
nn.save_parameters(os.path.join(tmpd, 'parameter.h5'))
first = False
for cnng in [False, True]:
for cb in [False, True]:
_ = nn.load_parameters(os.path.join(tmpd, 'parameter.h5'))
for v in nn.get_parameters().values():
v.grad.zero()
nn.forward_all([L1, L2], clear_no_need_grad=cnng)
# for now, the first backward cannot be
# called with clear_buffer=True
L1.backward(clear_buffer=False)
L2.backward(clear_buffer=cb)
if not first:
first = True
g = list(nn.get_parameters().values())[0].g.copy()
else:
g2 = list(nn.get_parameters().values())[0].g.copy()
import platform
if platform.machine() == 'ppc64le':
pytest.skip("This test fails on ppc64le")
assert np.all(g == g2)
@pytest.mark.parametrize("seed", [311])
@pytest.mark.parametrize("clear_buffer", [True, False])
def test_graph_forward_clear_buffer(seed, clear_buffer):
nn.clear_parameters()
x = nn.Variable((2, 10))
h = PF.affine(x, 10, name='hidden')
y1 = PF.affine(h, 10, name='out1')
y2 = PF.affine(h, 10, name='out2')
# input
rng = np.random.RandomState(seed)
data = rng.randn(*x.shape)
# reference values
x.d = data
y1.forward()
y2.forward()
ref_y1 = y1.d.copy()
ref_y2 = y2.d.copy()
# check
nn.forward_all([y1, y2], clear_buffer=clear_buffer)
assert_allclose(y1.d, ref_y1)
assert_allclose(y2.d, ref_y2)
@pytest.mark.parametrize("seed", [311])
@pytest.mark.parametrize("clear_buffer", [True, False])
def test_graph_more_than_2_outputs(seed, clear_buffer):
count = 0
def func_hook(f):
nonlocal count
if f.name == 'Split':
count += 1
nn.clear_parameters()
a = nn.Variable.from_numpy_array(np.ones((10, )))
b = nn.Variable.from_numpy_array(np.ones((10, )))
c = F.add2(a, b, inplace=True, outputs=[a.data])
y = F.split(c, axis=0)
nn.forward_all(y, function_pre_hook=func_hook)
assert count == 1
res = [x.d for x in y]
assert_allclose(res, [2.0] * 10)
a = nn.Variable.from_numpy_array(np.ones((10, )))
b = nn.Variable.from_numpy_array(np.ones((10, )))
c = F.add2(a, b, inplace=True, outputs=[a.data])
y = F.split(c, axis=0)
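    # Hedged note: add2 writes its result into a's array (inplace with
    # outputs=[a.data]), so every forward pass re-runs the addition on the
    # already-updated a. Forwarding each of the 10 split outputs below therefore
    # executes add2 ten more times, taking a from 1.0 to 11.0, which is what the
    # final assertion checks.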
for yy in y:
yy.forward()
res = [x.d for x in y]
assert_allclose(res, [11.0] * 10)
@pytest.mark.parametrize("seed", [311])
@pytest.mark.parametrize("clear_buffer", [True, False])
def test_graph_rewire(seed, clear_buffer):
nn.clear_parameters()
# A. defining graph definition utility
def mlp2(x, scope):
with nn.parameter_scope(scope):
h = F.tanh(PF.affine(x, 10, name='a1'))
h = F.tanh(PF.affine(h, 10, name='a1'))
return h
# A. Create a graph A.
xa = nn.Variable((2, 10), need_grad=True)
ya = mlp2(xa, 'a')
# B. Create a graph B.
xb = nn.Variable((2, 10), need_grad=True)
yb1 = mlp2(xb, 'b1')
yb2 = mlp2(xb, 'b2')
# C. Create directly connected graph.
xc = nn.Variable((2, 10))
h = mlp2(xc, 'a')
yc1 = mlp2(h, 'b1')
yc2 = mlp2(h, 'b2')
# D. Rewire the graphs A and B.
xb.rewire_on(ya)
# E. Check whether the results are the same.
rng = np.random.RandomState(seed)
data = rng.randn(*xa.shape)
xa.d = data
xc.d = data
params = nn.get_parameters()
def zero_grad():
for p in params.values():
p.grad.zero()
def backup_params():
return [p.g.copy() for p in params.values()]
# Checking forward
nn.forward_all([yb1, yb2, yc1, yc2], clear_no_need_grad=clear_buffer)
assert_allclose(yb1.d, yc1.d)
assert_allclose(yb2.d, yc2.d)
# Checking backward for yb1 and yc1
# for now, the first backward cannot be called with clear_buffer=True
zero_grad()
yb1.backward(clear_buffer=False)
gb = backup_params()
zero_grad()
yc1.backward(clear_buffer=False)
gc = backup_params()
assert_allclose(xa.d, xc.d)
for b, c in zip(gb, gc):
assert_allclose(b, c)
# Checking backward for yb2 and yc2
zero_grad()
yb2.backward(clear_buffer=clear_buffer)
gb = backup_params()
zero_grad()
yc2.backward(clear_buffer=clear_buffer)
gc = backup_params()
assert_allclose(xa.d, xc.d)
for b, c in zip(gb, gc):
assert_allclose(b, c)
@pytest.mark.parametrize("clear_buffer, clear_no_need_grad", [
(False, False), (True, False), (False, True),
])
def test_intermediate_outputs(clear_buffer, clear_no_need_grad):
rng = np.random.RandomState(311)
    # do not use cached arrays so that buffers are cleared immediately
nn.prefer_cached_array(False)
x = nn.Variable.from_numpy_array(rng.randn(2, 10))
h1 = x + 1
y1 = h1 + 1
h2 = x + 1
h2.persistent = True
y2 = h2 + 1
nn.forward_all([h1, y1], clear_buffer=clear_buffer,
clear_no_need_grad=clear_no_need_grad)
nn.forward_all([h2, y2], clear_buffer=clear_buffer,
clear_no_need_grad=clear_no_need_grad)
assert_allclose(h1.d, h2.d)
assert_allclose(y1.d, y2.d)
    # revert preference (this is also done in conftest.py, but just in case)
nn.prefer_cached_array(True)
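# --- Hedged usage sketch (underscore-prefixed so pytest does not collect it) ---
# The pattern exercised by the tests above in one place: build two heads that
# share an intermediate variable, forward both with a single `nn.forward_all`
# call so the shared part is computed once, then backprop each head separately,
# zeroing the accumulated gradients in between. Shapes and layer names here are
# arbitrary.
def _forward_all_two_heads_sketch():
    nn.clear_parameters()
    x = nn.Variable((2, 10), need_grad=True)
    x.d = np.random.randn(*x.shape)
    h = PF.affine(x, 10, name='shared')
    y1 = PF.affine(h, 1, name='head1')
    y2 = PF.affine(h, 1, name='head2')
    nn.forward_all([y1, y2])              # one forward pass covers both heads
    x.grad.zero()
    initialize_grad(nn.get_parameters())  # grads accumulate, so clear them first
    y1.backward(clear_buffer=False)       # keep buffers for the second backward
    x.grad.zero()
    initialize_grad(nn.get_parameters())
    y2.backward(clear_buffer=True)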
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv as lib_csv
import os
import re
import sys
from dataclasses import dataclass
from typing import Any, Dict, Iterator, List, Optional, Union
import click
from click.core import Context
try:
from github import BadCredentialsException, Github, PullRequest, Repository
except ModuleNotFoundError:
print("PyGithub is a required package for this script")
exit(1)
SUPERSET_REPO = "apache/superset"
SUPERSET_PULL_REQUEST_TYPES = r"^(fix|feat|chore|refactor|docs|build|ci|/gmi)"
SUPERSET_RISKY_LABELS = r"^(blocking|risk|hold|revert|security vulnerability)"
@dataclass
class GitLog:
"""
Represents a git log entry
"""
sha: str
author: str
time: str
message: str
pr_number: Union[int, None] = None
author_email: str = ""
def __eq__(self, other: object) -> bool:
"""A log entry is considered equal if it has the same PR number"""
if isinstance(other, self.__class__):
return other.pr_number == self.pr_number
return False
def __repr__(self) -> str:
return f"[{self.pr_number}]: {self.message} {self.time} {self.author}"
class GitChangeLog:
"""
    Helper class to output a list of log entries in the Superset changelog
    format. We want to map each git author to a GitHub login; for that we call
    GitHub's API.
"""
def __init__(
self,
version: str,
logs: List[GitLog],
access_token: Optional[str] = None,
risk: Optional[bool] = False,
) -> None:
self._version = version
self._logs = logs
self._pr_logs_with_details: Dict[int, Dict[str, Any]] = {}
self._github_login_cache: Dict[str, Optional[str]] = {}
self._github_prs: Dict[int, Any] = {}
self._wait = 10
github_token = access_token or os.environ.get("GITHUB_TOKEN")
self._github = Github(github_token)
self._show_risk = risk
self._superset_repo: Repository = None
def _fetch_github_pr(self, pr_number: int) -> PullRequest:
"""
Fetches a github PR info
"""
try:
github_repo = self._github.get_repo(SUPERSET_REPO)
self._superset_repo = github_repo
pull_request = self._github_prs.get(pr_number)
if not pull_request:
pull_request = github_repo.get_pull(pr_number)
self._github_prs[pr_number] = pull_request
except BadCredentialsException as ex:
            print(
                "Bad credentials provided to GitHub;"
                " use the access_token parameter or set GITHUB_TOKEN"
            )
sys.exit(1)
return pull_request
def _get_github_login(self, git_log: GitLog) -> Optional[str]:
"""
Tries to fetch a github login (username) from a git author
"""
author_name = git_log.author
github_login = self._github_login_cache.get(author_name)
if github_login:
return github_login
if git_log.pr_number:
pr_info = self._fetch_github_pr(git_log.pr_number)
if pr_info:
github_login = pr_info.user.login
else:
github_login = author_name
# set cache
self._github_login_cache[author_name] = github_login
return github_login
def _has_commit_migrations(self, git_sha: str) -> bool:
commit = self._superset_repo.get_commit(sha=git_sha)
return any(
"superset/migrations/versions/" in file.filename for file in commit.files
)
def _get_pull_request_details(self, git_log: GitLog) -> Dict[str, Any]:
pr_number = git_log.pr_number
if pr_number:
detail = self._pr_logs_with_details.get(pr_number)
if detail:
return detail
pr_info = self._fetch_github_pr(pr_number)
has_migrations = self._has_commit_migrations(git_log.sha)
title = pr_info.title if pr_info else git_log.message
pr_type = re.match(SUPERSET_PULL_REQUEST_TYPES, title)
if pr_type:
pr_type = pr_type.group().strip('"')
labels = (" | ").join([label.name for label in pr_info.labels])
is_risky = self._is_risk_pull_request(pr_info.labels)
detail = {
"id": pr_number,
"has_migrations": has_migrations,
"labels": labels,
"title": title,
"type": pr_type,
"is_risky": is_risky or has_migrations,
}
if pr_number:
self._pr_logs_with_details[pr_number] = detail
return detail
def _is_risk_pull_request(self, labels: List[Any]) -> bool:
for label in labels:
risk_label = re.match(SUPERSET_RISKY_LABELS, label.name)
if risk_label is not None:
return True
return False
def _get_changelog_version_head(self) -> str:
return f"### {self._version} ({self._logs[0].time})"
def _parse_change_log(
self, changelog: Dict[str, str], pr_info: Dict[str, str], github_login: str,
) -> None:
formatted_pr = (
f"- [#{pr_info.get('id')}]"
f"(https://github.com/{SUPERSET_REPO}/pull/{pr_info.get('id')}) "
f"{pr_info.get('title')} (@{github_login})\n"
)
if pr_info.get("has_migrations"):
changelog["Database Migrations"] += formatted_pr
elif pr_info.get("type") == "fix":
changelog["Fixes"] += formatted_pr
elif pr_info.get("type") == "feat":
changelog["Features"] += formatted_pr
else:
changelog["Others"] += formatted_pr
def __repr__(self) -> str:
result = f"\n{self._get_changelog_version_head()}\n"
changelog = {
"Database Migrations": "\n",
"Features": "\n",
"Fixes": "\n",
"Others": "\n",
}
for i, log in enumerate(self._logs):
github_login = self._get_github_login(log)
pr_info = self._get_pull_request_details(log)
if not github_login:
github_login = log.author
if self._show_risk:
if pr_info.get("is_risky"):
result += (
f"- [#{log.pr_number}]"
f"(https://github.com/{SUPERSET_REPO}/pull/{log.pr_number}) "
f"{pr_info.get('title')} (@{github_login}) "
f"{pr_info.get('labels')} \n"
)
else:
self._parse_change_log(changelog, pr_info, github_login)
print(f"\r {i}/{len(self._logs)}", end="", flush=True)
if self._show_risk:
return result
for key in changelog:
result += f"**{key}** {changelog[key]}\n"
return result
def __iter__(self) -> Iterator[Dict[str, Any]]:
for log in self._logs:
yield {
"pr_number": log.pr_number,
"pr_link": f"https://github.com/{SUPERSET_REPO}/pull/"
f"{log.pr_number}",
"message": log.message,
"time": log.time,
"author": log.author,
"email": log.author_email,
"sha": log.sha,
}
class GitLogs:
"""
Manages git log entries from a specific branch/tag
Can compare git log entries by PR number
"""
def __init__(self, git_ref: str) -> None:
self._git_ref = git_ref
self._logs: List[GitLog] = []
@property
def git_ref(self) -> str:
return self._git_ref
@property
def logs(self) -> List[GitLog]:
return self._logs
def fetch(self) -> None:
self._logs = list(map(self._parse_log, self._git_logs()))[::-1]
def diff(self, git_logs: "GitLogs") -> List[GitLog]:
return [log for log in git_logs.logs if log not in self._logs]
def __repr__(self) -> str:
return f"{self._git_ref}, Log count:{len(self._logs)}"
@staticmethod
def _git_get_current_head() -> str:
output = os.popen("git status | head -1").read()
match = re.match("(?:HEAD detached at|On branch) (.*)", output)
if not match:
return ""
return match.group(1)
def _git_checkout(self, git_ref: str) -> None:
os.popen(f"git checkout {git_ref}").read()
current_head = self._git_get_current_head()
if current_head != git_ref:
print(f"Could not checkout {git_ref}")
sys.exit(1)
def _git_logs(self) -> List[str]:
# let's get current git ref so we can revert it back
current_git_ref = self._git_get_current_head()
self._git_checkout(self._git_ref)
output = (
os.popen('git --no-pager log --pretty=format:"%h|%an|%ae|%ad|%s|"')
.read()
.split("\n")
)
# revert to git ref, let's be nice
self._git_checkout(current_git_ref)
return output
@staticmethod
def _parse_log(log_item: str) -> GitLog:
pr_number = None
split_log_item = log_item.split("|")
# parse the PR number from the log message
match = re.match(r".*\(\#(\d*)\)", split_log_item[4])
if match:
pr_number = int(match.group(1))
return GitLog(
sha=split_log_item[0],
author=split_log_item[1],
author_email=split_log_item[2],
time=split_log_item[3],
message=split_log_item[4],
pr_number=pr_number,
)
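# Hedged illustration (not used by the CLI below): `GitLogs._git_logs` asks git
# for lines shaped as "%h|%an|%ae|%ad|%s|", so one entry splits on "|" into
# [sha, author, email, date, subject, ""], and `GitLogs._parse_log` pulls the
# PR number out of a trailing "(#1234)" in the subject. The sample line below
# is made up.
def _parse_log_example() -> GitLog:
    sample = (
        "abc1234|Jane Doe|jane@example.com|"
        "Tue Mar 1 10:00:00 2022|fix: correct tooltip (#19123)|"
    )
    log = GitLogs._parse_log(sample)
    assert log.sha == "abc1234"
    assert log.pr_number == 19123
    return log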
@dataclass
class BaseParameters:
previous_logs: GitLogs
current_logs: GitLogs
def print_title(message: str) -> None:
print(f"{50*'-'}")
print(message)
print(f"{50*'-'}")
@click.group()
@click.pass_context
@click.option("--previous_version", help="The previous release version", required=True)
@click.option("--current_version", help="The current release version", required=True)
def cli(ctx: Context, previous_version: str, current_version: str) -> None:
"""Welcome to change log generator"""
previous_logs = GitLogs(previous_version)
current_logs = GitLogs(current_version)
previous_logs.fetch()
current_logs.fetch()
base_parameters = BaseParameters(previous_logs, current_logs)
ctx.obj = base_parameters
@cli.command("compare")
@click.pass_obj
def compare(base_parameters: BaseParameters) -> None:
"""Compares both versions (by PR)"""
previous_logs = base_parameters.previous_logs
current_logs = base_parameters.current_logs
print_title(
f"Pull requests from " f"{current_logs.git_ref} not in {previous_logs.git_ref}"
)
previous_diff_logs = previous_logs.diff(current_logs)
for diff_log in previous_diff_logs:
print(f"{diff_log}")
print_title(
f"Pull requests from " f"{previous_logs.git_ref} not in {current_logs.git_ref}"
)
current_diff_logs = current_logs.diff(previous_logs)
for diff_log in current_diff_logs:
print(f"{diff_log}")
@cli.command("changelog")
@click.option(
"--csv", help="The csv filename to export the changelog to",
)
@click.option(
"--access_token",
help="The github access token,"
" if not provided will try to fetch from GITHUB_TOKEN env var",
)
@click.option("--risk", is_flag=True, help="show all pull requests with risky labels")
@click.pass_obj
def change_log(
base_parameters: BaseParameters, csv: str, access_token: str, risk: bool
) -> None:
"""Outputs a changelog (by PR)"""
previous_logs = base_parameters.previous_logs
current_logs = base_parameters.current_logs
previous_diff_logs = previous_logs.diff(current_logs)
logs = GitChangeLog(
current_logs.git_ref,
previous_diff_logs[::-1],
access_token=access_token,
risk=risk,
)
if csv:
with open(csv, "w") as csv_file:
log_items = list(logs)
field_names = log_items[0].keys()
writer = lib_csv.DictWriter( # type: ignore
csv_file,
delimiter=",",
quotechar='"',
quoting=lib_csv.QUOTE_ALL,
fieldnames=field_names, # type: ignore
)
writer.writeheader()
for log in logs:
writer.writerow(log)
else:
print("Fetching github usernames, this may take a while:")
print(logs)
cli()
|
|
"""
Tests for the Parsing Library
"""
import json
from datetime import datetime
import parsing_lib
def _test_case_parser(test_case):
""" Parses test case JSON.
Corrects some of the default parsing functionality to work better with the
given test cases
Args:
test_case: the test case to parse
Returns:
the corrected, parsed test case
"""
current_year = datetime.now().year
# Parse test case time
if 'time' in test_case:
date_with_year = '%s-%s' % (str(current_year), test_case['time'])
test_case['time'] = datetime.strptime(date_with_year,
'%Y-%m-%d %H:%M:%S.%f')
# Parse multiline input into one string
if isinstance(test_case.get('inputLines', None), list):
test_case['inputLines'] = '\n'.join(test_case['inputLines'])
# Parse log entry text into one string
if isinstance(test_case.get('expectedResult', None), list):
for log_entry in test_case['expectedResult']:
if 'text' in log_entry and isinstance(log_entry['text'], list):
log_entry['text'] = '\n'.join(log_entry['text'])
return test_case
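# Hedged illustration of the normalisation above; the sample values are made
# up. The parser prefixes the current year onto the "time" string and joins any
# list-valued "inputLines" (and per-entry "text") into single newline-separated
# strings.
def _example_parsed_case():
    return _test_case_parser({
        'time': '05-22 11:44:31.180',
        'inputLines': ['line 1', 'line 2'],
    })
    # -> {'time': datetime(<current year>, 5, 22, 11, 44, 31, 180000),
    #     'inputLines': 'line 1\nline 2'}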
def test_parse_android():
""" Tests that the LogParser.parse method works properly for Android. """
with open('tests/inputs/test_parse_android.json') as test_case_file:
test_cases = json.load(test_case_file, object_hook=_test_case_parser)
for test_case in test_cases:
test_input = test_case['inputLines']
expected_result = test_case['expectedResult']
assert list(parsing_lib.LogParser.parse(test_input, 'Android')) == \
expected_result
assert list(parsing_lib.LogParser.parse({}, 'Android')) == []
def test_parse_ios():
""" Tests that the LogParser.parse method works properly for iOS. """
with open('tests/inputs/test_parse_ios.json') as test_case_file:
test_cases = json.load(test_case_file, object_hook=_test_case_parser)
for test_case in test_cases:
test_input = test_case['inputLines']
expected_result = test_case['expectedResult']
assert list(parsing_lib.LogParser.parse(test_input, 'iOS')) == \
expected_result
assert list(parsing_lib.LogParser.parse({}, 'iOS')) == []
def test_parse_raw_log_android():
""" Tests that the LogParser.parse_raw_log method works properly for
Android.
"""
with open('tests/inputs/test_parse_android_raw.json') as test_case_file:
test_cases = json.load(test_case_file, object_hook=_test_case_parser)
for test_case in test_cases:
test = test_case['input']
expected_result = test_case['expectedResult']
result = parsing_lib.LogParser.parse_raw_log(test, 'Android')
assert result == expected_result
def test_parse_raw_log_ios():
""" Tests that the LogParser.parse_raw_log method works properly for
iOS.
"""
with open('tests/inputs/test_parse_ios_raw.json') as test_case_file:
test_cases = json.load(test_case_file, object_hook=_test_case_parser)
for test_case in test_cases:
test = test_case['input']
expected_result = test_case['expectedResult']
result = parsing_lib.LogParser.parse_raw_log(test, 'iOS')
assert result == expected_result
def test_convert_line_to_html():
"""Tests that the LogParser.convert_line_to_html works properly"""
current_year = datetime.now().year
tests = [
{
'time': datetime(current_year, 5, 22, 11, 44, 31, 180000),
'processId': '7080',
'threadId': '7080',
'logType': 'Info',
'tag': 'WiDB Example',
'text': 'aX: 3.0262709 aY: 2.0685902',
},
{
'time': datetime(current_year, 5, 22, 11, 44, 32, 191000),
'processId': '7080',
'threadId': '7080',
'logType': 'Warning',
'tag': 'IInputConnectionWrapper',
'text': 'getTextBeforeCursor on inactive InputConnection',
},
]
expected_results = [[
'<tr class="">',
' <td>%s</td>' % datetime(current_year, 5, 22, 11, 44, 31, 180000),
' <td>WiDB Example</td>',
' <td>Info</td>',
' <td>aX: 3.0262709 aY: 2.0685902</td>',
'</tr>',
], [
'<tr class="warning">',
' <td>%s</td>' % datetime(current_year, 5, 22, 11, 44, 32, 191000),
' <td>IInputConnectionWrapper</td>',
' <td>Warning</td>',
' <td>getTextBeforeCursor on inactive InputConnection</td>',
'</tr>',
]]
for test, expected_result in zip(tests, expected_results):
html = parsing_lib.LogParser.convert_line_to_html(test)
html = html.replace(' ', '').replace('\n', '')
expected_result = ''.join(expected_result)
expected_result = expected_result.replace(' ', '').replace('\n', '')
assert html == expected_result
def test_convert_to_html():
"""Tests that LogParser.convert_to_html works properly"""
current_year = datetime.now().year
tests = [[{
'time': datetime(current_year, 5, 22, 11, 44, 31, 180000),
'logType': 'Info',
'tag': 'WiDB Example',
'text': 'aX: 3.0262709 aY: 2.0685902',
}, {
'time': datetime(current_year, 5, 22, 11, 44, 32, 191000),
'processId': '7080',
'threadId': '7080',
'logType': 'Warning',
'tag': 'IInputConnectionWrapper',
'text': 'getTextBeforeCursor on inactive InputConnection',
}, {
'time': datetime(current_year, 5, 24, 12, 12, 49, 247000),
'logType': 'Error',
'tag': 'AndroidRuntime',
'text': ''.join([
'FATAL EXCEPTION: main',
'Process: com.google.wireless.debugging, PID: 23930',
'java.lang.RuntimeException: Forced Crash',
'at com.google.wireless.debugging.example.MainFragment$2.onClick(MainFragment.java:73)',
'at android.view.View.performClick(View.java:4445)',
'at android.view.View$PerformClick.run(View.java:18446)',
'at android.os.Handler.handleCallback(Handler.java:733)',
'at android.os.Handler.dispatchMessage(Handler.java:95)',
'at android.os.Looper.loop(Looper.java:136)',
'at android.app.ActivityThread.main(ActivityThread.java:5146)',
'at java.lang.reflect.Method.invokeNative(Native Method)',
'at java.lang.reflect.Method.invoke(Method.java:515)',
'at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:796)',
'at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:612)',
'at dalvik.system.NativeStart.main(Native Method)'
]),
}]]
expected_results = [[
'<tr class="">',
' <td>%s</td>' % datetime(current_year, 5, 22, 11, 44, 31, 180000),
' <td>WiDB Example</td>',
' <td>Info</td>',
' <td>aX: 3.0262709 aY: 2.0685902</td>',
'</tr>',
'<tr class="warning">',
' <td>%s</td>' % datetime(current_year, 5, 22, 11, 44, 32, 191000),
' <td>IInputConnectionWrapper</td>',
' <td>Warning</td>',
' <td>getTextBeforeCursor on inactive InputConnection</td>',
'</tr>',
'<tr class="danger">',
' <td>%s</td>' % datetime(current_year, 5, 24, 12, 12, 49, 247000),
' <td>AndroidRuntime</td>',
' <td>Error</td>',
' <td>FATAL EXCEPTION: main',
' Process: com.google.wireless.debugging, PID: 23930',
' java.lang.RuntimeException: Forced Crash',
' at com.google.wireless.debugging.example.MainFragment$2.onClick(MainFragment.java:73)',
' at android.view.View.performClick(View.java:4445)',
' at android.view.View$PerformClick.run(View.java:18446)',
' at android.os.Handler.handleCallback(Handler.java:733)',
' at android.os.Handler.dispatchMessage(Handler.java:95)',
' at android.os.Looper.loop(Looper.java:136)',
' at android.app.ActivityThread.main(ActivityThread.java:5146)',
' at java.lang.reflect.Method.invokeNative(Native Method)',
' at java.lang.reflect.Method.invoke(Method.java:515)',
' at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:796)',
' at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:612)',
' at dalvik.system.NativeStart.main(Native Method)',
' </td>',
'</tr>',
]]
for test, expected_result in zip(tests, expected_results):
html = parsing_lib.LogParser.convert_to_html(test)
html = html.replace(' ', '').replace('\n', '')
expected_result = ''.join(expected_result)
expected_result = expected_result.replace(' ', '').replace('\n', '')
assert html == expected_result
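# Hedged note on the expected HTML above: the row's CSS class is derived from
# the log type (Info -> "", Warning -> "warning", Error -> "danger"). The dict
# below only restates that observed mapping; it is not part of parsing_lib.
_OBSERVED_ROW_CLASSES = {
    'Info': '',
    'Warning': 'warning',
    'Error': 'danger',
}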
|
|
# The absolute import feature is required so that we get the root celery
# module rather than `amo.celery`.
from __future__ import absolute_import
from inspect import isclass
from celery.datastructures import AttributeDict
from django.utils.translation import ugettext_lazy as _
__all__ = ('LOG', 'LOG_BY_ID', 'LOG_KEEP',)
class _LOG(object):
action_class = None
class CREATE_ADDON(_LOG):
id = 1
action_class = 'add'
format = _(u'{addon} was created.')
keep = True
class EDIT_PROPERTIES(_LOG):
""" Expects: addon """
id = 2
action_class = 'edit'
format = _(u'{addon} properties edited.')
class EDIT_DESCRIPTIONS(_LOG):
id = 3
action_class = 'edit'
format = _(u'{addon} description edited.')
class EDIT_CATEGORIES(_LOG):
id = 4
action_class = 'edit'
format = _(u'Categories edited for {addon}.')
class ADD_USER_WITH_ROLE(_LOG):
id = 5
action_class = 'add'
format = _(u'{0.name} ({1}) added to {addon}.')
keep = True
class REMOVE_USER_WITH_ROLE(_LOG):
id = 6
action_class = 'delete'
# L10n: {0} is the user being removed, {1} is their role.
format = _(u'{0.name} ({1}) removed from {addon}.')
keep = True
class EDIT_CONTRIBUTIONS(_LOG):
id = 7
action_class = 'edit'
format = _(u'Contributions for {addon}.')
class USER_DISABLE(_LOG):
id = 8
format = _(u'{addon} disabled.')
keep = True
class USER_ENABLE(_LOG):
id = 9
format = _(u'{addon} enabled.')
keep = True
# TODO(davedash): Log these types when pages are present
class SET_PUBLIC_STATS(_LOG):
id = 10
format = _(u'Stats set public for {addon}.')
keep = True
# TODO(davedash): Log these types when pages are present
class UNSET_PUBLIC_STATS(_LOG):
id = 11
format = _(u'{addon} stats set to private.')
keep = True
class CHANGE_STATUS(_LOG):
id = 12
# L10n: {0} is the status
format = _(u'{addon} status changed to {0}.')
keep = True
class ADD_PREVIEW(_LOG):
id = 13
action_class = 'add'
format = _(u'Preview added to {addon}.')
class EDIT_PREVIEW(_LOG):
id = 14
action_class = 'edit'
format = _(u'Preview edited for {addon}.')
class DELETE_PREVIEW(_LOG):
id = 15
action_class = 'delete'
format = _(u'Preview deleted from {addon}.')
class ADD_VERSION(_LOG):
id = 16
action_class = 'add'
format = _(u'{version} added to {addon}.')
keep = True
class EDIT_VERSION(_LOG):
id = 17
action_class = 'edit'
format = _(u'{version} edited for {addon}.')
class DELETE_VERSION(_LOG):
id = 18
action_class = 'delete'
# Note, {0} is a string not a version since the version is deleted.
# L10n: {0} is the version number
format = _(u'Version {0} deleted from {addon}.')
keep = True
class ADD_FILE_TO_VERSION(_LOG):
id = 19
action_class = 'add'
format = _(u'File {0.name} added to {version} of {addon}.')
class DELETE_FILE_FROM_VERSION(_LOG):
"""
Expecting: addon, filename, version
Because the file is being deleted, filename and version
should be strings and not the object.
"""
id = 20
action_class = 'delete'
format = _(u'File {0} deleted from {version} of {addon}.')
class APPROVE_VERSION(_LOG):
id = 21
action_class = 'approve'
format = _(u'{addon} {version} approved.')
short = _(u'Approved')
keep = True
review_email_user = True
review_queue = True
editor_review_action = True
class PRELIMINARY_VERSION(_LOG):
id = 42
action_class = 'approve'
format = _(u'{addon} {version} given preliminary review.')
short = _(u'Preliminarily approved')
keep = True
review_email_user = True
review_queue = True
editor_review_action = True
class REJECT_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 43
action_class = 'reject'
format = _(u'{addon} {version} rejected.')
short = _(u'Rejected')
keep = True
review_email_user = True
review_queue = True
editor_review_action = True
class RETAIN_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 22
format = _(u'{addon} {version} retained.')
short = _(u'Retained')
keep = True
review_email_user = True
review_queue = True
editor_review_action = True
class ESCALATE_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 23
format = _(u'{addon} {version} escalated.')
short = _(u'Super review requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 24
format = _(u'{addon} {version} review requested.')
short = _(u'Review requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_INFORMATION(_LOG):
id = 44
format = _(u'{addon} {version} more information requested.')
short = _(u'More information requested')
keep = True
review_email_user = True
review_queue = True
editor_review_action = True
class REQUEST_SUPER_REVIEW(_LOG):
id = 45
format = _(u'{addon} {version} super review requested.')
short = _(u'Super review requested')
keep = True
review_queue = True
class COMMENT_VERSION(_LOG):
id = 49
format = _(u'Comment on {addon} {version}.')
short = _(u'Comment')
keep = True
review_queue = True
hide_developer = True
editor_review_action = True
class ADD_TAG(_LOG):
id = 25
action_class = 'tag'
format = _(u'{tag} added to {addon}.')
class REMOVE_TAG(_LOG):
id = 26
action_class = 'tag'
format = _(u'{tag} removed from {addon}.')
class ADD_TO_COLLECTION(_LOG):
id = 27
action_class = 'collection'
format = _(u'{addon} added to {collection}.')
class REMOVE_FROM_COLLECTION(_LOG):
id = 28
action_class = 'collection'
format = _(u'{addon} removed from {collection}.')
class ADD_REVIEW(_LOG):
id = 29
action_class = 'review'
format = _(u'{review} for {addon} written.')
# TODO(davedash): Add these when we do the admin site
class ADD_RECOMMENDED_CATEGORY(_LOG):
id = 31
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} featured in {0}.')
class REMOVE_RECOMMENDED_CATEGORY(_LOG):
id = 32
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} no longer featured in {0}.')
class ADD_RECOMMENDED(_LOG):
id = 33
format = _(u'{addon} is now featured.')
keep = True
class REMOVE_RECOMMENDED(_LOG):
id = 34
format = _(u'{addon} is no longer featured.')
keep = True
class ADD_APPVERSION(_LOG):
id = 35
action_class = 'add'
# L10n: {0} is the application, {1} is the version of the app
format = _(u'{0} {1} added.')
class CHANGE_USER_WITH_ROLE(_LOG):
""" Expects: author.user, role, addon """
id = 36
# L10n: {0} is a user, {1} is their role
format = _(u'{0.name} role changed to {1} for {addon}.')
keep = True
class CHANGE_LICENSE(_LOG):
""" Expects: license, addon """
id = 37
action_class = 'edit'
format = _(u'{addon} is now licensed under {0.name}.')
class CHANGE_POLICY(_LOG):
id = 38
action_class = 'edit'
format = _(u'{addon} policy changed.')
class CHANGE_ICON(_LOG):
id = 39
action_class = 'edit'
format = _(u'{addon} icon changed.')
class APPROVE_REVIEW(_LOG):
id = 40
action_class = 'approve'
format = _(u'{review} for {addon} approved.')
editor_format = _(u'{user} approved {review} for {addon}.')
keep = True
editor_event = True
class DELETE_REVIEW(_LOG):
"""Requires review.id and add-on objects."""
id = 41
action_class = 'review'
format = _(u'Review {review} for {addon} deleted.')
editor_format = _(u'{user} deleted {review} for {addon}.')
keep = True
editor_event = True
class MAX_APPVERSION_UPDATED(_LOG):
id = 46
format = _(u'Application max version for {version} updated.')
class BULK_VALIDATION_EMAILED(_LOG):
id = 47
format = _(u'Authors emailed about compatibility of {version}.')
class BULK_VALIDATION_USER_EMAILED(_LOG):
id = 130
format = _(u'Email sent to Author about add-on compatibility.')
class CHANGE_PASSWORD(_LOG):
id = 48
format = _(u'Password changed.')
class PAYPAL_FAILED(_LOG):
id = 51
format = _(u'{addon} failed checks with PayPal.')
class MANIFEST_UPDATED(_LOG):
id = 52
format = _(u'{addon} manifest updated.')
class APPROVE_VERSION_WAITING(_LOG):
id = 53
action_class = 'approve'
format = _(u'{addon} {version} approved but waiting to be made public.')
short = _(u'Approved but waiting')
keep = True
review_email_user = True
review_queue = True
class PURCHASE_ADDON(_LOG):
id = 54
format = _(u'{addon} purchased.')
class INSTALL_ADDON(_LOG):
id = 55
format = _(u'{addon} installed.')
class USER_EDITED(_LOG):
id = 60
format = _(u'Account updated.')
class ESCALATION_CLEARED(_LOG):
id = 66
format = _(u'Escalation cleared for {addon}.')
short = _(u'Escalation cleared')
keep = True
review_queue = True
class APP_DISABLED(_LOG):
id = 67
format = _(u'{addon} disabled.')
short = _(u'App disabled')
keep = True
review_queue = True
class ESCALATED_HIGH_ABUSE(_LOG):
id = 68
format = _(u'{addon} escalated because of high number of abuse reports.')
short = _(u'High Abuse Reports')
keep = True
review_queue = True
class ESCALATE_MANUAL(_LOG):
id = 73
format = _(u'{addon} escalated by reviewer.')
short = _(u'Reviewer escalation')
keep = True
review_queue = True
# TODO(robhudson): Escalation log for editor escalation.
class VIDEO_ERROR(_LOG):
id = 74
format = _(u'Video removed from {addon} because of a problem with '
u'the video. ')
short = _(u'Video removed')
class REREVIEW_DEVICES_ADDED(_LOG):
id = 75
format = _(u'{addon} re-review because of new device(s) added.')
short = _(u'Device(s) Added')
keep = True
review_queue = True
class REVIEW_DEVICE_OVERRIDE(_LOG):
id = 76
format = _(u'{addon} device support manually changed by reviewer.')
short = _(u'Device(s) Changed by Reviewer')
keep = True
review_queue = True
class CUSTOM_TEXT(_LOG):
id = 98
format = '{0}'
class CUSTOM_HTML(_LOG):
id = 99
format = '{0}'
class OBJECT_ADDED(_LOG):
id = 100
format = _(u'Created: {0}.')
admin_event = True
class OBJECT_EDITED(_LOG):
id = 101
format = _(u'Edited field: {2} set to: {0}.')
admin_event = True
class OBJECT_DELETED(_LOG):
id = 102
format = _(u'Deleted: {1}.')
admin_event = True
class ADMIN_USER_EDITED(_LOG):
id = 103
format = _(u'User {user} edited, reason: {1}')
admin_event = True
class ADMIN_USER_ANONYMIZED(_LOG):
id = 104
format = _(u'User {user} anonymized.')
admin_event = True
class ADMIN_USER_RESTRICTED(_LOG):
id = 105
format = _(u'User {user} restricted.')
admin_event = True
class ADMIN_VIEWED_LOG(_LOG):
id = 106
format = _(u'Admin {0} viewed activity log for {user}.')
admin_event = True
class EDIT_REVIEW(_LOG):
id = 107
action_class = 'review'
format = _(u'{review} for {addon} updated.')
class THEME_REVIEW(_LOG):
id = 108
action_class = 'review'
format = _(u'{addon} reviewed.')
class GROUP_USER_ADDED(_LOG):
id = 120
action_class = 'access'
format = _(u'User {0.name} added to {group}.')
keep = True
admin_event = True
class GROUP_USER_REMOVED(_LOG):
id = 121
action_class = 'access'
format = _(u'User {0.name} removed from {group}.')
keep = True
admin_event = True
class REVIEW_FEATURES_OVERRIDE(_LOG):
id = 122
format = _(u'{addon} minimum requirements manually changed by reviewer.')
short = _(u'Requirements Changed by Reviewer')
keep = True
review_queue = True
class REREVIEW_FEATURES_CHANGED(_LOG):
id = 123
format = _(u'{addon} minimum requirements manually changed.')
short = _(u'Requirements Changed')
keep = True
review_queue = True
class CHANGE_VERSION_STATUS(_LOG):
id = 124
# L10n: {0} is the status
format = _(u'{version} status changed to {0}.')
keep = True
class DELETE_USER_LOOKUP(_LOG):
id = 125
# L10n: {0} is the status
format = _(u'User {0.name} {0.id} deleted via lookup tool.')
keep = True
class CONTENT_RATING_TO_ADULT(_LOG):
id = 126
format = _('{addon} content rating changed to Adult.')
review_queue = True
class CONTENT_RATING_CHANGED(_LOG):
id = 127
format = _('{addon} content rating changed.')
class ADDON_UNLISTED(_LOG):
id = 128
format = _(u'{addon} unlisted.')
keep = True
class BETA_SIGNED_VALIDATION_PASSED(_LOG):
id = 131
format = _(u'{file} was signed.')
keep = True
class BETA_SIGNED_VALIDATION_FAILED(_LOG):
id = 132
format = _(u'{file} was signed.')
keep = True
class DELETE_ADDON(_LOG):
id = 133
action_class = 'delete'
    # L10n: {0} is the add-on id, {1} is the add-on GUID.
format = _(u'Addon id {0} with GUID {1} has been deleted')
keep = True
class EXPERIMENT_SIGNED(_LOG):
id = 134
format = _(u'{file} was signed.')
keep = True
class UNLISTED_SIGNED_VALIDATION_PASSED(_LOG):
id = 135
format = _(u'{file} was signed.')
keep = True
class UNLISTED_SIGNED_VALIDATION_FAILED(_LOG):
id = 136
format = _(u'{file} was signed.')
keep = True
class UNLISTED_SIDELOAD_SIGNED_VALIDATION_PASSED(_LOG):
id = 137
format = _(u'{file} was signed.')
keep = True
class UNLISTED_SIDELOAD_SIGNED_VALIDATION_FAILED(_LOG):
id = 138
format = _(u'{file} was signed.')
keep = True
class PRELIMINARY_ADDON_MIGRATED(_LOG):
id = 139
format = _(u'{addon} migrated from preliminary.')
keep = True
review_queue = True
LOGS = [x for x in vars().values()
if isclass(x) and issubclass(x, _LOG) and x != _LOG]
# Make sure there's no duplicate IDs.
assert len(LOGS) == len(set(log.id for log in LOGS))
LOG_BY_ID = dict((l.id, l) for l in LOGS)
LOG = AttributeDict((l.__name__, l) for l in LOGS)
LOG_ADMINS = [l.id for l in LOGS if hasattr(l, 'admin_event')]
LOG_KEEP = [l.id for l in LOGS if hasattr(l, 'keep')]
LOG_EDITORS = [l.id for l in LOGS if hasattr(l, 'editor_event')]
LOG_REVIEW_QUEUE = [l.id for l in LOGS if hasattr(l, 'review_queue')]
LOG_EDITOR_REVIEW_ACTION = [
l.id for l in LOGS if hasattr(l, 'editor_review_action')]
# Is the user emailed the message?
LOG_REVIEW_EMAIL_USER = [l.id for l in LOGS if hasattr(l, 'review_email_user')]
# Logs *not* to show to the developer.
LOG_HIDE_DEVELOPER = [l.id for l in LOGS
if (getattr(l, 'hide_developer', False) or
l.id in LOG_ADMINS)]
# Review Queue logs to show to developer (i.e. hiding admin/private)
LOG_REVIEW_QUEUE_DEVELOPER = list(set(LOG_EDITOR_REVIEW_ACTION) -
set(LOG_HIDE_DEVELOPER))
def log(action, *args, **kw):
"""
e.g. amo.log(amo.LOG.CREATE_ADDON, []),
amo.log(amo.LOG.ADD_FILE_TO_VERSION, file, version)
"""
from olympia.access.models import Group
from olympia.addons.models import Addon
from olympia.amo import get_user, logger_log
from olympia.devhub.models import (
ActivityLog, AddonLog, CommentLog, GroupLog, UserLog, VersionLog)
from olympia.users.models import UserProfile
from olympia.versions.models import Version
user = kw.get('user', get_user())
if not user:
logger_log.warning('Activity log called with no user: %s' % action.id)
return
al = ActivityLog(user=user, action=action.id)
al.arguments = args
if 'details' in kw:
al.details = kw['details']
al.save()
if 'details' in kw and 'comments' in al.details:
CommentLog(comments=al.details['comments'], activity_log=al).save()
# TODO(davedash): post-remora this may not be necessary.
if 'created' in kw:
al.created = kw['created']
# Double save necessary since django resets the created date on save.
al.save()
for arg in args:
if isinstance(arg, tuple):
if arg[0] == Addon:
AddonLog(addon_id=arg[1], activity_log=al).save()
elif arg[0] == Version:
VersionLog(version_id=arg[1], activity_log=al).save()
elif arg[0] == UserProfile:
UserLog(user_id=arg[1], activity_log=al).save()
elif arg[0] == Group:
GroupLog(group_id=arg[1], activity_log=al).save()
elif isinstance(arg, Addon):
AddonLog(addon=arg, activity_log=al).save()
elif isinstance(arg, Version):
VersionLog(version=arg, activity_log=al).save()
elif isinstance(arg, UserProfile):
# Index by any user who is mentioned as an argument.
UserLog(activity_log=al, user=arg).save()
elif isinstance(arg, Group):
GroupLog(group=arg, activity_log=al).save()
# Index by every user
UserLog(activity_log=al, user=user).save()
return al
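# Hedged usage sketch: actions can be referenced by name via `LOG` or by
# numeric id via `LOG_BY_ID`, and the id lists above are plain filters over
# class attributes. These checks mirror the duplicate-id assertion earlier and
# hold for the definitions in this file.
assert LOG.CREATE_ADDON.id == 1
assert LOG_BY_ID[LOG.CREATE_ADDON.id] is LOG.CREATE_ADDON
assert LOG.CREATE_ADDON.id in LOG_KEEP      # CREATE_ADDON sets keep = True
assert LOG.OBJECT_ADDED.id in LOG_ADMINS    # OBJECT_ADDED sets admin_event = True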
|
|
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Merges two android manifest xml files."""
import re
import sys
import xml.dom.minidom
from tools.android import android_permissions
from third_party.py import gflags
FLAGS = gflags.FLAGS
EXCLUDE_ALL_ARG = 'all'
gflags.DEFINE_multistring(
'exclude_permission', None,
    'Permissions to be excluded, e.g.: "android.permission.READ_LOGS". '
    'This is a multistring, so multiple of those flags can be provided. '
'Pass "%s" to exclude all permissions contributed by mergees.'
% EXCLUDE_ALL_ARG)
gflags.DEFINE_multistring(
'mergee', None,
    'Mergee manifest that will be merged into the merger manifest. '
'This is a multistring, so multiple of those flags can be provided.')
gflags.DEFINE_string('merger', None,
'Merger AndroidManifest file to be merged.')
gflags.DEFINE_string('output', None, 'Output file with merged manifests.')
USAGE = """Error, invalid arguments.
Usage: merge_manifests.py --merger=<merger> --mergee=<mergee1> --mergee=<merge2>
--exclude_permission=[Exclude permissions from mergee] --output=<output>
Examples:
merge_manifests.py --merger=manifest.xml --mergee=manifest2.xml
--mergee=manifest3.xml --exclude_permission=android.permission.READ_LOGS
--output=AndroidManifest.xml
merge_manifests.py --merger=manifest.xml --mergee=manifest2.xml
--mergee=manifest3.xml --exclude_permission=%s
--output=AndroidManifest.xml
""" % EXCLUDE_ALL_ARG
class UndefinedPlaceholderException(Exception):
"""Exception thrown when encountering a placeholder without a replacement.
"""
pass
class MalformedManifestException(Exception):
"""Exception thrown when encountering a fatally malformed manifest.
"""
pass
class MergeManifests(object):
"""A utility class for merging two android manifest.xml files.
This is useful when including another app as android library.
"""
_ACTIVITY = 'activity'
_ANDROID_NAME = 'android:name'
_ANDROID_LABEL = 'android:label'
_INTENT_FILTER = 'intent-filter'
_MANIFEST = 'manifest'
_USES_PERMISSION = 'uses-permission'
_USES_PERMISSION_SDK_23 = 'uses-permission-sdk-23'
_NODES_TO_COPY_FROM_MERGEE = {
_MANIFEST: [
'instrumentation',
'permission',
_USES_PERMISSION,
_USES_PERMISSION_SDK_23,
'uses-feature',
'permission-group',
],
'application': [
'activity',
'activity-alias',
'provider',
'receiver',
'service',
'uses-library',
'meta-data',
],
}
_NODES_TO_REMOVE_FROM_MERGER = []
_PACKAGE = 'package'
def __init__(self, merger, mergees, exclude_permissions=None):
"""Constructs and initializes the MergeManifests object.
Args:
merger: First (merger) AndroidManifest.xml string.
mergees: mergee AndroidManifest.xml strings, a list.
      exclude_permissions: Permissions to be excluded from merging,
        string list. "all" means don't include any permissions.
"""
self._merger = merger
self._mergees = mergees
self._exclude_permissions = exclude_permissions
self._merger_dom = xml.dom.minidom.parseString(self._merger[0])
def _ApplyExcludePermissions(self, dom):
"""Apply exclude filters.
Args:
dom: Document dom object from which to exclude permissions.
"""
if self._exclude_permissions:
exclude_all_permissions = EXCLUDE_ALL_ARG in self._exclude_permissions
for element in (dom.getElementsByTagName(self._USES_PERMISSION) +
dom.getElementsByTagName(self._USES_PERMISSION_SDK_23)):
if element.hasAttribute(self._ANDROID_NAME):
attrib = element.getAttribute(self._ANDROID_NAME)
if exclude_all_permissions or attrib in self._exclude_permissions:
element.parentNode.removeChild(element)
def _ExpandPackageName(self, node):
"""Set the package name if it is in a short form.
Filtering logic for what elements have package expansion:
If the name starts with a dot, always prefix it with the package.
If the name has a dot anywhere else, do not prefix it.
If the name has no dot at all, also prefix it with the package.
The massageManifest function shows where this rule is applied:
In the application element, on the name and backupAgent attributes.
In the activity, service, receiver, provider, and activity-alias elements,
on the name attribute.
In the activity-alias element, on the targetActivity attribute.
Args:
node: Xml Node for which to expand package name.
"""
package_name = node.getElementsByTagName(self._MANIFEST).item(
0).getAttribute(self._PACKAGE)
if not package_name:
return
for element in node.getElementsByTagName('*'):
if element.nodeName not in [
'activity',
'activity-alias',
'application',
'service',
'receiver',
'provider',
]:
continue
self._ExpandPackageNameHelper(package_name, element, self._ANDROID_NAME)
if element.nodeName == 'activity':
self._ExpandPackageNameHelper(package_name, element,
'android:parentActivityName')
if element.nodeName == 'activity-alias':
self._ExpandPackageNameHelper(package_name, element,
'android:targetActivity')
continue
if element.nodeName == 'application':
self._ExpandPackageNameHelper(package_name, element,
'android:backupAgent')
def _ExpandPackageNameHelper(self, package_name, element, attribute_name):
if element.hasAttribute(attribute_name):
class_name = element.getAttribute(attribute_name)
if class_name.startswith('.'):
pass
elif '.' not in class_name:
class_name = '.' + class_name
else:
return
element.setAttribute(attribute_name, package_name + class_name)
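  # Hedged examples of the expansion rule above, assuming a package of
  # "com.example.app": ".Main" -> "com.example.app.Main",
  # "Main" -> "com.example.app.Main", while "other.pkg.Main" is left unchanged.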
def _RemoveFromMerger(self):
"""Remove from merger."""
for tag_name in self._NODES_TO_REMOVE_FROM_MERGER:
elements = self._merger_dom.getElementsByTagName(tag_name)
for element in elements:
element.parentNode.removeChild(element)
def _RemoveAndroidLabel(self, node):
"""Remove android:label.
    We do this because it is not required by the merger manifest,
    and it might contain @string references that would not allow compilation.
Args:
node: Node for which to remove Android labels.
"""
if node.hasAttribute(self._ANDROID_LABEL):
node.removeAttribute(self._ANDROID_LABEL)
def _IsDuplicate(self, node_to_copy, node):
"""Is element a duplicate?"""
for merger_node in self._merger_dom.getElementsByTagName(node_to_copy):
if (merger_node.getAttribute(self._ANDROID_NAME) ==
node.getAttribute(self._ANDROID_NAME)):
return True
return False
def _RemoveIntentFilters(self, node):
"""Remove intent-filter in activity element.
So there are no duplicate apps.
Args:
node: Node for which to remove intent filters.
"""
intent_filters = node.getElementsByTagName(self._INTENT_FILTER)
if intent_filters.length > 0:
for sub_node in intent_filters:
node.removeChild(sub_node)
def _FindElementComment(self, node):
"""Find element's comment.
Assumes that element's comment can be just above the element.
Searches previous siblings and looks for the first non text element
that is of a nodeType of comment node.
Args:
node: Node for which to find a comment.
Returns:
Elements's comment node, None if not found.
"""
while node.previousSibling:
node = node.previousSibling
if node.nodeType is node.COMMENT_NODE:
return node
if node.nodeType is not node.TEXT_NODE:
return None
return None
def _ReplaceArgumentPlaceholders(self, dom):
"""Replaces argument placeholders with their values.
Modifies the attribute values of the input node.
Args:
dom: Xml node that should get placeholders replaced.
"""
placeholders = {
'packageName': self._merger_dom.getElementsByTagName(
self._MANIFEST).item(0).getAttribute(self._PACKAGE),
}
for element in dom.getElementsByTagName('*'):
for i in range(element.attributes.length):
attr = element.attributes.item(i)
attr.value = self._ReplaceArgumentHelper(placeholders, attr.value)
def _ReplaceArgumentHelper(self, placeholders, attr):
"""Replaces argument placeholders within a single string.
Args:
placeholders: A dict mapping between placeholder names and their
replacement values.
attr: A string in which to replace argument placeholders.
Returns:
A string with placeholders replaced, or the same string if no placeholders
were found.
"""
match_placeholder = '\\${([a-zA-Z]*)}'
# Returns the replacement string for found matches.
def PlaceholderReplacer(matchobj):
found_placeholder = matchobj.group(1)
if found_placeholder not in placeholders:
raise UndefinedPlaceholderException(
'Undefined placeholder when substituting arguments: '
+ found_placeholder)
return placeholders[found_placeholder]
attr = re.sub(match_placeholder, PlaceholderReplacer, attr)
return attr
def _SortAliases(self):
applications = self._merger_dom.getElementsByTagName('application')
if not applications:
return
for alias in applications[0].getElementsByTagName('activity-alias'):
comment_node = self._FindElementComment(alias)
while comment_node is not None:
applications[0].appendChild(comment_node)
comment_node = self._FindElementComment(alias)
applications[0].appendChild(alias)
def _FindMergerParent(self, tag_to_copy, destination_tag_name, mergee_dom):
"""Finds merger parent node, or appends mergee equivalent node if none."""
# Merger parent element to which to add merged elements.
if self._merger_dom.getElementsByTagName(destination_tag_name):
return self._merger_dom.getElementsByTagName(destination_tag_name)[0]
else:
mergee_element = mergee_dom.getElementsByTagName(destination_tag_name)[0]
# find the parent
parents = self._merger_dom.getElementsByTagName(
mergee_element.parentNode.tagName)
if not parents:
raise MalformedManifestException(
            'Malformed manifest has tag %s but no parent tag %s' %
            (tag_to_copy, destination_tag_name))
# append the mergee child as the first child.
return parents[0].insertBefore(mergee_element, parents[0].firstChild)
def _OrderManifestChildren(self):
"""Moves elements of the manifest tag into the correct order."""
manifest = self._merger_dom.getElementsByTagName('manifest')[0]
# The application element must be the last element in the manifest tag.
applications = self._merger_dom.getElementsByTagName('application')
if applications:
manifest.appendChild(applications[0])
def _MergeTopLevelNamespaces(self, mergee_dom):
"""Merge the xmlns declarations in the top-level manifest nodes.
    This does not handle xmlns declarations in child nodes; they are ignored.
Overall, this manifest merger does not try to interpret any attributes that
use the android "tools" namespace either. E.g., tools:node="remove".
This functionality is just to help migrate from this manifest merger,
to a new manifest merger that does handle tools annotations (a manifest
may be sent to both mergers during migration).
Args:
mergee_dom: The dom of the mergee manifest.
Raises:
MalformedManifestException: if the mergee and merger manifests contain
xmlns declarations that don't agree.
"""
manifest = self._merger_dom.getElementsByTagName('manifest')[0]
mergee_manifest = mergee_dom.getElementsByTagName('manifest')[0]
for i in range(mergee_manifest.attributes.length):
attr = mergee_manifest.attributes.item(i)
if attr.prefix and attr.prefix == 'xmlns':
if manifest.hasAttribute(attr.name):
main_attr_value = manifest.getAttribute(attr.name)
if main_attr_value != attr.value:
raise MalformedManifestException(
'different values for namespace %s ("%s" vs "%s")' % (
attr.name, main_attr_value, attr.value))
else:
manifest.setAttribute(attr.name, attr.value)
def Merge(self):
"""Takes two manifests, and merges them together to produce a third."""
self._RemoveFromMerger()
self._ExpandPackageName(self._merger_dom)
for dom, filename in self._mergees:
mergee_dom = xml.dom.minidom.parseString(dom)
self._ReplaceArgumentPlaceholders(mergee_dom)
self._ExpandPackageName(mergee_dom)
self._ApplyExcludePermissions(mergee_dom)
self._MergeTopLevelNamespaces(mergee_dom)
for destination, values in sorted(
self._NODES_TO_COPY_FROM_MERGEE.iteritems()):
for node_to_copy in values:
for node in mergee_dom.getElementsByTagName(node_to_copy):
if self._IsDuplicate(node_to_copy, node):
continue
merger_parent = self._FindMergerParent(node_to_copy,
destination,
mergee_dom)
# Append the merge comment.
merger_parent.appendChild(
self._merger_dom.createComment(' Merged from file: %s ' %
filename))
# Append mergee's comment, if present.
comment_node = self._FindElementComment(node)
if comment_node:
merger_parent.appendChild(comment_node)
# Append element from mergee to merger.
merger_parent.appendChild(node)
# Insert top level comment about the merge.
top_comment = (
' *** WARNING *** DO NOT EDIT! THIS MANIFEST WAS GENERATED BY THE '
'MERGE_MANIFEST TOOL.\n'
' Merger manifest:\n %s\n' % self._merger[1] +
' Mergee manifests:\n%s' % '\n'.join(
[' %s' % mergee[1] for mergee in self._mergees]) +
'\n ')
manifest_element = self._merger_dom.getElementsByTagName('manifest')[0]
manifest_element.insertBefore(self._merger_dom.createComment(top_comment),
manifest_element.firstChild)
self._SortAliases()
self._OrderManifestChildren()
return self._merger_dom.toprettyxml(indent=' ')
def _ReadFiles(files):
results = []
for file_name in files:
results.append(_ReadFile(file_name))
return results
def _ReadFile(file_name):
with open(file_name, 'r') as my_file:
return (my_file.read(), file_name,)
def _ValidateAndWarnPermissions(exclude_permissions):
unknown_permissions = (
set(exclude_permissions)
- set([EXCLUDE_ALL_ARG])
- android_permissions.PERMISSIONS)
return '\n'.join([
'WARNING:\n\t Specified permission "%s" is not a standard permission. '
'Is it a typo?' % perm for perm in unknown_permissions])
def main():
if not FLAGS.merger:
raise RuntimeError('Missing merger value.\n' + USAGE)
if len(FLAGS.mergee) < 1:
raise RuntimeError('Missing mergee value.\n' + USAGE)
if not FLAGS.output:
raise RuntimeError('Missing output value.\n' + USAGE)
if FLAGS.exclude_permission:
warning = _ValidateAndWarnPermissions(FLAGS.exclude_permission)
if warning:
print warning
merged_manifests = MergeManifests(_ReadFile(FLAGS.merger),
_ReadFiles(FLAGS.mergee),
FLAGS.exclude_permission
).Merge()
with open(FLAGS.output, 'w') as out_file:
for line in merged_manifests.split('\n'):
if not line.strip():
continue
out_file.write(line.encode('utf8') + '\n')
if __name__ == '__main__':
FLAGS(sys.argv)
main()
|
|
import time
import collections
import traceback
import os
import sys
import types
from hotspotter.other.AbstractPrintable import AbstractPrintable
from PyQt4.Qt import QObject
from PyQt4.QtGui import QMessageBox
from PyQt4.QtCore import Qt
#import os
#os.spawnl(os.P_DETACH, 'some_log_running_command')
# Have this happen.^^
# http://stackoverflow.com/questions/1196074/starting-a-background-process-in-python
## Embarrassingly parallel stuff can use this
#import subprocess
#subprocess.Popen(["rm","-r","some.file"])
#
#
class CallInfoObject(AbstractPrintable):
def __init__(self, module, line, func, indent, calltype, prefix):
super(CallInfoObject,self).__init__()
self.module = module
self.line = line
self.func = func
self.indent = indent
self.calltype = calltype
self.prefix = prefix
#---------------
debug_blacklist = '''
helpers.dircheck
_hsPrefs
ChipManager.add_chip
ImageManager.add_img
NameManager.add_name
alloc
IOManager.__
_hsGlobals.x2_info
'''
#---------------
def callinfo(num_up=2):
'finds how much to indent'
stack_list = traceback.extract_stack()
indent = 0
calltype = ' '
for stack_tup in reversed(stack_list):
if stack_tup[2] == 'start_event_loop_qt4':
calltype = 'gui'
break
elif stack_tup[2] == 'execfile':
calltype = 'ini'
break
elif stack_tup[2] == 'run_code':
calltype = 'ipy'
break
elif stack_tup[0] == 'main.py':
calltype = 'cmd'
break
indent += 1
#print stack_list
caller_tup = stack_list[ -num_up ]
modul_name = os.path.split(caller_tup[0])[-1].replace('.py','')
line_num = caller_tup[1]
func_name = caller_tup[2]
#prefix = modul_name+'.'+func_name+'('+str(line_num)+')' # WITH LINE
prefix = modul_name+'.'+func_name # WITHOUT LINE
return CallInfoObject(modul_name, line_num, func_name, indent-num_up, calltype, prefix)
#---------------
class HotSpotterLogger(object):
def enable_global_logs(hsl):
logfile = os.path.realpath('hotspotter_global_logs.log')
print('Enabling active logging to file: ' + logfile)
hsl.global_logs_on = True
if hsl.global_logs_on:
hsl.global_log_file = open(logfile,'a')
def __init__(hsl):
hsl.error_num = 0
hsl.global_logs_on = False
hsl.logged_lines = collections.deque(maxlen=2000)
hsl.debug_blacklist = debug_blacklist
hsl.cursor_x = 0
hsl.cursor = '/-\|'
hsl.kirby = \
['(>\'-\')> %4d',
'<(\'-\'<) %4d',
'^(\'- \')^ %4d',
'<(\'-\'<) %4d']
hsl.delete_kirby = '\b'*13
hsl.prev_time = time.time()
hsl.non_modal_qt_handles = []
def non_modal_critical_dialog(hsl, title, msg):
try:
# Make a non modal critical QMessageBox
msgBox = QMessageBox(None)
msgBox.setAttribute(Qt.WA_DeleteOnClose)
msgBox.setStandardButtons(QMessageBox.Ok)
msgBox.setWindowTitle(title)
msgBox.setText(msg)
msgBox.setModal(False)
msgBox.open(msgBox.close)
msgBox.show()
hsl.non_modal_qt_handles.append(msgBox)
# Old Modal Version: QMessageBox.critical(None, 'ERROR', msg)
except Exception as ex:
print('non_modal_critical_dialog: '+str(ex))
def __str__(hsl):
return hsl.hidden_logs()
def hidden_logs(hsl, use_blacklist_bit=True):
logged_hidden = ''
for line in hsl.logged_lines:
if (not use_blacklist_bit) or all( [len(bl_) == 0 or line.find(bl_) == -1 for bl_ in debug_blacklist.splitlines()] ):
logged_hidden += line + '\n'
return logged_hidden
def log(hsl, msg, noprint=False, noformat=False):
if noformat:
fmted_msg = msg
else:
info = callinfo(4) # Get Prefix Info
indent_str = info.calltype+'. '*info.indent
# Format Info
indent_pfx = indent_str+info.prefix
indent_msg = str(msg).replace('\n', '\n'+(' '*len(indent_str)))
fmted_msg = indent_pfx+': '+indent_msg
hsl.logged_lines.append(fmted_msg)
if hsl.global_logs_on: # Log to global logs
hsl.global_log_file.write(fmted_msg+'\n')
if noprint: # If noprint, indicate progress without printing the message
since_last = time.time() - hsl.prev_time
time_thresh = 1
if since_last > time_thresh:
if hsl.cursor_x == -1:
pass
elif hsl.cursor_x == 0:
# Write Kirby
#sys.stdout.write('Working: '+(hsl.kirby[hsl.cursor_x % 4] % hsl.cursor_x))
#sys.stdout.flush()
pass
else:
# Write Kirby
#sys.stdout.write(hsl.delete_kirby+(hsl.kirby[hsl.cursor_x % 4] % hsl.cursor_x))
pass
#sys.stdout.flush()
hsl.cursor_x += 1
hsl.prev_time = time.time()
else:
if hsl.cursor_x > 0:
fmted_msg = '\n'+fmted_msg
hsl.cursor_x = -1
#sys.stdout.write(fmted_msg+'\n')
print(msg)
hsl.prev_time = time.time()
hsl = HotSpotterLogger()
class FuncLogException(Exception):
'FuncLog Exceptions have already been handled by the wrapper and are benign'
def __init__(self, value):
sys.stdout.flush(); sys.stderr.flush()
self.value = value
def __str__(self):
return str(self.value)
class LogErrorException(Exception):
def __init__(self, error_num=-1):
self.error_num = error_num
def __str__(self):
return str('<LogError Num '+str(self.error_num)+'>')
def logwarn(msg):
hsl.log('<WARN START **************************')
hsl.log('<WARNING-TRACEBACK> '+traceback.format_exc())
hsl.log('<WARNING> '+msg)
hsl.log('WARN END **************************>')
sys.stdout.flush(); sys.stderr.flush()
def logerr(msg=None):
error_num = hsl.error_num
hsl.error_num += 1
hsl.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
hsl.log('<ERROR Number %d>' % error_num)
hsl.log('\n\n *!* HotSpotter Raised Exception: %s \n' % str(msg))
hsl.log('<ERROR Number %d>' % error_num)
hsl.log('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
#hsl.log('\n\n *!* HotSpotter Exception Traceback: \n'+traceback.format_exc())
sys.stdout.flush(); sys.stderr.flush()
hsl.non_modal_critical_dialog('ERROR #%d' % error_num, msg)
raise LogErrorException(error_num)
def logmsg(msg):
hsl.log(msg)
def logdbg(msg):
hsl.log('> '+msg, noprint=True)
def logio(msg):
hsl.log('IO> '+msg, noprint=True)
def func_debug(fn):
import traceback
def func_debug_wrapper(*args, **kwargs):
print('\n\n *!!* Func Debug Traceback: \n\n\n'+str(traceback.format_stack()))
logdbg('\n\n *!!* Func Debug Traceback: \n\n'+str(traceback.format_stack()))
return fn(*args, **kwargs)
return func_debug_wrapper
def func_log(fn):
def func_log_wrapper(*args, **kwargs):
# -- Format Logs
# -- Arg Info
argnames = fn.func_code.co_varnames[:fn.func_code.co_argcount]
argprint = ''
arg_length_cutoff = 100
if len(args) > 0 and (isinstance(args[0], AbstractPrintable) or
isinstance(args[0], QObject)):
#argprint = ', '.join( '%s=%r' % entry for entry in zip(argnames[1:],args[1:]) + kwargs.items())
arg_rep = lambda argstr: argstr if len(argstr) < arg_length_cutoff else '...'
arg_iter = iter(zip(argnames[1:],args[1:]) + kwargs.items())
argprint = ', '.join( var+'='+arg_rep(repr(val)) for (var,val) in arg_iter)
else:
arg_rep = lambda argstr: argstr if len(argstr) < arg_length_cutoff else '...'
arg_iter = iter(zip(argnames,args) + kwargs.items())
argprint = ', '.join( var+'='+arg_rep(repr(val)) for (var,val) in arg_iter)
# -- Module / Line Info
info = callinfo(3)
module = str(fn.func_code)
module = module[max(module.find('\\'), module.rfind('/'))+1:module.rfind('.py')]
align_space = 80
function_name = fn.func_name
#if info.indent < 1: # Hack to make kirby not appear every time you do anything
# logmsg(module+'.'+function_name+'('+argprint+')')
into_str = 'In '+module+'.'+function_name+'('+argprint+')'
outo_str = 'Out '+module+'.'+function_name+'('+argprint+')'#+'\n'
indent1 = '> '*info.indent
prefix_sep = '--'
fill_length = max(0,align_space-len(indent1)-len(prefix_sep))
indent2 = ' '*fill_length
prefixIN = info.calltype+indent1+prefix_sep+indent2
indent1OUT = ' '*len(indent1)
indent2OUT = indent2.replace(' ',' <')
prefixOUT = info.calltype+indent1OUT+prefix_sep+indent2OUT
# -- Log Enter Function
hsl.log(prefixIN+into_str, noprint=True, noformat=True)
# -- Run Function
ret = None
try:
ret = fn(*args, **kwargs)
except FuncLogException as ex:
logdbg('Caught FuncLog-Exception: '+str(ex))
except LogErrorException as ex:
logdbg('Caught LogError-Exception: '+str(ex))
et, ei, tb = sys.exc_info()
#et, ei, tb = sys.exc_info()
#raise FuncLogException, FuncLogException(e), tb
except Exception as ex:
logmsg('\n\n *!!* HotSpotter Logger Raised Exception: '+str(ex))
logmsg('\n\n *!!* HotSpotter Logger Exception Traceback: \n\n'+traceback.format_exc())
sys.stdout.flush()
et, ei, tb = sys.exc_info()
#raise FuncLogException, FuncLogException(e), tb
# --- Log Exit Function
ret_str = ' returned '+str(ret)
hsl.log(prefixOUT+outo_str+ret_str, noprint=True, noformat=True)
if info.indent < 1:
hsl.log('\n\n', noprint=True, noformat=True)
sys.stdout.flush(); sys.stderr.flush()
return ret
func_log_wrapper.__name__ = fn.__name__
#func_log_wrapper.__doc__ = fn.__doc__
#func_log_wrapper.__dict__.update(fn.__dict__)
return func_log_wrapper
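#---------------
# Illustrative usage (a minimal sketch, not part of the original module): the
# function name _demo_scaled_sum is hypothetical and exists only to show the
# entry/exit lines that func_log records through the shared hsl logger, and
# how logdbg output ends up in the hidden (noprint) log.
@func_log
def _demo_scaled_sum(values, scale=1.0):
    'Sums a list and scales the result, logging entry/exit via func_log'
    logdbg('scaling %d values by %r' % (len(values), scale))
    return sum(values) * scale

if __name__ == '__main__':
    # Running this module directly exercises the demo wrapper once and then
    # dumps the collected (noprint) log lines.
    _demo_scaled_sum([1, 2, 3], scale=2.0)
    print(hsl.hidden_logs())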
|
|
"""
Unified interfaces to minimization algorithms.
Functions
---------
- minimize : minimization of a function of several variables.
- minimize_scalar : minimization of a function of one variable.
"""
__all__ = ['minimize', 'minimize_scalar']
from warnings import warn
import numpy as np
# unconstrained minimization
from ._optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg,
_minimize_bfgs, _minimize_newtoncg,
_minimize_scalar_brent, _minimize_scalar_bounded,
_minimize_scalar_golden, MemoizeJac)
from ._trustregion_dogleg import _minimize_dogleg
from ._trustregion_ncg import _minimize_trust_ncg
from ._trustregion_krylov import _minimize_trust_krylov
from ._trustregion_exact import _minimize_trustregion_exact
from ._trustregion_constr import _minimize_trustregion_constr
# constrained minimization
from ._lbfgsb_py import _minimize_lbfgsb
from ._tnc import _minimize_tnc
from ._cobyla_py import _minimize_cobyla
from ._slsqp_py import _minimize_slsqp
from ._constraints import (old_bound_to_new, new_bounds_to_old,
old_constraint_to_new, new_constraint_to_old,
NonlinearConstraint, LinearConstraint, Bounds)
from ._differentiable_functions import FD_METHODS
MINIMIZE_METHODS = ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg',
'l-bfgs-b', 'tnc', 'cobyla', 'slsqp', 'trust-constr',
'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov']
MINIMIZE_SCALAR_METHODS = ['brent', 'bounded', 'golden']
def minimize(fun, x0, args=(), method=None, jac=None, hess=None,
hessp=None, bounds=None, constraints=(), tol=None,
callback=None, options=None):
"""Minimization of scalar function of one or more variables.
Parameters
----------
fun : callable
The objective function to be minimized.
``fun(x, *args) -> float``
where ``x`` is an 1-D array with shape (n,) and ``args``
is a tuple of the fixed parameters needed to completely
specify the function.
x0 : ndarray, shape (n,)
Initial guess. Array of real elements of size (n,),
where ``n`` is the number of independent variables.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (`fun`, `jac` and `hess` functions).
method : str or callable, optional
Type of solver. Should be one of
- 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>`
- 'Powell' :ref:`(see here) <optimize.minimize-powell>`
- 'CG' :ref:`(see here) <optimize.minimize-cg>`
- 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>`
- 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>`
- 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>`
- 'TNC' :ref:`(see here) <optimize.minimize-tnc>`
- 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>`
- 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>`
- 'trust-constr':ref:`(see here) <optimize.minimize-trustconstr>`
- 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>`
- 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>`
- 'trust-exact' :ref:`(see here) <optimize.minimize-trustexact>`
- 'trust-krylov' :ref:`(see here) <optimize.minimize-trustkrylov>`
- custom - a callable object (added in version 0.14.0),
see below for description.
If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``,
depending on whether or not the problem has constraints or bounds.
jac : {callable, '2-point', '3-point', 'cs', bool}, optional
Method for computing the gradient vector. Only for CG, BFGS,
Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg, trust-krylov,
trust-exact and trust-constr.
If it is a callable, it should be a function that returns the gradient
vector:
``jac(x, *args) -> array_like, shape (n,)``
where ``x`` is an array with shape (n,) and ``args`` is a tuple with
the fixed parameters. If `jac` is a Boolean and is True, `fun` is
assumed to return a tuple ``(f, g)`` containing the objective
function and the gradient.
Methods 'Newton-CG', 'trust-ncg', 'dogleg', 'trust-exact', and
'trust-krylov' require that either a callable be supplied, or that
`fun` return the objective and gradient.
If None or False, the gradient will be estimated using 2-point finite
difference estimation with an absolute step size.
Alternatively, the keywords {'2-point', '3-point', 'cs'} can be used
to select a finite difference scheme for numerical estimation of the
gradient with a relative step size. These finite difference schemes
obey any specified `bounds`.
hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy}, optional
Method for computing the Hessian matrix. Only for Newton-CG, dogleg,
trust-ncg, trust-krylov, trust-exact and trust-constr.
If it is callable, it should return the Hessian matrix:
``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
where ``x`` is a (n,) ndarray and ``args`` is a tuple with the fixed
parameters.
The keywords {'2-point', '3-point', 'cs'} can also be used to select
a finite difference scheme for numerical estimation of the hessian.
Alternatively, objects implementing the `HessianUpdateStrategy`
interface can be used to approximate the Hessian. Available
quasi-Newton methods implementing this interface are:
- `BFGS`;
- `SR1`.
Not all of the options are available for each of the methods; for
availability refer to the notes.
hessp : callable, optional
Hessian of objective function times an arbitrary vector p. Only for
Newton-CG, trust-ncg, trust-krylov, trust-constr.
Only one of `hessp` or `hess` needs to be given. If `hess` is
provided, then `hessp` will be ignored. `hessp` must compute the
Hessian times an arbitrary vector:
``hessp(x, p, *args) -> ndarray shape (n,)``
where ``x`` is a (n,) ndarray, ``p`` is an arbitrary vector with
dimension (n,) and ``args`` is a tuple with the fixed
parameters.
bounds : sequence or `Bounds`, optional
Bounds on variables for Nelder-Mead, L-BFGS-B, TNC, SLSQP, Powell, and
trust-constr methods. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. Sequence of ``(min, max)`` pairs for each element in `x`. None
is used to specify no bound.
constraints : {Constraint, dict} or List of {Constraint, dict}, optional
Constraints definition. Only for COBYLA, SLSQP and trust-constr.
Constraints for 'trust-constr' are defined as a single object or a
list of objects specifying constraints to the optimization problem.
Available constraints are:
- `LinearConstraint`
- `NonlinearConstraint`
Constraints for COBYLA, SLSQP are defined as a list of dictionaries.
Each dictionary with fields:
type : str
Constraint type: 'eq' for equality, 'ineq' for inequality.
fun : callable
The function defining the constraint.
jac : callable, optional
The Jacobian of `fun` (only for SLSQP).
args : sequence, optional
Extra arguments to be passed to the function and Jacobian.
Equality constraint means that the constraint function result is to
be zero whereas inequality means that it is to be non-negative.
Note that COBYLA only supports inequality constraints.
tol : float, optional
Tolerance for termination. When `tol` is specified, the selected
minimization algorithm sets some relevant solver-specific tolerance(s)
equal to `tol`. For detailed control, use solver-specific
options.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform. Depending on the
method each iteration may use several function evaluations.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options()`.
callback : callable, optional
Called after each iteration. For 'trust-constr' it is a callable with
the signature:
``callback(xk, OptimizeResult state) -> bool``
where ``xk`` is the current parameter vector and ``state``
is an `OptimizeResult` object, with the same fields
as the ones from the return. If callback returns True
the algorithm execution is terminated.
For all the other methods, the signature is:
``callback(xk)``
where ``xk`` is the current parameter vector.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
See also
--------
minimize_scalar : Interface to minimization algorithms for scalar
univariate functions
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *BFGS*.
**Unconstrained minimization**
Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate
gradient algorithm by Polak and Ribiere, a variant of the
Fletcher-Reeves method described in [5]_ pp.120-122. Only the
first derivatives are used.
Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton
method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_
pp. 136. It uses the first derivatives only. BFGS has proven good
performance even for non-smooth optimizations. This method also
returns an approximation of the Hessian inverse, stored as
`hess_inv` in the OptimizeResult object.
Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a
Newton-CG algorithm [5]_ pp. 168 (also known as the truncated
Newton method). It uses a CG method to compute the search
direction. See also *TNC* method for a box-constrained
minimization with a similar algorithm. Suitable for large-scale
problems.
Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg
trust-region algorithm [5]_ for unconstrained minimization. This
algorithm requires the gradient and Hessian; furthermore the
Hessian is required to be positive definite.
Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the
Newton conjugate gradient trust-region algorithm [5]_ for
unconstrained minimization. This algorithm requires the gradient
and either the Hessian or a function that computes the product of
the Hessian with a given vector. Suitable for large-scale problems.
Method :ref:`trust-krylov <optimize.minimize-trustkrylov>` uses
the Newton GLTR trust-region algorithm [14]_, [15]_ for unconstrained
minimization. This algorithm requires the gradient
and either the Hessian or a function that computes the product of
the Hessian with a given vector. Suitable for large-scale problems.
On indefinite problems it usually requires fewer iterations than the
`trust-ncg` method and is recommended for medium and large-scale problems.
Method :ref:`trust-exact <optimize.minimize-trustexact>`
is a trust-region method for unconstrained minimization in which
quadratic subproblems are solved almost exactly [13]_. This
algorithm requires the gradient and the Hessian (which is
*not* required to be positive definite). In many situations it
converges in fewer iterations than the Newton method and is the most
recommended for small and medium-size problems.
**Bound-Constrained minimization**
Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the
Simplex algorithm [1]_, [2]_. This algorithm is robust in many
applications. However, if numerical computation of derivatives can be
trusted, other algorithms using the first and/or second derivatives
information might be preferred for their better performance in
general.
Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B
algorithm [6]_, [7]_ for bound constrained minimization.
Method :ref:`Powell <optimize.minimize-powell>` is a modification
of Powell's method [3]_, [4]_ which is a conjugate direction
method. It performs sequential one-dimensional minimizations along
each vector of the directions set (`direc` field in `options` and
`info`), which is updated at each iteration of the main
minimization loop. The function need not be differentiable, and no
derivatives are taken. If bounds are not provided, then an
unbounded line search will be used. If bounds are provided and
the initial guess is within the bounds, then every function
evaluation throughout the minimization procedure will be within
the bounds. If bounds are provided, the initial guess is outside
the bounds, and `direc` is full rank (default has full rank), then
some function evaluations during the first iteration may be
outside the bounds, but every function evaluation after the first
iteration will be within the bounds. If `direc` is not full rank,
then some parameters may not be optimized and the solution is not
guaranteed to be within the bounds.
Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton
algorithm [5]_, [8]_ to minimize a function with variables subject
to bounds. This algorithm uses gradient information; it is also
called Newton Conjugate-Gradient. It differs from the *Newton-CG*
method described above as it wraps a C implementation and allows
each variable to be given upper and lower bounds.
**Constrained Minimization**
Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the
Constrained Optimization BY Linear Approximation (COBYLA) method
[9]_, [10]_, [11]_. The algorithm is based on linear
approximations to the objective function and each constraint. The
method wraps a FORTRAN implementation of the algorithm. The
constraints functions 'fun' may return either a single number
or an array or list of numbers.
Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential
Least SQuares Programming to minimize a function of several
variables with any combination of bounds, equality and inequality
constraints. The method wraps the SLSQP Optimization subroutine
originally implemented by Dieter Kraft [12]_. Note that the
wrapper handles infinite values in bounds by converting them into
large floating values.
Method :ref:`trust-constr <optimize.minimize-trustconstr>` is a
trust-region algorithm for constrained optimization. It switches
between two implementations depending on the problem definition.
It is the most versatile constrained minimization algorithm
implemented in SciPy and the most appropriate for large-scale problems.
For equality constrained problems it is an implementation of Byrd-Omojokun
Trust-Region SQP method described in [17]_ and in [5]_, p. 549. When
inequality constraints are imposed as well, it switches to the trust-region
interior point method described in [16]_. This interior point algorithm,
in turn, solves inequality constraints by introducing slack variables
and solving a sequence of equality-constrained barrier problems
for progressively smaller values of the barrier parameter.
The previously described equality constrained SQP method is
used to solve the subproblems with increasing levels of accuracy
as the iterate gets closer to a solution.
**Finite-Difference Options**
For Method :ref:`trust-constr <optimize.minimize-trustconstr>`
the gradient and the Hessian may be approximated using
three finite-difference schemes: {'2-point', '3-point', 'cs'}.
The scheme 'cs' is, potentially, the most accurate but it
requires the function to correctly handle complex inputs and to
be differentiable in the complex plane. The scheme '3-point' is more
accurate than '2-point' but requires twice as many operations. If the
gradient is estimated via finite-differences the Hessian must be
estimated using one of the quasi-Newton strategies.
**Method specific options for the** `hess` **keyword**
+--------------+------+----------+-------------------------+-----+
| method/Hess | None | callable | '2-point/'3-point'/'cs' | HUS |
+==============+======+==========+=========================+=====+
| Newton-CG | x | (n, n) | x | x |
| | | LO | | |
+--------------+------+----------+-------------------------+-----+
| dogleg | | (n, n) | | |
+--------------+------+----------+-------------------------+-----+
| trust-ncg | | (n, n) | x | x |
+--------------+------+----------+-------------------------+-----+
| trust-krylov | | (n, n) | x | x |
+--------------+------+----------+-------------------------+-----+
| trust-exact | | (n, n) | | |
+--------------+------+----------+-------------------------+-----+
| trust-constr | x | (n, n) | x | x |
| | | LO | | |
| | | sp | | |
+--------------+------+----------+-------------------------+-----+
where LO=LinearOperator, sp=Sparse matrix, HUS=HessianUpdateStrategy
**Custom minimizers**
It may be useful to pass a custom minimization method, for example
when using a frontend to this method such as `scipy.optimize.basinhopping`
or a different library. You can simply pass a callable as the ``method``
parameter.
The callable is called as ``method(fun, x0, args, **kwargs, **options)``
where ``kwargs`` corresponds to any other parameters passed to `minimize`
(such as `callback`, `hess`, etc.), except the `options` dict, which has
its contents also passed as `method` parameters pair by pair. Also, if
`jac` has been passed as a bool type, `jac` and `fun` are mangled so that
`fun` returns just the function values and `jac` is converted to a function
returning the Jacobian. The method shall return an `OptimizeResult`
object.
The provided `method` callable must be able to accept (and possibly ignore)
arbitrary parameters; the set of parameters accepted by `minimize` may
expand in future versions and then these parameters will be passed to
the method. You can find an example in the scipy.optimize tutorial.
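As a purely illustrative sketch (the name ``custmin``, its options and the
quadratic test function below are not part of SciPy), a custom method can
be written and passed directly:
>>> from scipy.optimize import minimize, OptimizeResult
>>> def custmin(fun, x0, args=(), maxiter=100, stepsize=0.1,
...             callback=None, **options):
...     # naive coordinate-wise search, for illustration only
...     bestx = x0.astype(float)
...     besty = fun(bestx, *args)
...     nfev = 1
...     for niter in range(1, maxiter + 1):
...         improved = False
...         for dim in range(bestx.size):
...             for step in (-stepsize, stepsize):
...                 testx = bestx.copy()
...                 testx[dim] += step
...                 testy = fun(testx, *args)
...                 nfev += 1
...                 if testy < besty:
...                     besty, bestx, improved = testy, testx, True
...         if callback is not None:
...             callback(bestx)
...         if not improved:
...             break
...     return OptimizeResult(x=bestx, fun=besty, nit=niter,
...                           nfev=nfev, success=True)
>>> res = minimize(lambda x: (x[0] - 1.5)**2 + (x[1] + 0.5)**2,
...                [2.0, 2.0], method=custmin, options={'stepsize': 0.05})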
.. versionadded:: 0.11.0
References
----------
.. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function
Minimization. The Computer Journal 7: 308-13.
.. [2] Wright M H. 1996. Direct search methods: Once scorned, now
respectable, in Numerical Analysis 1995: Proceedings of the 1995
Dundee Biennial Conference in Numerical Analysis (Eds. D F
Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK.
191-208.
.. [3] Powell, M J D. 1964. An efficient method for finding the minimum of
a function of several variables without calculating derivatives. The
Computer Journal 7: 155-162.
.. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery.
Numerical Recipes (any edition), Cambridge University Press.
.. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization.
Springer New York.
.. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory
Algorithm for Bound Constrained Optimization. SIAM Journal on
Scientific and Statistical Computing 16 (5): 1190-1208.
.. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm
778: L-BFGS-B, FORTRAN routines for large scale bound constrained
optimization. ACM Transactions on Mathematical Software 23 (4):
550-560.
.. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method.
1984. SIAM Journal of Numerical Analysis 21: 770-778.
.. [9] Powell, M J D. A direct search optimization method that models
the objective and constraint functions by linear interpolation.
1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez
and J-P Hennart, Kluwer Academic (Dordrecht), 51-67.
.. [10] Powell M J D. Direct search algorithms for optimization
calculations. 1998. Acta Numerica 7: 287-336.
.. [11] Powell M J D. A view of algorithms for optimization without
derivatives. 2007. Cambridge University Technical Report DAMTP
2007/NA03
.. [12] Kraft, D. A software package for sequential quadratic
programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace
Center -- Institute for Flight Mechanics, Koln, Germany.
.. [13] Conn, A. R., Gould, N. I., and Toint, P. L.
Trust region methods. 2000. Siam. pp. 169-200.
.. [14] F. Lenders, C. Kirches, A. Potschka: "trlib: A vector-free
implementation of the GLTR method for iterative solution of
the trust region problem", :arxiv:`1611.04718`
.. [15] N. Gould, S. Lucidi, M. Roma, P. Toint: "Solving the
Trust-Region Subproblem using the Lanczos Method",
SIAM J. Optim., 9(2), 504--525, (1999).
.. [16] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. 1999.
An interior point algorithm for large-scale nonlinear programming.
SIAM Journal on Optimization 9.4: 877-900.
.. [17] Lalee, Marucha, Jorge Nocedal, and Todd Plantega. 1998. On the
implementation of an algorithm for large-scale equality constrained
optimization. SIAM Journal on Optimization 8.3: 682-706.
Examples
--------
Let us consider the problem of minimizing the Rosenbrock function. This
function (and its respective derivatives) is implemented in `rosen`
(resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`.
>>> from scipy.optimize import minimize, rosen, rosen_der
A simple application of the *Nelder-Mead* method is:
>>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2]
>>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6)
>>> res.x
array([ 1., 1., 1., 1., 1.])
Now using the *BFGS* algorithm, using the first derivative and a few
options:
>>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der,
... options={'gtol': 1e-6, 'disp': True})
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 26
Function evaluations: 31
Gradient evaluations: 31
>>> res.x
array([ 1., 1., 1., 1., 1.])
>>> print(res.message)
Optimization terminated successfully.
>>> res.hess_inv
array([[ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary
[ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269],
[ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151],
[ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ],
[ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]])
Next, consider a minimization problem with several constraints (namely
Example 16.4 from [5]_). The objective function is:
>>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
There are three constraints defined as:
>>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
And variables must be positive, hence the following bounds:
>>> bnds = ((0, None), (0, None))
The optimization problem is solved using the SLSQP method as:
>>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds,
... constraints=cons)
It should converge to the theoretical solution (1.4, 1.7).
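The finite-difference and quasi-Newton options described in the Notes can
also be combined; as a sketch (the tolerance below is chosen arbitrarily),
the gradient is estimated with '2-point' differences while the Hessian is
approximated with `SR1`:
>>> from scipy.optimize import SR1
>>> res = minimize(rosen, x0, method='trust-constr', jac='2-point',
...                hess=SR1(), options={'gtol': 1e-6})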
"""
x0 = np.atleast_1d(np.asarray(x0))
if x0.ndim != 1:
message = ('Use of `minimize` with `x0.ndim != 1` is deprecated. '
'Currently, singleton dimensions will be removed from '
'`x0`, but an error may be raised in the future.')
warn(message, DeprecationWarning, stacklevel=2)
x0 = np.squeeze(x0)
if x0.dtype.kind in np.typecodes["AllInteger"]:
x0 = np.asarray(x0, dtype=float)
if not isinstance(args, tuple):
args = (args,)
if method is None:
# Select automatically
if constraints:
method = 'SLSQP'
elif bounds is not None:
method = 'L-BFGS-B'
else:
method = 'BFGS'
if callable(method):
meth = "_custom"
else:
meth = method.lower()
if options is None:
options = {}
# check if optional parameters are supported by the selected method
# - jac
if meth in ('nelder-mead', 'powell', 'cobyla') and bool(jac):
warn('Method %s does not use gradient information (jac).' % method,
RuntimeWarning)
# - hess
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
'trust-krylov', 'trust-exact', '_custom') and hess is not None:
warn('Method %s does not use Hessian information (hess).' % method,
RuntimeWarning)
# - hessp
if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr',
'trust-krylov', '_custom') \
and hessp is not None:
warn('Method %s does not use Hessian-vector product '
'information (hessp).' % method, RuntimeWarning)
# - constraints or bounds
if (meth in ('cg', 'bfgs', 'newton-cg', 'dogleg', 'trust-ncg')
and (bounds is not None or np.any(constraints))):
warn('Method %s cannot handle constraints nor bounds.' % method,
RuntimeWarning)
if meth in ('nelder-mead', 'l-bfgs-b', 'tnc', 'powell') and np.any(constraints):
warn('Method %s cannot handle constraints.' % method,
RuntimeWarning)
if meth == 'cobyla' and bounds is not None:
warn('Method %s cannot handle bounds.' % method,
RuntimeWarning)
# - return_all
if (meth in ('l-bfgs-b', 'tnc', 'cobyla', 'slsqp') and
options.get('return_all', False)):
warn('Method %s does not support the return_all option.' % method,
RuntimeWarning)
# check gradient vector
if callable(jac):
pass
elif jac is True:
# fun returns func and grad
fun = MemoizeJac(fun)
jac = fun.derivative
elif (jac in FD_METHODS and
meth in ['trust-constr', 'bfgs', 'cg', 'l-bfgs-b', 'tnc', 'slsqp']):
# finite differences with relative step
pass
elif meth in ['trust-constr']:
# default jac calculation for this method
jac = '2-point'
elif jac is None or bool(jac) is False:
# this will cause e.g. LBFGS to use forward difference, absolute step
jac = None
else:
# default if jac option is not understood
jac = None
# set default tolerances
if tol is not None:
options = dict(options)
if meth == 'nelder-mead':
options.setdefault('xatol', tol)
options.setdefault('fatol', tol)
if meth in ('newton-cg', 'powell', 'tnc'):
options.setdefault('xtol', tol)
if meth in ('powell', 'l-bfgs-b', 'tnc', 'slsqp'):
options.setdefault('ftol', tol)
if meth in ('bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg',
'trust-ncg', 'trust-exact', 'trust-krylov'):
options.setdefault('gtol', tol)
if meth in ('cobyla', '_custom'):
options.setdefault('tol', tol)
if meth == 'trust-constr':
options.setdefault('xtol', tol)
options.setdefault('gtol', tol)
options.setdefault('barrier_tol', tol)
if meth == '_custom':
# custom method called before bounds and constraints are 'standardised'
# custom method should be able to accept whatever bounds/constraints
# are provided to it.
return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp,
bounds=bounds, constraints=constraints,
callback=callback, **options)
constraints = standardize_constraints(constraints, x0, meth)
remove_vars = False
if bounds is not None:
if meth in {"tnc", "slsqp", "l-bfgs-b"}:
# These methods can't take the finite-difference derivatives they
# need when a variable is fixed by the bounds. To avoid this issue,
# remove fixed variables from the problem.
# convert to new-style bounds so we only have to consider one case
bounds = standardize_bounds(bounds, x0, 'new')
# determine whether any variables are fixed
i_fixed = (bounds.lb == bounds.ub)
# determine whether finite differences are needed for any grad/jac
fd_needed = (not callable(jac))
for con in constraints:
if not callable(con.get('jac', None)):
fd_needed = True
# If finite differences are ever used, remove all fixed variables
# Always remove fixed variables for TNC; see gh-14565
remove_vars = i_fixed.any() and (fd_needed or meth == "tnc")
if remove_vars:
x_fixed = (bounds.lb)[i_fixed]
x0 = x0[~i_fixed]
bounds = _remove_from_bounds(bounds, i_fixed)
fun = _remove_from_func(fun, i_fixed, x_fixed)
if callable(callback):
callback = _remove_from_func(callback, i_fixed, x_fixed)
if callable(jac):
jac = _remove_from_func(jac, i_fixed, x_fixed, remove=1)
# make a copy of the constraints so the user's version doesn't
# get changed. (Shallow copy is ok)
constraints = [con.copy() for con in constraints]
for con in constraints: # yes, guaranteed to be a list
con['fun'] = _remove_from_func(con['fun'], i_fixed,
x_fixed, min_dim=1,
remove=0)
if callable(con.get('jac', None)):
con['jac'] = _remove_from_func(con['jac'], i_fixed,
x_fixed, min_dim=2,
remove=1)
bounds = standardize_bounds(bounds, x0, meth)
if meth == 'nelder-mead':
res = _minimize_neldermead(fun, x0, args, callback, bounds=bounds,
**options)
elif meth == 'powell':
res = _minimize_powell(fun, x0, args, callback, bounds, **options)
elif meth == 'cg':
res = _minimize_cg(fun, x0, args, jac, callback, **options)
elif meth == 'bfgs':
res = _minimize_bfgs(fun, x0, args, jac, callback, **options)
elif meth == 'newton-cg':
res = _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,
**options)
elif meth == 'l-bfgs-b':
res = _minimize_lbfgsb(fun, x0, args, jac, bounds,
callback=callback, **options)
elif meth == 'tnc':
res = _minimize_tnc(fun, x0, args, jac, bounds, callback=callback,
**options)
elif meth == 'cobyla':
res = _minimize_cobyla(fun, x0, args, constraints, callback=callback,
**options)
elif meth == 'slsqp':
res = _minimize_slsqp(fun, x0, args, jac, bounds,
constraints, callback=callback, **options)
elif meth == 'trust-constr':
res = _minimize_trustregion_constr(fun, x0, args, jac, hess, hessp,
bounds, constraints,
callback=callback, **options)
elif meth == 'dogleg':
res = _minimize_dogleg(fun, x0, args, jac, hess,
callback=callback, **options)
elif meth == 'trust-ncg':
res = _minimize_trust_ncg(fun, x0, args, jac, hess, hessp,
callback=callback, **options)
elif meth == 'trust-krylov':
res = _minimize_trust_krylov(fun, x0, args, jac, hess, hessp,
callback=callback, **options)
elif meth == 'trust-exact':
res = _minimize_trustregion_exact(fun, x0, args, jac, hess,
callback=callback, **options)
else:
raise ValueError('Unknown solver %s' % method)
if remove_vars:
res.x = _add_to_array(res.x, i_fixed, x_fixed)
res.jac = _add_to_array(res.jac, i_fixed, np.nan)
if "hess_inv" in res:
res.hess_inv = None # unknown
return res
def minimize_scalar(fun, bracket=None, bounds=None, args=(),
method='brent', tol=None, options=None):
"""Minimization of scalar function of one variable.
Parameters
----------
fun : callable
Objective function.
Scalar function, must return a scalar.
bracket : sequence, optional
For methods 'brent' and 'golden', `bracket` defines the bracketing
interval and can either have three items ``(a, b, c)`` so that
``a < b < c`` and ``fun(b) < fun(a), fun(c)`` or two items ``a`` and
``c`` which are assumed to be a starting interval for a downhill
bracket search (see `bracket`); it doesn't always mean that the
obtained solution will satisfy ``a <= x <= c``.
bounds : sequence, optional
For method 'bounded', `bounds` is mandatory and must have two items
corresponding to the optimization bounds.
args : tuple, optional
Extra arguments passed to the objective function.
method : str or callable, optional
Type of solver. Should be one of:
- :ref:`Brent <optimize.minimize_scalar-brent>`
- :ref:`Bounded <optimize.minimize_scalar-bounded>`
- :ref:`Golden <optimize.minimize_scalar-golden>`
- custom - a callable object (added in version 0.14.0), see below
See the 'Notes' section for details of each solver.
tol : float, optional
Tolerance for termination. For detailed control, use solver-specific
options.
options : dict, optional
A dictionary of solver options.
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
See :func:`show_options()` for solver-specific options.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
See also
--------
minimize : Interface to minimization algorithms for scalar multivariate
functions
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *Brent*.
Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's
algorithm to find a local minimum. The algorithm uses inverse
parabolic interpolation when possible to speed up convergence of
the golden section method.
Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the
golden section search technique. It uses an analog of the bisection
method to decrease the bracketed interval. It is usually
preferable to use the *Brent* method.
Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can
perform bounded minimization. It uses the Brent method to find a
local minimum in the interval x1 < xopt < x2.
**Custom minimizers**
It may be useful to pass a custom minimization method, for example
when using some library frontend to minimize_scalar. You can simply
pass a callable as the ``method`` parameter.
The callable is called as ``method(fun, args, **kwargs, **options)``
where ``kwargs`` corresponds to any other parameters passed to `minimize_scalar`
(such as `bracket`, `tol`, etc.), except the `options` dict, which has
its contents also passed as `method` parameters pair by pair. The method
shall return an `OptimizeResult` object.
The provided `method` callable must be able to accept (and possibly ignore)
arbitrary parameters; the set of parameters accepted by `minimize_scalar` may
expand in future versions and then these parameters will be passed to
the method. You can find an example in the scipy.optimize tutorial.
.. versionadded:: 0.11.0
Examples
--------
Consider the problem of minimizing the following function.
>>> def f(x):
... return (x - 2) * x * (x + 2)**2
Using the *Brent* method, we find the local minimum as:
>>> from scipy.optimize import minimize_scalar
>>> res = minimize_scalar(f)
>>> res.x
1.28077640403
Using the *Bounded* method, we find a local minimum with specified
bounds as:
>>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded')
>>> res.x
-2.0000002026
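For the *Brent* and *Golden* methods an explicit bracketing triple can be
supplied; the triple below is only one valid choice for this function
(it satisfies ``f(1) < f(0)`` and ``f(1) < f(2)``):
>>> res = minimize_scalar(f, bracket=(0, 1, 2), method='brent')
>>> abs(res.x - 1.28077640403) < 1e-6
True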
"""
if not isinstance(args, tuple):
args = (args,)
if callable(method):
meth = "_custom"
else:
meth = method.lower()
if options is None:
options = {}
if tol is not None:
options = dict(options)
if meth == 'bounded' and 'xatol' not in options:
warn("Method 'bounded' does not support relative tolerance in x; "
"defaulting to absolute tolerance.", RuntimeWarning)
options['xatol'] = tol
elif meth == '_custom':
options.setdefault('tol', tol)
else:
options.setdefault('xtol', tol)
# replace boolean "disp" option, if specified, by an integer value.
disp = options.get('disp')
if isinstance(disp, bool):
options['disp'] = 2 * int(disp)
if meth == '_custom':
return method(fun, args=args, bracket=bracket, bounds=bounds, **options)
elif meth == 'brent':
return _minimize_scalar_brent(fun, bracket, args, **options)
elif meth == 'bounded':
if bounds is None:
raise ValueError('The `bounds` parameter is mandatory for '
'method `bounded`.')
return _minimize_scalar_bounded(fun, bounds, args, **options)
elif meth == 'golden':
return _minimize_scalar_golden(fun, bracket, args, **options)
else:
raise ValueError('Unknown solver %s' % method)
def _remove_from_bounds(bounds, i_fixed):
"""Removes fixed variables from a `Bounds` instance"""
lb = bounds.lb[~i_fixed]
ub = bounds.ub[~i_fixed]
return Bounds(lb, ub) # don't mutate original Bounds object
def _remove_from_func(fun_in, i_fixed, x_fixed, min_dim=None, remove=0):
"""Wraps a function such that fixed variables need not be passed in"""
def fun_out(x_in, *args, **kwargs):
x_out = np.zeros_like(i_fixed, dtype=x_in.dtype)
x_out[i_fixed] = x_fixed
x_out[~i_fixed] = x_in
y_out = fun_in(x_out, *args, **kwargs)
y_out = np.array(y_out)
if min_dim == 1:
y_out = np.atleast_1d(y_out)
elif min_dim == 2:
y_out = np.atleast_2d(y_out)
if remove == 1:
y_out = y_out[..., ~i_fixed]
elif remove == 2:
y_out = y_out[~i_fixed, ~i_fixed]
return y_out
return fun_out
def _add_to_array(x_in, i_fixed, x_fixed):
"""Adds fixed variables back to an array"""
i_free = ~i_fixed
if x_in.ndim == 2:
i_free = i_free[:, None] @ i_free[None, :]
x_out = np.zeros_like(i_free, dtype=x_in.dtype)
x_out[~i_free] = x_fixed
x_out[i_free] = x_in.ravel()
return x_out
def standardize_bounds(bounds, x0, meth):
"""Converts bounds to the form required by the solver."""
if meth in {'trust-constr', 'powell', 'nelder-mead', 'new'}:
if not isinstance(bounds, Bounds):
lb, ub = old_bound_to_new(bounds)
bounds = Bounds(lb, ub)
elif meth in ('l-bfgs-b', 'tnc', 'slsqp', 'old'):
if isinstance(bounds, Bounds):
bounds = new_bounds_to_old(bounds.lb, bounds.ub, x0.shape[0])
return bounds
def standardize_constraints(constraints, x0, meth):
"""Converts constraints to the form required by the solver."""
all_constraint_types = (NonlinearConstraint, LinearConstraint, dict)
new_constraint_types = all_constraint_types[:-1]
if constraints is None:
constraints = []
elif isinstance(constraints, all_constraint_types):
constraints = [constraints]
else:
constraints = list(constraints) # ensure it's a mutable sequence
if meth == 'trust-constr':
for i, con in enumerate(constraints):
if not isinstance(con, new_constraint_types):
constraints[i] = old_constraint_to_new(i, con)
else:
# iterate over copy, changing original
for i, con in enumerate(list(constraints)):
if isinstance(con, new_constraint_types):
old_constraints = new_constraint_to_old(con, x0)
constraints[i] = old_constraints[0]
constraints.extend(old_constraints[1:]) # appends 1 if present
return constraints
|
|
import pytest
def _type_length():
# Not a fixture as we need to parameterize tests on this
from encoded.loadxl import ORDER
from pkg_resources import resource_stream
import codecs
import json
utf8 = codecs.getreader("utf-8")
return {
name: len(json.load(utf8(resource_stream('encoded', 'tests/data/inserts/%s.json' % name))))
for name in ORDER
if name != "access_key"
}
TYPE_LENGTH = _type_length()
PUBLIC_COLLECTIONS = [
'source',
'platform',
'treatment',
'lab',
'award',
'target',
'organism',
]
def test_home(anonhtmltestapp):
res = anonhtmltestapp.get('/', status=200)
assert res.body.startswith(b'<!DOCTYPE html>')
def test_home_json(testapp):
res = testapp.get('/', status=200)
assert res.json['@type']
def test_home_app_version(testapp):
res = testapp.get('/', status=200)
assert 'app_version' in res.json
def test_vary_html(anonhtmltestapp):
res = anonhtmltestapp.get('/', status=200)
assert res.vary is not None
assert 'Accept' in res.vary
def test_vary_json(anontestapp):
res = anontestapp.get('/', status=200)
assert res.vary is not None
assert 'Accept' in res.vary
@pytest.mark.parametrize('item_type', [k for k in TYPE_LENGTH if k != 'user'])
def test_collections_anon(workbook, anontestapp, item_type):
res = anontestapp.get('/' + item_type).follow(status=200)
assert '@graph' in res.json
@pytest.mark.parametrize('item_type', [k for k in TYPE_LENGTH if k != 'user'])
def test_html_collections_anon(workbook, anonhtmltestapp, item_type):
res = anonhtmltestapp.get('/' + item_type).follow(status=200)
assert res.body.startswith(b'<!DOCTYPE html>')
@pytest.mark.parametrize('item_type', TYPE_LENGTH)
def test_html_collections(workbook, htmltestapp, item_type):
res = htmltestapp.get('/' + item_type).follow(status=200)
assert res.body.startswith(b'<!DOCTYPE html>')
@pytest.mark.slow
@pytest.mark.parametrize('item_type', TYPE_LENGTH)
def test_html_pages(workbook, testapp, htmltestapp, item_type):
res = testapp.get('/%s?limit=all' % item_type).follow(status=200)
for item in res.json['@graph']:
res = htmltestapp.get(item['@id'])
assert res.body.startswith(b'<!DOCTYPE html>')
@pytest.mark.slow
@pytest.mark.parametrize('item_type', [k for k in TYPE_LENGTH if k != 'user'])
def test_html_server_pages(workbook, item_type, wsgi_server):
from webtest import TestApp
testapp = TestApp(wsgi_server)
res = testapp.get(
'/%s?limit=all' % item_type,
headers={'Accept': 'application/json'},
).follow(
status=200,
headers={'Accept': 'application/json'},
)
for item in res.json['@graph']:
res = testapp.get(item['@id'], status=200)
assert res.body.startswith(b'<!DOCTYPE html>')
assert b'Internal Server Error' not in res.body
@pytest.mark.parametrize('item_type', TYPE_LENGTH)
def test_json(testapp, item_type):
res = testapp.get('/' + item_type).follow(status=200)
assert res.json['@type']
def test_json_basic_auth(anonhtmltestapp):
from base64 import b64encode
from pyramid.compat import ascii_native_
url = '/'
value = "Authorization: Basic %s" % ascii_native_(b64encode(b'nobody:pass'))
res = anonhtmltestapp.get(url, headers={'Authorization': value}, status=401)
assert res.content_type == 'application/json'
def test_load_sample_data(
analysis_step,
analysis_step_run,
antibody_characterization,
antibody_lot,
award,
biosample,
biosample_characterization,
document,
experiment,
file,
lab,
library,
mouse_donor,
organism,
pipeline,
publication,
publication_data,
replicate,
software,
software_version,
source,
submitter,
target,
ucsc_browser_composite,
):
assert True, 'Fixtures have loaded sample data'
def test_abstract_collection(testapp, experiment):
testapp.get('/Dataset/{accession}'.format(**experiment))
testapp.get('/datasets/{accession}'.format(**experiment))
@pytest.mark.slow
@pytest.mark.parametrize(('item_type', 'length'), TYPE_LENGTH.items())
def test_load_workbook(workbook, testapp, item_type, length):
# testdata must come before testapp in the funcargs list for their
# savepoints to be correctly ordered.
res = testapp.get('/%s/?limit=all' % item_type).maybe_follow(status=200)
assert len(res.json['@graph']) == length
@pytest.mark.slow
def test_collection_limit(workbook, testapp):
res = testapp.get('/antibodies/?limit=2', status=200)
assert len(res.json['@graph']) == 2
def test_collection_post(testapp):
item = {
'name': 'human',
'scientific_name': 'Homo sapiens',
'taxon_id': '9606',
'status': 'released'
}
return testapp.post_json('/organism', item, status=201)
def test_collection_post_bad_json(testapp):
item = {'foo': 'bar'}
res = testapp.post_json('/organism', item, status=422)
assert res.json['errors']
def test_collection_post_malformed_json(testapp):
item = '{'
headers = {'Content-Type': 'application/json'}
res = testapp.post('/organism', item, status=400, headers=headers)
assert res.json['detail'].startswith('Expecting')
def test_collection_post_missing_content_type(testapp):
item = '{}'
testapp.post('/organism', item, status=415)
def test_collection_post_bad_(anontestapp):
from base64 import b64encode
from pyramid.compat import ascii_native_
value = "Authorization: Basic %s" % ascii_native_(b64encode(b'nobody:pass'))
anontestapp.post_json('/organism', {}, headers={'Authorization': value}, status=401)
def test_collection_actions_filtered_by_permission(workbook, testapp, anontestapp):
res = testapp.get('/pages/')
assert any(action for action in res.json.get('actions', []) if action['name'] == 'add')
res = anontestapp.get('/pages/')
assert not any(action for action in res.json.get('actions', []) if action['name'] == 'add')
def test_item_actions_filtered_by_permission(testapp, authenticated_testapp, source):
location = source['@id']
res = testapp.get(location)
assert any(action for action in res.json.get('actions', []) if action['name'] == 'edit')
res = authenticated_testapp.get(location)
assert not any(action for action in res.json.get('actions', []) if action['name'] == 'edit')
def test_collection_put(testapp, execute_counter):
initial = {
'name': 'human',
'scientific_name': 'Homo sapiens',
'taxon_id': '9606',
}
item_url = testapp.post_json('/organism', initial).location
with execute_counter.expect(1):
item = testapp.get(item_url).json
for key in initial:
assert item[key] == initial[key]
update = {
'name': 'mouse',
'scientific_name': 'Mus musculus',
'taxon_id': '10090',
}
testapp.put_json(item_url, update, status=200)
res = testapp.get('/' + item['uuid']).follow().json
for key in update:
assert res[key] == update[key]
def test_post_duplicate_uuid(testapp, mouse):
item = {
'uuid': mouse['uuid'],
'name': 'human',
'scientific_name': 'Homo sapiens',
'taxon_id': '9606',
}
testapp.post_json('/organism', item, status=409)
def test_user_effective_principals(submitter, lab, anontestapp, execute_counter):
email = submitter['email']
with execute_counter.expect(1):
res = anontestapp.get('/@@testing-user',
extra_environ={'REMOTE_USER': str(email)})
assert sorted(res.json['effective_principals']) == [
'group.submitter',
'lab.%s' % lab['uuid'],
'remoteuser.%s' % email,
'submits_for.%s' % lab['uuid'],
'system.Authenticated',
'system.Everyone',
'userid.%s' % submitter['uuid'],
'viewing_group.ENCODE3',
]
def test_page_toplevel(workbook, anontestapp):
res = anontestapp.get('/test-section/', status=200)
assert res.json['@id'] == '/test-section/'
res = anontestapp.get('/pages/test-section/', status=301)
assert res.location == 'http://localhost/test-section/'
def test_page_nested(workbook, anontestapp):
res = anontestapp.get('/test-section/subpage/', status=200)
assert res.json['@id'] == '/test-section/subpage/'
def test_page_nested_in_progress(workbook, anontestapp):
return anontestapp.get('/test-section/subpage-in-progress/', status=403)
def test_page_homepage(workbook, anontestapp, testapp):
res = anontestapp.get('/', status=200)
assert 'portal_title' in res.json
assert res.json['@id'] == '/'
def test_page_collection_default(workbook, anontestapp):
res = anontestapp.get('/pages/images/', status=200)
assert res.json['canonical_uri'] == '/images/'
res = anontestapp.get('/images/', status=200)
assert 'default_page' in res.json
assert res.json['default_page']['@id'] == '/pages/images/'
def test_jsonld_context(testapp):
res = testapp.get('/terms/')
assert res.json
def test_jsonld_term(testapp):
res = testapp.get('/terms/submitted_by')
assert res.json
@pytest.mark.slow
@pytest.mark.parametrize('item_type', TYPE_LENGTH)
def test_index_data_workbook(workbook, testapp, indexer_testapp, item_type):
res = testapp.get('/%s?limit=all' % item_type).follow(status=200)
for item in res.json['@graph']:
indexer_testapp.get(item['@id'] + '@@index-data')
@pytest.mark.parametrize('item_type', TYPE_LENGTH)
def test_profiles(testapp, item_type):
from jsonschema_serialize_fork import Draft4Validator
res = testapp.get('/profiles/%s.json' % item_type).maybe_follow(status=200)
errors = Draft4Validator.check_schema(res.json)
assert not errors
def test_bad_frame(testapp, human):
res = testapp.get(human['@id'] + '?frame=bad', status=404)
assert res.json['detail'] == '?frame=bad'
|
|
#!/usr/bin/env python3
"""
RDS Functions
"""
import sys
from datetime import datetime, timedelta
from operator import itemgetter
from exclusions import EXCLUDED_INSTANCES
import boto3
import botocore
## Session/client setup operations
def get_session(access_key_id, secret_access_key):
" Establishes a session with AWS "
return boto3.session.Session(
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key
)
## EC2 operations
def get_vpc_ids(client):
" Returns a list of VPC IDs in the account "
vpc_ids = []
vpcs = client.describe_vpcs()['Vpcs']
for vpc in vpcs:
vpc_ids.append(vpc['VpcId'])
return vpc_ids
def get_isolated_sgs(client):
" Returns a dict of rds-isolate SG IDs for each VPC in account. "
vpc_ids = get_vpc_ids(client)
isolated_sgs = {}
for vpc in vpc_ids:
sec_groups = client.describe_security_groups(
Filters=[
{
"Name": "vpc-id",
"Values": [vpc]
},
{
"Name": "group-name",
"Values": ["rds-isolate"]
}
]
)['SecurityGroups']
try:
isolated_sgs[vpc] = sec_groups[0]['GroupId']
except IndexError:
print("No rds-isolate group found for VPC: {}".format(vpc))
return isolated_sgs
## Cloudwatch operations
def get_connections_statistics(client, rds_instances):
" Returns a dict of all instances and their avg DB conns over all datapoints "
rds_stats = {}
for rds_instance in rds_instances:
stats = client.get_metric_statistics(
Namespace="AWS/RDS",
MetricName="DatabaseConnections",
Statistics=['Average'],
Period=57600,
StartTime=(datetime.today() - timedelta(days=3)),
EndTime=datetime.today(),
Dimensions=[
{
'Name': 'DBInstanceIdentifier',
'Value': rds_instance['DBInstanceIdentifier']
}
]
)['Datapoints']
datapoints = []
for stat in stats:
datapoints.append(stat['Average'])
if len(datapoints) > 0:
dp_conns = sum(datapoints)/float(len(datapoints))
rds_stats[rds_instance['DBInstanceIdentifier']] = dp_conns
else:
print("Instance: %s has no datapoints." % rds_instance['DBInstanceIdentifier'])
return rds_stats
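# Illustrative usage sketch (not part of the original module): given a
# CloudWatch client and a list of instances, the returned dict can be used to
# pick out instances averaging zero connections, which is the same idea
# prep_rds_instances_for_decomm() applies below. The client names are
# hypothetical.
#
#   stats = get_connections_statistics(cloudwatch, get_rds_instances(rds))
#   idle = [name for name, avg_conns in stats.items() if avg_conns == 0]
#   print("Idle instances: %s" % idle)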
## RDS operations
def get_rds_instances(client, vpc_id=None):
" Gets all RDS instances, per VPC, if specified. "
rds_instances = []
resp = client.describe_db_instances()
while 'Marker' in resp:
rds_instances.extend(resp['DBInstances'])
resp = client.describe_db_instances(Marker=resp['Marker'])
rds_instances.extend(resp['DBInstances'])
if not vpc_id:
return rds_instances
else:
return [r for r in rds_instances if r['DBSubnetGroup']['VpcId'] == vpc_id]
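# Minimal sketch (assumed client objects, example VPC ID) of both call forms
# of get_rds_instances(): the Marker loop above pages through every instance,
# and the optional vpc_id narrows the result to one VPC.
#
#   all_instances = get_rds_instances(rds)
#   vpc_instances = get_rds_instances(rds, vpc_id='vpc-0123456789abcdef0')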
def set_no_multiaz(client, rds_instance):
" Takes a rds instance obj and turns off MultiAZ "
try:
client.modify_db_instance(
DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
MultiAZ=False,
ApplyImmediately=True
)
except botocore.exceptions.ClientError:
print("Error setting no-multiaz on instance %s" % rds_instance['DBInstanceIdentifier'])
def set_security_group(client, rds_instance, sg_id):
" Sets the rds_instance Security Group to sg_id "
try:
client.modify_db_instance(
DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
VpcSecurityGroupIds=[sg_id]
)
except botocore.exceptions.ClientError:
print("Error setting SG on instance %s" % rds_instance['DBInstanceIdentifier'])
def set_instance_size(client, rds_instance, size=None):
" Sets instance to the smallest available size "
if not size:
available_sizes = client.describe_orderable_db_instance_options(
Engine=rds_instance['Engine']
)['OrderableDBInstanceOptions']
        # NOTE: assumes the first orderable option returned is the smallest class
        size = available_sizes[0]['DBInstanceClass']
try:
client.modify_db_instance(
DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
DBInstanceClass=size,
ApplyImmediately=True
)
except botocore.exceptions.ClientError:
print("Error setting size on instance %s" % rds_instance['DBInstanceIdentifier'])
def get_instances_with_sg(client, sg_id, vpc_id=None):
""" Gets all RDS instances that are using the sg_id """
rds_instances = get_rds_instances(client, vpc_id)
instances_with_sg = []
for instance in rds_instances:
security_groups = instance['VpcSecurityGroups']
for security_group in security_groups:
if security_group['VpcSecurityGroupId'] == sg_id:
instances_with_sg.append(instance)
return instances_with_sg
def get_snaps_for_instance(client, rds_instance, snapshot_type=''):
""" Gets all snapshots for a RDS instance"""
snapshots = []
resp = client.describe_db_snapshots(
DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
SnapshotType=snapshot_type
)
while 'Marker' in resp:
snapshots.extend(resp['DBSnapshots'])
resp = client.describe_db_snapshots(
DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
SnapshotType=snapshot_type,
Marker=resp['Marker']
)
snapshots.extend(resp['DBSnapshots'])
return snapshots
def get_latest_snap(client, rds_instance, debug=True):
""" Gets the latest snapshot for a RDS instance """
snapshots = get_snaps_for_instance(client, rds_instance, 'automated')
sorted_snapshots = sorted(snapshots, key=itemgetter('SnapshotCreateTime'), reverse=True)
if len(sorted_snapshots) == 0:
return None
if debug:
# for sorted_snapshot in sorted_snapshots:
# print("DEBUG: Snapshot %s, created on: %s" % (sorted_snapshot['DBSnapshotIdentifier'],
# sorted_snapshot['SnapshotCreateTime']))
print("DEBUG: The latest snap should be: %s" % sorted_snapshots[0]['DBSnapshotIdentifier'])
return sorted_snapshots[0]
def check_final_snap(client, rds_instance):
""" Check if the final snapshot has already been created """
snapshots = get_snaps_for_instance(client, rds_instance, 'manual')
for snapshot in snapshots:
if snapshot['DBSnapshotIdentifier'].startswith('%s-final-snapshot'
% rds_instance['DBInstanceIdentifier']):
return True
return False
def copy_snapshot(client, rds_instance, debug=True):
""" Copy a snapshot the latest automated snapshot """
latest_snap = get_latest_snap(client, rds_instance, debug)
try:
resp = client.copy_db_snapshot(
SourceDBSnapshotIdentifier=latest_snap['DBSnapshotIdentifier'],
TargetDBSnapshotIdentifier='%s-final-snapshot-%s'
% (rds_instance['DBInstanceIdentifier'],
datetime.today().strftime('%Y%m%d-%H%M%S')),
CopyTags=True
)
print("Copied final snapshot for %s, %s --> %s"
% (rds_instance['DBInstanceIdentifier'],
latest_snap['DBSnapshotIdentifier'],
resp['DBSnapshot']['DBSnapshotIdentifier']))
except botocore.exceptions.ClientError as exception:
print("Unable to take a snapshot of instance: %s" % rds_instance['DBInstanceIdentifier'])
print(exception)
def take_snapshot(client, rds_instance):
""" Takes a snapshot of an RDS instance """
try:
resp = client.create_db_snapshot(
DBSnapshotIdentifier='%s-final-snapshot' % rds_instance['DBInstanceIdentifier'],
DBInstanceIdentifier=rds_instance['DBInstanceIdentifier'],
)
print("Created final snapshot for %s, %s"
% (rds_instance['DBInstanceIdentifier'], resp['DBSnapshot']['DBSnapshotIdentifier']))
except botocore.exceptions.ClientError as exception:
print("Unable to take a snapshot of instance: %s" % rds_instance['DBInstanceIdentifier'])
print(exception)
## CloudFormation operations
def get_all_cfn_stacks(cfn):
""" Returns all CFN stacks """
stacks = []
resp = cfn.describe_stacks()
while 'NextToken' in resp:
stacks.extend(resp['Stacks'])
resp = cfn.describe_stacks(NextToken=resp['NextToken'])
stacks.extend(resp['Stacks'])
return stacks
def get_cfn_stack_for_rds(cfn, rds_instances, debug=True):
""" Gets all CFN stacks for the given RDS instances """
stacks = get_all_cfn_stacks(cfn)
old_stacks = []
for instance in rds_instances:
for stack in stacks:
if stack['StackName'] == instance['DBInstanceIdentifier']:
old_stacks.append(stack)
if debug:
print("Stack: %s" % stack['StackName'])
return old_stacks
def destroy_cfn_stack(cfn, stack, dry_run=True):
    """ Destroys a CloudFormation stack """
    if not dry_run:
        try:
            cfn.delete_stack(StackName=stack['StackName'])
        except botocore.exceptions.ClientError as exception:
            print("ERROR: Delete stack: %s failed with error: %s" % (stack['StackName'], exception))
        else:
            print("Deleted stack: %s" % stack['StackName'])
    else:
        print("DRYRUN: Would have deleted stack: %s" % stack['StackName'])
## Decommission workflow operations
def get_old_instances(ec2, rds, debug=True):
""" Gets RDS instances slated for decomm """
isolated_sgs = get_isolated_sgs(ec2)
old_instances = []
for group in isolated_sgs.values():
isolated_instances = get_instances_with_sg(rds, group)
for instance in isolated_instances:
old_instances.append(instance)
if debug:
for instance in old_instances:
print(instance['DBInstanceIdentifier'])
print("%s instances found." % len(old_instances))
return old_instances
def get_old_stacks(cfn, old_instances, debug=True):
""" Gets all of the stacks for the old RDS instances """
old_stacks = get_cfn_stack_for_rds(cfn, old_instances, debug)
if debug:
print("DEBUG: Old stacks found: %s" % len(old_stacks))
return old_stacks
def destroy_old_cfn_stacks(cfn, old_stacks, dry_run=True):
""" Destroys all old CFN stacks """
for stack in old_stacks:
destroy_cfn_stack(cfn, stack, dry_run)
def snapshot_old_rds_instances(rds, old_instances, dry_run=True, debug=True):
""" Performs a final snapshot on old RDS instances. """
for instance in old_instances:
has_final_snap = check_final_snap(rds, instance)
latest_snap = get_latest_snap(rds, instance, debug)
if not dry_run and latest_snap is not None and not has_final_snap:
copy_snapshot(rds, instance, debug)
elif not dry_run and latest_snap is not None and has_final_snap:
print("%s already has a final snapshot. Skipping." % instance['DBInstanceIdentifier'])
elif dry_run and latest_snap is not None and not has_final_snap:
print("DRYRUN: Would have copied a snapshot of %s from %s"
% (instance['DBInstanceIdentifier'], latest_snap['DBSnapshotIdentifier']))
elif dry_run and latest_snap is not None and has_final_snap:
print("DRYRUN: %s already has a final snapshot. Would have skipped."
% instance['DBInstanceIdentifier'])
else:
print("No automated snapshots found for %s." % instance['DBInstanceIdentifier'])
def prep_rds_instances_for_decomm(ec2, rds, cloudwatch, dry_run=True, debug=True):
"""
Finds RDS instances with low connection counts and
applies an isolated SG, sizes it down and sets to single AZ
"""
isolated_sgs = get_isolated_sgs(ec2)
all_rds_instances = get_rds_instances(rds)
all_rds_stats = get_connections_statistics(cloudwatch, all_rds_instances)
if debug:
print("DEBUG: Number of RDS instances found: %s" % len(all_rds_instances))
print("DEBUG: Isolated SGs {}".format(isolated_sgs))
print("DEBUG: All RDS Instances: ")
for instance in all_rds_instances:
print(instance['DBInstanceIdentifier'])
abandoned_instances = []
if len(EXCLUDED_INSTANCES) > 0:
print("\nThe following instances meet low connections criteria, but have been excluded.")
for key in all_rds_stats:
if all_rds_stats[key] == 0 and key not in EXCLUDED_INSTANCES:
abandoned_instances.append(key)
elif all_rds_stats[key] == 0 and key in EXCLUDED_INSTANCES:
print(key)
if debug:
print("DEBUG: Instance: %s. Connections: %s" % (key, all_rds_stats[key]))
if len(abandoned_instances) > 0:
print("\nThe following instances appear to be abandoned. Please investigate.")
for instance in abandoned_instances:
print(instance)
else:
print("\nNo instances appear to be abandoned.")
sys.exit(0)
print("\nTaking action on the following instances: ")
for rds_instance in all_rds_instances:
if rds_instance['DBInstanceIdentifier'] in abandoned_instances and dry_run:
print("DRYRUN: %s would have been isolated and downsized."
% rds_instance['DBInstanceIdentifier'])
elif rds_instance['DBInstanceIdentifier'] in abandoned_instances and not dry_run:
print("Isolating and downsizing instance: %s"
% rds_instance['DBInstanceIdentifier'])
set_security_group(rds,
rds_instance,
isolated_sgs[rds_instance['DBSubnetGroup']['VpcId']])
set_instance_size(rds,
rds_instance,
'db.t2.small')
set_no_multiaz(rds, rds_instance)
def main():
""" main execution """
dry_run = True
debug = True
session = get_session('', '')
ec2 = session.client('ec2')
rds = session.client('rds')
cdw = session.client('cloudwatch')
cfn = session.client('cloudformation')
# prep_rds_instances_for_decomm(ec2, rds, cdw, dry_run, debug)
old_instances = get_old_instances(ec2, rds, debug)
# snapshot_old_rds_instances(rds, old_instances, dry_run, debug)
old_stacks = get_old_stacks(cfn, old_instances, debug)
# destroy_old_cfn_stacks(cfn, old_stacks, dry_run)
if __name__ == '__main__':
    main()
|
|
from unittest.mock import patch, Mock
from django.conf import settings
from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from bulk_sms.models import Broadcast, Batch
from bulk_sms.tests.factories import BroadcastFactory
from libya_elections.tests.utils import ResponseCheckerMixin
from libya_site.tests.factories import UserFactory
from register.tests.factories import RegistrationCenterFactory
from staff.tests.base import StaffUserMixin
class BroadcastHelper(StaffUserMixin):
permissions = ['add_broadcast', 'browse_broadcast', 'read_broadcast', 'approve_broadcast']
def setUp(self):
self.staff_user = self.create_staff_user()
self.login(self.staff_user)
def create_staff_user(self):
user = UserFactory(username=self.username, email=self.email,
password=self.password)
user.is_staff = True
user.save()
return user
@staticmethod
def add_permissions(user, permissions):
for perm in permissions:
user.user_permissions.add(Permission.objects.get(codename=perm))
@staticmethod
def remove_permissions(user, permissions):
for perm in permissions:
user.user_permissions.remove(Permission.objects.get(codename=perm))
class BroadcastBreadTest(ResponseCheckerMixin, BroadcastHelper, TestCase):
def setUp(self):
super(BroadcastBreadTest, self).setUp()
self.broadcast = BroadcastFactory(message='test')
self.add_via_simple_form_url = reverse('add_broadcast')
self.add_via_csv_upload = reverse('upload_broadcast')
self.approve_url = reverse('approve_reject_broadcast',
kwargs={'broadcast_id': self.broadcast.id})
def test_browse_broadcasts(self):
perms = ['browse_broadcast']
# user with browse_broadcast permission can browse
self.add_permissions(self.staff_user, perms)
response = self.client.get(reverse('browse_broadcasts'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, template_name='bulk_sms/broadcast_browse.html')
# users without the browse_broadcast permission can't get to that page
self.remove_permissions(self.staff_user, perms)
self.assertForbidden(self.client.get(reverse('browse_broadcasts')))
def test_read_broadcast(self):
broadcast = BroadcastFactory()
perms = ['read_broadcast']
# user with read_broadcast permission can browse
self.add_permissions(self.staff_user, perms)
response = self.client.get(reverse('read_broadcast', kwargs={'pk': broadcast.id}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
template_name='bulk_sms/broadcast_approve_reject.html')
# users without the read_broadcast permission can't get to that page
self.remove_permissions(self.staff_user, perms)
self.assertForbidden(self.client.get(reverse('read_broadcast',
kwargs={'pk': broadcast.id})))
def test_add_broadcast_via_simple_form(self):
perms = ['add_broadcast']
data = {'audience': 'staff', 'message': 'test broadcasting message'}
broadcast_count = Broadcast.objects.count()
# users with add_broadcast permission can view the add broadcast form
self.add_permissions(self.staff_user, perms)
response = self.client.get(self.add_via_simple_form_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
template_name="bulk_sms/broadcast_add_via_form.html")
# users with add_broadcast permission can create broadcasts
response = self.client.post(self.add_via_simple_form_url, data=data)
self.assertEqual(response.status_code, 302)
new_broadcast_count = Broadcast.objects.count()
self.assertEqual(broadcast_count + 1, new_broadcast_count)
# users without add_broadcast permission can't create broadcasts
self.remove_permissions(self.staff_user, perms)
self.assertForbidden(self.client.post(self.add_via_simple_form_url, data=data))
def test_add_broadcast_via_csv_upload(self):
perms = ['add_broadcast']
mock_file = Mock()
mock_file.read.return_value = "218911234567,the quick brown fox etc.\n"
mock_file.name = 'foo.csv'
data = {'name': 'test_batch', 'description': 'test broadcasting description',
'csv': mock_file}
broadcast_count = Broadcast.objects.count()
# users with add_broadcast permission can view the add broadcast form
self.add_permissions(self.staff_user, perms)
response = self.client.get(self.add_via_csv_upload)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, template_name="bulk_sms/broadcast_add_via_csv.html")
# users with add_broadcast permission can create broadcasts
response = self.client.post(self.add_via_csv_upload, data=data)
self.assertEqual(response.status_code, 302)
new_broadcast_count = Broadcast.objects.count()
self.assertEqual(broadcast_count + 1, new_broadcast_count)
# users without add_broadcast permission can't create broadcasts
self.remove_permissions(self.staff_user, perms)
self.assertForbidden(self.client.post(self.add_via_csv_upload, data=data))
def test_center_required(self):
# center is a required field when creating a broadcast for a single
# center.
perms = ['add_broadcast']
data = {'audience': 'single_center', 'message': 'test broadcasting message'}
broadcast_count = Broadcast.objects.count()
center = RegistrationCenterFactory()
# form will have errors if the center field was not filled.
self.add_permissions(self.staff_user, perms)
response = self.client.post(self.add_via_simple_form_url, data=data)
self.assertEqual(response.status_code, 400)
self.assertTemplateUsed(response,
template_name="bulk_sms/broadcast_add_via_form.html")
errors = response.context['form'].errors
self.assertEqual(len(errors), 1)
# submitting all required fields
data['center'] = center.id
response = self.client.post(self.add_via_simple_form_url, data=data)
self.assertEqual(response.status_code, 302)
new_broadcast_count = Broadcast.objects.count()
self.assertEqual(broadcast_count + 1, new_broadcast_count)
@patch('bulk_sms.tasks.approve_broadcast.delay', autospec=True)
def test_approve_broadcast(self, approve_task):
# broadcasts can be approved by users with the `approve_broadcast` permission.
# approving a broadcast fires up the `approve_broadcast` task.
perms = ['approve_broadcast']
data = {'approve': True}
# user does not have permission to approve broadcasts
response = self.client.post(self.approve_url, data=data)
self.assertEqual(response.status_code, 403)
# give user permission to approve broadcasts
self.add_permissions(self.staff_user, perms)
self.client.post(self.approve_url, data=data)
# approve_broadcast task was fired
approve_task.assert_called_once_with(self.broadcast.id)
def test_reject_broadcast(self):
# broadcasts can be rejected.
self.assertEqual(self.broadcast.batch.status, Batch.PENDING)
perms = ['approve_broadcast']
data = {'reject': True}
# user does not have permission to approve broadcasts
response = self.client.post(self.approve_url, data=data)
self.assertEqual(response.status_code, 403)
# give user permission to approve broadcasts
self.add_permissions(self.staff_user, perms)
self.client.post(self.approve_url, data=data)
broadcast = Broadcast.objects.get(pk=self.broadcast.id)
self.assertEqual(broadcast.batch.status, Batch.REJECTED)
self.assertEqual(broadcast.reviewed_by, self.staff_user)
        # an already-approved broadcast can still be rejected
broadcast.batch.status = Batch.APPROVED
broadcast.batch.save()
self.client.post(self.approve_url, data=data)
broadcast = Broadcast.objects.get(pk=self.broadcast.id)
self.assertEqual(broadcast.batch.status, Batch.REJECTED)
self.assertEqual(broadcast.reviewed_by, self.staff_user)
class BroadcastModelTest(BroadcastHelper, TestCase):
def setUp(self):
super(BroadcastModelTest, self).setUp()
self.broadcast = Broadcast.objects.create(created_by=self.staff_user, message='test')
def test_batch(self):
# a batch is created the first time you save an instance.
self.assertIsInstance(self.broadcast.batch, Batch)
def test_status(self):
# the status for a broadcast is the same as that of the batch associated with
# it.
self.assertEqual(self.broadcast.batch.status, Batch.PENDING)
self.assertEqual(self.broadcast.status, _("Pending Approval"))
self.broadcast.batch.status = Batch.APPROVED
self.assertEqual(self.broadcast.status, self.broadcast.batch.get_status_display())
def test_get_messages(self):
# get_messages() yields tuples (phone_number, message, shortcode) for each individual
# in the audience.
# broadcast directed to staff users only (audience defaults to STAFF_ONLY)
broadcast = self.broadcast
broadcasting_message = "Broadcast for staff"
broadcast.message = broadcasting_message
with patch.object(Broadcast, 'get_numbers_for_staff') as staff_numbers:
phone_numbers = ['1', '2', '3']
staff_numbers.return_value = phone_numbers
messages = [message for message in self.broadcast.get_messages()]
staff_numbers.assert_called_once_with()
for index, (phone_number, message, shortcode) in enumerate(messages):
self.assertEqual(phone_number, phone_numbers[index])
self.assertEqual(message, broadcasting_message)
# STAFF_ONLY message, so from_shortcode should be REPORTS_SHORT_CODE
self.assertEqual(shortcode, settings.REPORTS_SHORT_CODE)
# broadcast directed to all registrants
broadcasting_message = "Broadcast for all registrants"
broadcast.audience = Broadcast.ALL_REGISTRANTS
broadcast.message = broadcasting_message
with patch.object(Broadcast, 'get_numbers_for_all_centers') as all_registrants:
phone_numbers = ['1', '2', '3']
all_registrants.return_value = phone_numbers
messages = [message for message in self.broadcast.get_messages()]
all_registrants.assert_called_once_with()
for index, (phone_number, message, shortcode) in enumerate(messages):
self.assertEqual(phone_number, phone_numbers[index])
self.assertEqual(message, broadcasting_message)
# ALL_REGISTRANTS message, so from_shortcode should be None
# (which will trigger default shortcode to be used)
self.assertEqual(shortcode, None)
# broadcasting message for a single center
broadcasting_message = "Broadcast for single center"
broadcast.audience = Broadcast.SINGLE_CENTER
broadcast.message = broadcasting_message
with patch.object(Broadcast, 'get_numbers_for_single_center') as single_center:
phone_numbers = ['1', '2', '3']
single_center.return_value = phone_numbers
messages = [message for message in self.broadcast.get_messages()]
single_center.assert_called_once_with()
for index, (phone_number, message, shortcode) in enumerate(messages):
self.assertEqual(phone_number, phone_numbers[index])
self.assertEqual(message, broadcasting_message)
# SINGLE_CENTER message, so from_shortcode should be None
# (which will trigger default shortcode to be used)
self.assertEqual(shortcode, None)
|
|
from __future__ import unicode_literals
import logging
import uuid
from django.apps import apps
from django.conf import settings
from django.contrib.contenttypes import fields as ct_fields
from django.contrib.contenttypes import models as ct_models
from django.core import validators
from django.core.mail import send_mail
from django.db import models
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.lru_cache import lru_cache
from model_utils.models import TimeStampedModel
import requests
from waldur_core.core.fields import JSONField, UUIDField
from waldur_core.core.utils import timestamp_to_datetime
from waldur_core.logging import managers
logger = logging.getLogger(__name__)
class UuidMixin(models.Model):
    # There is a circular dependency between the logging and core applications.
    # Core models are loggable, so we cannot use the core UUID mixin here.
class Meta:
abstract = True
uuid = UUIDField()
class Alert(UuidMixin, TimeStampedModel):
class Meta:
unique_together = ("content_type", "object_id", "alert_type", "is_closed")
class SeverityChoices(object):
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CHOICES = ((DEBUG, 'Debug'), (INFO, 'Info'), (WARNING, 'Warning'), (ERROR, 'Error'))
alert_type = models.CharField(max_length=50, db_index=True)
message = models.CharField(max_length=255)
severity = models.SmallIntegerField(choices=SeverityChoices.CHOICES)
closed = models.DateTimeField(null=True, blank=True)
    # Hack: this field stays blank until the alert is closed.
    # On closing it gets a unique value so the unique_together constraint is not violated.
is_closed = models.CharField(blank=True, max_length=32)
acknowledged = models.BooleanField(default=False)
context = JSONField(blank=True)
content_type = models.ForeignKey(ct_models.ContentType, null=True, on_delete=models.SET_NULL)
object_id = models.PositiveIntegerField(null=True)
scope = ct_fields.GenericForeignKey('content_type', 'object_id')
objects = managers.AlertManager()
def close(self):
self.closed = timezone.now()
self.is_closed = uuid.uuid4().hex
self.save()
def acknowledge(self):
self.acknowledged = True
self.save()
def cancel_acknowledgment(self):
self.acknowledged = False
self.save()
class AlertThresholdMixin(models.Model):
"""
It is expected that model has scope field.
"""
class Meta(object):
abstract = True
threshold = models.FloatField(default=0, validators=[validators.MinValueValidator(0)])
def is_over_threshold(self):
"""
If returned value is True, alert is generated.
"""
raise NotImplementedError
@classmethod
@lru_cache(maxsize=1)
def get_all_models(cls):
return [model for model in apps.get_models() if issubclass(model, cls)]
@classmethod
def get_checkable_objects(cls):
"""
It should return queryset of objects that should be checked.
"""
return cls.objects.all()
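# A minimal sketch (not part of this module) of how AlertThresholdMixin is
# intended to be used: a concrete model provides a `scope` relation plus the
# data needed by is_over_threshold(); returning True there is what triggers
# alert generation. The model and field names below are hypothetical.
#
#   class ProjectQuotaThreshold(AlertThresholdMixin):
#       scope = models.ForeignKey('structure.Project', related_name='+')
#       current_usage = models.FloatField(default=0)
#
#       def is_over_threshold(self):
#           return self.current_usage > self.threshold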
class EventTypesMixin(models.Model):
"""
    Mixin that adds event_types and event_groups fields.
"""
class Meta(object):
abstract = True
event_types = JSONField('List of event types')
event_groups = JSONField('List of event groups', default=list)
@classmethod
@lru_cache(maxsize=1)
def get_all_models(cls):
return [model for model in apps.get_models() if issubclass(model, cls)]
class BaseHook(EventTypesMixin, UuidMixin, TimeStampedModel):
class Meta:
abstract = True
user = models.ForeignKey(settings.AUTH_USER_MODEL)
is_active = models.BooleanField(default=True)
    # This timestamp is updated whenever an event is sent via this hook.
last_published = models.DateTimeField(default=timezone.now)
@property
def all_event_types(self):
self_types = set(self.event_types)
try:
hook_ct = ct_models.ContentType.objects.get_for_model(self)
base_types = SystemNotification.objects.get(hook_content_type=hook_ct)
except SystemNotification.DoesNotExist:
return self_types
else:
return self_types | set(base_types.event_types)
@classmethod
def get_active_hooks(cls):
return [obj for hook in cls.__subclasses__() for obj in hook.objects.filter(is_active=True)]
@classmethod
@lru_cache(maxsize=1)
def get_all_models(cls):
return [model for model in apps.get_models() if issubclass(model, cls)]
@classmethod
def get_all_content_types(cls):
ctypes = ct_models.ContentType.objects.get_for_models(*cls.get_all_models())
ids = [ctype.id for ctype in ctypes.values()]
return ct_models.ContentType.objects.filter(id__in=ids)
class WebHook(BaseHook):
class ContentTypeChoices(object):
JSON = 1
FORM = 2
CHOICES = ((JSON, 'json'), (FORM, 'form'))
destination_url = models.URLField()
content_type = models.SmallIntegerField(
choices=ContentTypeChoices.CHOICES,
default=ContentTypeChoices.JSON
)
def process(self, event):
logger.debug('Submitting web hook to URL %s, payload: %s', self.destination_url, event)
# encode event as JSON
if self.content_type == WebHook.ContentTypeChoices.JSON:
requests.post(self.destination_url, json=event, verify=settings.VERIFY_WEBHOOK_REQUESTS)
# encode event as form
elif self.content_type == WebHook.ContentTypeChoices.FORM:
requests.post(self.destination_url, data=event, verify=settings.VERIFY_WEBHOOK_REQUESTS)
class PushHook(BaseHook):
class Type:
IOS = 1
ANDROID = 2
CHOICES = ((IOS, 'iOS'), (ANDROID, 'Android'))
class Meta:
unique_together = 'user', 'device_id', 'type'
type = models.SmallIntegerField(choices=Type.CHOICES)
device_id = models.CharField(max_length=255, null=True, unique=True)
device_manufacturer = models.CharField(max_length=255, null=True, blank=True)
device_model = models.CharField(max_length=255, null=True, blank=True)
token = models.CharField(max_length=255, null=True, unique=True)
def process(self, event):
""" Send events as push notification via Google Cloud Messaging.
Expected settings as follows:
# https://developers.google.com/mobile/add
WALDUR_CORE['GOOGLE_API'] = {
'NOTIFICATION_TITLE': "Waldur notification",
'Android': {
'server_key': 'AIzaSyA2_7UaVIxXfKeFvxTjQNZbrzkXG9OTCkg',
},
'iOS': {
'server_key': 'AIzaSyA34zlG_y5uHOe2FmcJKwfk2vG-3RW05vk',
}
}
"""
conf = settings.WALDUR_CORE.get('GOOGLE_API') or {}
keys = conf.get(dict(self.Type.CHOICES)[self.type])
if not keys or not self.token:
return
endpoint = 'https://gcm-http.googleapis.com/gcm/send'
headers = {
'Content-Type': 'application/json',
'Authorization': 'key=%s' % keys['server_key'],
}
payload = {
'to': self.token,
'notification': {
'body': event.get('message', 'New event'),
'title': conf.get('NOTIFICATION_TITLE', 'Waldur notification'),
'image': 'icon',
},
'data': {
'event': event
},
}
if self.type == self.Type.IOS:
payload['content-available'] = '1'
logger.debug('Submitting GCM push notification with headers %s, payload: %s' % (headers, payload))
requests.post(endpoint, json=payload, headers=headers)
class EmailHook(BaseHook):
email = models.EmailField(max_length=75)
def process(self, event):
if not self.email:
logger.debug('Skipping processing of email hook (PK=%s) because email is not defined' % self.pk)
return
# Prevent mutations of event because otherwise subsequent hook processors would fail
context = event.copy()
subject = 'Notifications from Waldur'
context['timestamp'] = timestamp_to_datetime(event['timestamp'])
text_message = context['message']
html_message = render_to_string('logging/email.html', {'events': [context]})
logger.debug('Submitting email hook to %s, payload: %s', self.email, context)
send_mail(subject, text_message, settings.DEFAULT_FROM_EMAIL, [self.email], html_message=html_message)
class SystemNotification(EventTypesMixin, models.Model):
hook_content_type = models.OneToOneField(ct_models.ContentType, related_name='+')
|
|
# -*- coding: utf-8 -*-
import cPickle
import json
import logging
from rest_framework_gis import serializers as geo_serializers
from django.contrib.gis.geos import GEOSGeometry
from django.utils import timezone
from django.utils.translation import ugettext as _
from rest_framework import serializers
import validators
from jobs.models import (
ExportConfig, ExportFormat, Job, Region, RegionMask, Tag
)
from tasks.models import (
ExportRun, ExportTask, ExportTaskException, ExportTaskResult
)
try:
    from collections import OrderedDict
except ImportError:
    # Python 2.6 fallback
    from ordereddict import OrderedDict
# Get an instance of a logger
logger = logging.getLogger(__name__)
"""
class GroupSerializer(serializers.Serializer):
id = serializers.IntegerField()
name = serializers.CharField()
class UserSerializer(serializers.Serializer):
id = serializers.IntegerField()
username = serializers.CharField()
class UserGroupSerializer(serializers.Serializer):
id = serializers.IntegerField()
username = serializers.CharField()
groups = GroupSerializer(many=True)
"""
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
fields = ('key', 'value', 'data_model', 'geom_types')
class SimpleExportConfigSerializer(serializers.Serializer):
uid = serializers.UUIDField(read_only=True)
name = serializers.CharField()
config_type = serializers.CharField()
filename = serializers.CharField()
published = serializers.BooleanField()
created = serializers.SerializerMethodField()
url = serializers.HyperlinkedIdentityField(
view_name='api:configs-detail',
lookup_field='uid'
)
def get_created(self, obj):
return obj.created_at
class ExportConfigSerializer(serializers.Serializer):
uid = serializers.UUIDField(read_only=True)
url = serializers.HyperlinkedIdentityField(
view_name='api:configs-detail',
lookup_field='uid'
)
name = serializers.CharField(max_length=255)
config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION', 'TRANSFORM'])
filename = serializers.CharField(max_length=255, read_only=True, default='')
size = serializers.SerializerMethodField()
content_type = serializers.CharField(max_length=50, read_only=True)
upload = serializers.FileField(allow_empty_file=False, max_length=100)
published = serializers.BooleanField()
created = serializers.SerializerMethodField()
owner = serializers.SerializerMethodField(read_only=True)
user = serializers.HiddenField(
default=serializers.CurrentUserDefault()
)
def create(self, validated_data):
logger.debug(validated_data)
return ExportConfig.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.config_type = validated_data.get('config_type', instance.config_type)
instance.upload.delete(False) # delete the old file..
instance.upload = validated_data.get('upload', instance.upload)
instance.name = validated_data.get('name', instance.name)
instance.filename = validated_data.get('filename', instance.filename)
instance.content_type = validated_data.get('content_type', instance.content_type)
instance.updated_at = timezone.now()
instance.save()
return instance
def validate(self, data):
logger.debug(data)
upload = data['upload']
config_type = data['config_type']
content_type = validators.validate_content_type(upload, config_type)
data['content_type'] = content_type
fname = data['upload'].name
data['filename'] = fname.replace(' ', '_').lower()
return data
def get_size(self, obj):
size = obj.upload.size
return size
def get_created(self, obj):
return obj.created_at
def get_owner(self, obj):
return obj.user.username
class ExportTaskResultSerializer(serializers.ModelSerializer):
url = serializers.SerializerMethodField()
size = serializers.SerializerMethodField()
class Meta:
model = ExportTaskResult
fields = ('filename', 'size', 'url',)
def get_url(self, obj):
request = self.context['request']
return request.build_absolute_uri(obj.download_url)
def get_size(self, obj):
return "{0:.3f} MB".format(obj.size)
class ExportTaskExceptionSerializer(serializers.ModelSerializer):
exception = serializers.SerializerMethodField()
class Meta:
model = ExportTaskException
fields = ('exception',)
def get_exception(self, obj):
exc_info = cPickle.loads(str(obj.exception)).exc_info
return str(exc_info[1])
class ExportTaskSerializer(serializers.ModelSerializer):
result = serializers.SerializerMethodField()
errors = serializers.SerializerMethodField()
started_at = serializers.SerializerMethodField()
finished_at = serializers.SerializerMethodField()
duration = serializers.SerializerMethodField()
url = serializers.HyperlinkedIdentityField(
view_name='api:tasks-detail',
lookup_field='uid'
)
class Meta:
model = ExportTask
fields = ('uid', 'url', 'name', 'status', 'started_at', 'finished_at', 'duration', 'result', 'errors',)
def get_result(self, obj):
try:
result = obj.result
serializer = ExportTaskResultSerializer(result, many=False, context=self.context)
return serializer.data
except ExportTaskResult.DoesNotExist as e:
return None # no result yet
def get_errors(self, obj):
try:
errors = obj.exceptions
serializer = ExportTaskExceptionSerializer(errors, many=True, context=self.context)
return serializer.data
except ExportTaskException.DoesNotExist as e:
return None
def get_started_at(self, obj):
if (not obj.started_at):
return None # not started yet
else:
return obj.started_at
def get_finished_at(self, obj):
if (not obj.finished_at):
return None # not finished yet
else:
return obj.finished_at
def get_duration(self, obj):
started = obj.started_at
finished = obj.finished_at
if started and finished:
return str(finished - started)
else:
return None # can't compute yet
class SimpleJobSerializer(serializers.Serializer):
uid = serializers.SerializerMethodField()
name = serializers.CharField()
description = serializers.CharField()
url = serializers.HyperlinkedIdentityField(
view_name='api:jobs-detail',
lookup_field='uid'
)
extent = serializers.SerializerMethodField()
def get_uid(self, obj):
return obj.uid
def get_extent(self, obj):
uid = str(obj.uid)
name = obj.name
geom = obj.the_geom
geometry = json.loads(GEOSGeometry(geom).geojson)
feature = OrderedDict()
feature['type'] = 'Feature'
feature['properties'] = {'uid': uid, 'name': name}
feature['geometry'] = geometry
return feature
class ExportRunSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(
view_name='api:runs-detail',
lookup_field='uid'
)
job = SimpleJobSerializer()
tasks = ExportTaskSerializer(many=True)
finished_at = serializers.SerializerMethodField()
duration = serializers.SerializerMethodField()
user = serializers.SerializerMethodField()
class Meta:
model = ExportRun
fields = ('uid', 'url', 'started_at', 'finished_at', 'duration', 'user', 'status', 'job', 'tasks')
def get_finished_at(self, obj):
if (not obj.finished_at):
return {}
else:
return obj.finished_at
def get_duration(self, obj):
started = obj.started_at
finished = obj.finished_at
if started and finished:
return str(finished - started)
else:
return None
def get_user(self, obj):
return obj.user.username
class UserSerializer(serializers.Serializer):
id = serializers.IntegerField()
class RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):
"""
Returns a GeoJSON representation of the region mask.
"""
class Meta:
model = RegionMask
geo_field = 'the_geom'
fields = ('the_geom',)
class RegionSerializer(geo_serializers.GeoFeatureModelSerializer):
"""
Serializer returning GeoJSON representation of Regions.
"""
url = serializers.HyperlinkedIdentityField(
view_name='api:regions-detail',
lookup_field='uid'
)
id = serializers.SerializerMethodField()
class Meta:
model = Region
geo_field = 'the_geom'
fields = ('id', 'uid', 'name', 'description', 'url', 'the_geom')
def get_id(self, obj):
return obj.uid
class SimpleRegionSerializer(serializers.ModelSerializer):
"""
Serializer for returning Region data without geometry.
"""
url = serializers.HyperlinkedIdentityField(
view_name='api:regions-detail',
lookup_field='uid'
)
class Meta:
model = Region
fields = ('uid', 'name', 'description', 'url')
class ExportFormatSerializer(serializers.ModelSerializer):
"""
Representation of ExportFormat.
"""
url = serializers.HyperlinkedIdentityField(
view_name='api:formats-detail',
lookup_field='slug'
)
class Meta:
model = ExportFormat
fields = ('uid', 'url', 'slug', 'name', 'description')
class JobSerializer(serializers.Serializer):
    """
    Job Serializer.
    """
    # Export format choices are hard-coded rather than loaded at runtime
    # because the MultipleChoiceField would read from the non-test db during
    # tests; see the commented runtime-loading sketch after the `formats`
    # field below.
EXPORT_FORMAT_CHOICES = (
('shp', 'Shapefile Format'),
('obf', 'OBF Format'),
('kml', 'KML Format'),
('garmin', 'Garmin Format'),
('sqlite', 'SQLITE Format'),
('thematic', 'Thematic Shapefile Format')
)
formats = serializers.MultipleChoiceField(
choices=EXPORT_FORMAT_CHOICES,
allow_blank=False,
write_only=True,
error_messages={
'invalid_choice': _("invalid export format."),
'not_a_list': _('Expected a list of items but got type "{input_type}".')
}
)
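    # Commented sketch of the runtime-loaded alternative mentioned above,
    # assuming ExportFormat rows carry `slug` and `name` (as the
    # ExportFormatSerializer fields suggest). Left disabled because the
    # MultipleChoiceField would read from the non-test db during tests.
    #
    # formats = serializers.MultipleChoiceField(
    #     choices=[(fmt.slug, fmt.name) for fmt in ExportFormat.objects.all()],
    #     allow_blank=False,
    #     write_only=True
    # )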
uid = serializers.UUIDField(read_only=True)
url = serializers.HyperlinkedIdentityField(
view_name='api:jobs-detail',
lookup_field='uid'
)
name = serializers.CharField(
max_length=100,
)
description = serializers.CharField(
max_length=255,
)
event = serializers.CharField(
max_length=100,
allow_blank=True
)
created_at = serializers.DateTimeField(read_only=True)
owner = serializers.SerializerMethodField(read_only=True)
exports = serializers.SerializerMethodField()
configurations = serializers.SerializerMethodField()
published = serializers.BooleanField(required=False)
feature_save = serializers.BooleanField(required=False)
feature_pub = serializers.BooleanField(required=False)
# configs = ExportConfigSerializer(many=True)
xmin = serializers.FloatField(
max_value=180, min_value=-180, write_only=True,
error_messages={
'required': _('xmin is required.'),
'invalid': _('invalid xmin value.'),
}
)
ymin = serializers.FloatField(
max_value=90, min_value=-90, write_only=True,
error_messages={
'required': _('ymin is required.'),
'invalid': _('invalid ymin value.'),
}
)
xmax = serializers.FloatField(
max_value=180, min_value=-180, write_only=True,
error_messages={
'required': _('xmax is required.'),
'invalid': _('invalid xmax value.'),
}
)
ymax = serializers.FloatField(
max_value=90, min_value=-90, write_only=True,
error_messages={
'required': _('ymax is required.'),
'invalid': _('invalid ymax value.'),
}
)
region = SimpleRegionSerializer(read_only=True)
extent = serializers.SerializerMethodField(read_only=True)
user = serializers.HiddenField(
default=serializers.CurrentUserDefault()
)
tags = serializers.SerializerMethodField()
def create(self, validated_data):
return Job.objects.create(**validated_data)
def update(self, instance, validated_data):
pass
def validate(self, data):
user = data['user']
validators.validate_formats(data)
extents = validators.validate_bbox_params(data)
bbox = validators.validate_bbox(extents, user=user)
the_geom = GEOSGeometry(bbox, srid=4326)
data['the_geom'] = the_geom
regions = Region.objects.filter(the_geom__intersects=the_geom).intersection(the_geom, field_name='the_geom')
sorted_regions = sorted(regions.all(), key=lambda a: a.intersection.area, reverse=True) # order by largest area of intersection
data['region'] = validators.validate_region(sorted_regions)
# remove unwanted fields
        for key in ('xmin', 'ymin', 'xmax', 'ymax', 'formats'):
            data.pop(key)
return data
def get_extent(self, obj):
uid = str(obj.uid)
name = obj.name
geom = obj.the_geom
geometry = json.loads(GEOSGeometry(geom).geojson)
feature = OrderedDict()
feature['type'] = 'Feature'
feature['properties'] = {'uid': uid, 'name': name}
feature['geometry'] = geometry
return feature
def get_exports(self, obj):
        formats = list(obj.formats.all())
serializer = ExportFormatSerializer(formats, many=True, context={'request': self.context['request']})
return serializer.data
def get_configurations(self, obj):
configs = obj.configs.all()
serializer = SimpleExportConfigSerializer(configs, many=True,
context={'request': self.context['request']})
return serializer.data
def get_tags(self, obj):
tags = obj.tags.all()
serializer = TagSerializer(tags, many=True)
return serializer.data
def get_owner(self, obj):
return obj.user.username
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2012 RedHat Inc.
# Copyright 2018 Verizon Wireless
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all storage backends"""
from functools import wraps
import logging
from oslo_config import cfg
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import units
import six
from glance_store import capabilities
from glance_store import exceptions
from glance_store.i18n import _
LOG = logging.getLogger(__name__)
_MULTI_BACKEND_OPTS = [
cfg.StrOpt('store_description',
help=_("""
This option will be used to provide a constructive information about
the store backend to end users. Using /v2/stores-info call user can
seek more information on all available backends.
"""))
]
class Store(capabilities.StoreCapability):
OPTIONS = None
MULTI_BACKEND_OPTIONS = _MULTI_BACKEND_OPTS
READ_CHUNKSIZE = 4 * units.Mi # 4M
WRITE_CHUNKSIZE = READ_CHUNKSIZE
def __init__(self, conf, backend=None):
"""
Initialize the Store
"""
super(Store, self).__init__()
self.conf = conf
self.backend_group = backend
self.store_location_class = None
self._url_prefix = None
try:
if self.OPTIONS is not None:
group = 'glance_store'
if self.backend_group:
group = self.backend_group
if self.MULTI_BACKEND_OPTIONS is not None:
self.conf.register_opts(
self.MULTI_BACKEND_OPTIONS, group=group)
self.conf.register_opts(self.OPTIONS, group=group)
except cfg.DuplicateOptError:
pass
@property
def url_prefix(self):
return self._url_prefix
def configure(self, re_raise_bsc=False):
"""
Configure the store to use the stored configuration options
and initialize capabilities based on current configuration.
Any store that needs special configuration should implement
this method.
"""
try:
self.configure_add()
except exceptions.BadStoreConfiguration as e:
self.unset_capabilities(capabilities.BitMasks.WRITE_ACCESS)
msg = (_(u"Failed to configure store correctly: %s "
"Disabling add method.")
% encodeutils.exception_to_unicode(e))
LOG.warning(msg)
if re_raise_bsc:
raise
finally:
self.update_capabilities()
def get_schemes(self):
"""
Returns a tuple of schemes which this store can handle.
"""
raise NotImplementedError
def get_store_location_class(self):
"""
Returns the store location class that is used by this store.
"""
if not self.store_location_class:
class_name = "%s.StoreLocation" % (self.__module__)
LOG.debug("Late loading location class %s", class_name)
self.store_location_class = importutils.import_class(class_name)
return self.store_location_class
def configure_add(self):
"""
This is like `configure` except that it's specifically for
configuring the store to accept objects.
If the store was not able to successfully configure
itself, it should raise `exceptions.BadStoreConfiguration`.
"""
# NOTE(flaper87): This should probably go away
@capabilities.check
def get(self, location, offset=0, chunk_size=None, context=None):
"""
Takes a `glance_store.location.Location` object that indicates
where to find the image file, and returns a tuple of generator
(for reading the image file) and image_size
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
:raises: `glance.exceptions.NotFound` if image does not exist
"""
raise NotImplementedError
def get_size(self, location, context=None):
"""
Takes a `glance_store.location.Location` object that indicates
where to find the image file, and returns the size
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
:raises: `glance_store.exceptions.NotFound` if image does not exist
"""
raise NotImplementedError
    # NOTE(rosmaita): use the @glance_store.driver.back_compat_add
    # annotation on implementations for backward compatibility with
    # pre-0.26.0 add(). Backward compatibility is needed because
    # pre-0.26.0 returned a 4-tuple, while this returns a 5-tuple.
@capabilities.check
def add(self, image_id, image_file, image_size, hashing_algo,
context=None, verifier=None):
"""
Stores an image file with supplied identifier to the backend
storage system and returns a tuple containing information
about the stored image.
:param image_id: The opaque image identifier
:param image_file: The image data to write, as a file-like object
:param image_size: The size of the image data to write, in bytes
:param hashing_algo: A hashlib algorithm identifier (string)
:param context: A context object
:param verifier: An object used to verify signatures for images
:returns: tuple of: (1) URL in backing store, (2) bytes written,
(3) checksum, (4) multihash value, and (5) a dictionary
with storage system specific information
:raises: `glance_store.exceptions.Duplicate` if the image already
exists
"""
raise NotImplementedError
@capabilities.check
def delete(self, location, context=None):
"""
Takes a `glance_store.location.Location` object that indicates
where to find the image file to delete
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
:raises: `glance_store.exceptions.NotFound` if image does not exist
"""
raise NotImplementedError
def set_acls(self, location, public=False, read_tenants=None,
write_tenants=None, context=None):
"""
Sets the read and write access control list for an image in the
backend store.
:param location: `glance_store.location.Location` object, supplied
from glance_store.location.get_location_from_uri()
:param public: A boolean indicating whether the image should be public.
:param read_tenants: A list of tenant strings which should be granted
read access for an image.
:param write_tenants: A list of tenant strings which should be granted
write access for an image.
"""
raise NotImplementedError
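# Minimal sketch (not part of glance_store) of what a concrete driver built on
# Store looks like: configure_add() validates driver options and raises
# BadStoreConfiguration on failure, while get_schemes() advertises the URI
# schemes the driver handles. The store and option names below are
# hypothetical.
#
#   class DummyStore(Store):
#       OPTIONS = [cfg.StrOpt('dummy_data_dir', help='Where to keep image data.')]
#
#       def get_schemes(self):
#           return ('dummy',)
#
#       def configure_add(self):
#           group = self.backend_group or 'glance_store'
#           if not getattr(self.conf, group).dummy_data_dir:
#               raise exceptions.BadStoreConfiguration(
#                   store_name='dummy', reason='dummy_data_dir is not set')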
def back_compat_add(store_add_fun):
"""
Provides backward compatibility for the 0.26.0+ Store.add() function.
In 0.26.0, the 'hashing_algo' parameter is introduced and Store.add()
returns a 5-tuple containing a computed 'multihash' value.
This wrapper behaves as follows:
If no hashing_algo identifier is supplied as an argument, the response
is the pre-0.26.0 4-tuple of::
(backend_url, bytes_written, checksum, metadata_dict)
If a hashing_algo is supplied, the response is a 5-tuple::
(backend_url, bytes_written, checksum, multihash, metadata_dict)
The wrapper detects the presence of a 'hashing_algo' argument both
by examining named arguments and positionally.
"""
@wraps(store_add_fun)
def add_adapter(*args, **kwargs):
"""
Wrapper for the store 'add' function. If no hashing_algo identifier
        is supplied, the response is the pre-0.26.0 4-tuple of::
(backend_url, bytes_written, checksum, metadata_dict)
If a hashing_algo is supplied, the response is a 5-tuple::
(backend_url, bytes_written, checksum, multihash, metadata_dict)
"""
# strategy: assume this until we determine otherwise
back_compat_required = True
# specify info about 0.26.0 Store.add() call (can't introspect
# this because the add method is wrapped by the capabilities
# check)
p_algo = 4
max_args = 7
num_args = len(args)
num_kwargs = len(kwargs)
if num_args + num_kwargs == max_args:
# everything is present, including hashing_algo
back_compat_required = False
elif ('hashing_algo' in kwargs or
(num_args >= p_algo + 1 and isinstance(args[p_algo],
six.string_types))):
# there is a hashing_algo argument present
back_compat_required = False
else:
# this is a pre-0.26.0-style call, so let's figure out
# whether to insert the hashing_algo in the args or kwargs
if kwargs and 'image_' in ''.join(kwargs):
# if any of the image_* is named, everything after it
# must be named as well, so slap the algo into kwargs
kwargs['hashing_algo'] = 'md5'
else:
args = args[:p_algo] + ('md5',) + args[p_algo:]
# business time
(backend_url,
bytes_written,
checksum,
multihash,
metadata_dict) = store_add_fun(*args, **kwargs)
if back_compat_required:
return (backend_url, bytes_written, checksum, metadata_dict)
return (backend_url, bytes_written, checksum, multihash,
metadata_dict)
return add_adapter
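# Illustrative (hypothetical) use of back_compat_add: decorating a driver's
# add() lets pre-0.26.0 callers keep receiving the 4-tuple while callers that
# pass hashing_algo get the full 5-tuple. The DummyStore body is a sketch.
#
#   class DummyStore(Store):
#       @back_compat_add
#       @capabilities.check
#       def add(self, image_id, image_file, image_size, hashing_algo,
#               context=None, verifier=None):
#           # ... write image_file to the backend, hashing with hashing_algo ...
#           return ('dummy://%s' % image_id, image_size, 'checksum',
#                   'multihash', {})
#
#   # old-style call (no hashing_algo) -> (url, bytes_written, checksum, metadata)
#   # new-style call (with hashing_algo) -> 5-tuple including the multihash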
|
|
import logging
import sys
import unittest
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from urllib3 import encode_multipart_formdata, HTTPConnectionPool
from urllib3.exceptions import (
EmptyPoolError,
DecodeError,
MaxRetryError,
TimeoutError,
)
from urllib3.packages.six import u
from socket import timeout as SocketTimeout
from dummyserver.testcase import HTTPDummyServerTestCase
log = logging.getLogger('urllib3.connectionpool')
log.setLevel(logging.NOTSET)
log.addHandler(logging.StreamHandler(sys.stdout))
class TestConnectionPool(HTTPDummyServerTestCase):
def setUp(self):
self.pool = HTTPConnectionPool(self.host, self.port)
def test_get(self):
r = self.pool.request('GET', '/specific_method',
fields={'method': 'GET'})
self.assertEqual(r.status, 200, r.data)
def test_post_url(self):
r = self.pool.request('POST', '/specific_method',
fields={'method': 'POST'})
self.assertEqual(r.status, 200, r.data)
def test_urlopen_put(self):
r = self.pool.urlopen('PUT', '/specific_method?method=PUT')
self.assertEqual(r.status, 200, r.data)
def test_wrong_specific_method(self):
# To make sure the dummy server is actually returning failed responses
r = self.pool.request('GET', '/specific_method',
fields={'method': 'POST'})
self.assertEqual(r.status, 400, r.data)
r = self.pool.request('POST', '/specific_method',
fields={'method': 'GET'})
self.assertEqual(r.status, 400, r.data)
def test_upload(self):
data = "I'm in ur multipart form-data, hazing a cheezburgr"
fields = {
'upload_param': 'filefield',
'upload_filename': 'lolcat.txt',
'upload_size': len(data),
'filefield': ('lolcat.txt', data),
}
r = self.pool.request('POST', '/upload', fields=fields)
self.assertEqual(r.status, 200, r.data)
def test_one_name_multiple_values(self):
fields = [
('foo', 'a'),
('foo', 'b'),
]
# urlencode
r = self.pool.request('GET', '/echo', fields=fields)
self.assertEqual(r.data, b'foo=a&foo=b')
# multipart
r = self.pool.request('POST', '/echo', fields=fields)
self.assertEqual(r.data.count(b'name="foo"'), 2)
def test_unicode_upload(self):
fieldname = u('myfile')
filename = u('\xe2\x99\xa5.txt')
data = u('\xe2\x99\xa5').encode('utf8')
size = len(data)
fields = {
u('upload_param'): fieldname,
u('upload_filename'): filename,
u('upload_size'): size,
fieldname: (filename, data),
}
r = self.pool.request('POST', '/upload', fields=fields)
self.assertEqual(r.status, 200, r.data)
def test_timeout(self):
url = '/sleep?seconds=0.005'
timeout = 0.001
# Pool-global timeout
pool = HTTPConnectionPool(self.host, self.port, timeout=timeout)
conn = pool._get_conn()
self.assertRaises(SocketTimeout, pool._make_request,
conn, 'GET', url)
pool._put_conn(conn)
self.assertRaises(TimeoutError, pool.request, 'GET', url)
# Request-specific timeout
pool = HTTPConnectionPool(self.host, self.port, timeout=0.5)
conn = pool._get_conn()
self.assertRaises(SocketTimeout, pool._make_request,
conn, 'GET', url, timeout=timeout)
pool._put_conn(conn)
self.assertRaises(TimeoutError, pool.request,
'GET', url, timeout=timeout)
def test_redirect(self):
r = self.pool.request('GET', '/redirect', fields={'target': '/'}, redirect=False)
self.assertEqual(r.status, 303)
r = self.pool.request('GET', '/redirect', fields={'target': '/'})
self.assertEqual(r.status, 200)
self.assertEqual(r.data, b'Dummy server!')
def test_maxretry(self):
try:
self.pool.request('GET', '/redirect',
fields={'target': '/'},
retries=0)
self.fail("Failed to raise MaxRetryError exception")
except MaxRetryError:
pass
def test_keepalive(self):
pool = HTTPConnectionPool(self.host, self.port, block=True, maxsize=1)
r = pool.request('GET', '/keepalive?close=0')
r = pool.request('GET', '/keepalive?close=0')
self.assertEqual(r.status, 200)
self.assertEqual(pool.num_connections, 1)
self.assertEqual(pool.num_requests, 2)
def test_keepalive_close(self):
# NOTE: This used to run against apache.org but it made the test suite
# really slow and fail half the time. Setting it to skip until we can
# make this run better locally.
pool = HTTPConnectionPool(self.host, self.port,
block=True, maxsize=1, timeout=2)
r = pool.request('GET', '/keepalive?close=1', retries=0,
headers={
"Connection": "close",
})
self.assertEqual(pool.num_connections, 1)
# The dummyserver will have responded with Connection:close,
# and httplib will properly cleanup the socket.
# We grab the HTTPConnection object straight from the Queue,
# because _get_conn() is where the check & reset occurs
# pylint: disable-msg=W0212
conn = pool.pool.get()
self.assertEqual(conn.sock, None)
pool._put_conn(conn)
# Now with keep-alive
r = pool.request('GET', '/keepalive?close=0', retries=0,
headers={
"Connection": "keep-alive",
})
# The dummyserver responded with Connection:keep-alive, the connection
# persists.
conn = pool.pool.get()
self.assertNotEqual(conn.sock, None)
pool._put_conn(conn)
# Another request asking the server to close the connection. This one
# should get cleaned up for the next request.
r = pool.request('GET', '/keepalive?close=1', retries=0,
headers={
"Connection": "close",
})
self.assertEqual(r.status, 200)
conn = pool.pool.get()
self.assertEqual(conn.sock, None)
pool._put_conn(conn)
# Next request
r = pool.request('GET', '/keepalive?close=0')
def test_post_with_urlencode(self):
data = {'banana': 'hammock', 'lol': 'cat'}
r = self.pool.request('POST', '/echo', fields=data, encode_multipart=False)
self.assertEqual(r.data.decode('utf-8'), urlencode(data))
def test_post_with_multipart(self):
data = {'banana': 'hammock', 'lol': 'cat'}
r = self.pool.request('POST', '/echo',
fields=data,
encode_multipart=True)
body = r.data.split(b'\r\n')
encoded_data = encode_multipart_formdata(data)[0]
expected_body = encoded_data.split(b'\r\n')
# TODO: Get rid of extra parsing stuff when you can specify
# a custom boundary to encode_multipart_formdata
"""
We need to loop the return lines because a timestamp is attached
from within encode_multipart_formdata. When the server echos back
the data, it has the timestamp from when the data was encoded, which
is not equivalent to when we run encode_multipart_formdata on
the data again.
"""
for i, line in enumerate(body):
if line.startswith(b'--'):
continue
self.assertEqual(body[i], expected_body[i])
def test_check_gzip(self):
r = self.pool.request('GET', '/encodingrequest',
headers={'accept-encoding': 'gzip'})
self.assertEqual(r.headers.get('content-encoding'), 'gzip')
self.assertEqual(r.data, b'hello, world!')
def test_check_deflate(self):
r = self.pool.request('GET', '/encodingrequest',
headers={'accept-encoding': 'deflate'})
self.assertEqual(r.headers.get('content-encoding'), 'deflate')
self.assertEqual(r.data, b'hello, world!')
def test_bad_decode(self):
self.assertRaises(DecodeError, self.pool.request,
'GET', '/encodingrequest',
headers={'accept-encoding': 'garbage-deflate'})
self.assertRaises(DecodeError, self.pool.request,
'GET', '/encodingrequest',
headers={'accept-encoding': 'garbage-gzip'})
def test_connection_count(self):
pool = HTTPConnectionPool(self.host, self.port, maxsize=1)
pool.request('GET', '/')
pool.request('GET', '/')
pool.request('GET', '/')
self.assertEqual(pool.num_connections, 1)
self.assertEqual(pool.num_requests, 3)
def test_connection_count_bigpool(self):
http_pool = HTTPConnectionPool(self.host, self.port, maxsize=16)
http_pool.request('GET', '/')
http_pool.request('GET', '/')
http_pool.request('GET', '/')
self.assertEqual(http_pool.num_connections, 1)
self.assertEqual(http_pool.num_requests, 3)
def test_partial_response(self):
pool = HTTPConnectionPool(self.host, self.port, maxsize=1)
req_data = {'lol': 'cat'}
resp_data = urlencode(req_data).encode('utf-8')
r = pool.request('GET', '/echo', fields=req_data, preload_content=False)
self.assertEqual(r.read(5), resp_data[:5])
self.assertEqual(r.read(), resp_data[5:])
def test_lazy_load_twice(self):
# This test is sad and confusing. Need to figure out what's
# going on with partial reads and socket reuse.
pool = HTTPConnectionPool(self.host, self.port, block=True, maxsize=1, timeout=2)
payload_size = 1024 * 2
first_chunk = 512
boundary = 'foo'
req_data = {'count': 'a' * payload_size}
resp_data = encode_multipart_formdata(req_data, boundary=boundary)[0]
req2_data = {'count': 'b' * payload_size}
resp2_data = encode_multipart_formdata(req2_data, boundary=boundary)[0]
r1 = pool.request('POST', '/echo', fields=req_data, multipart_boundary=boundary, preload_content=False)
self.assertEqual(r1.read(first_chunk), resp_data[:first_chunk])
try:
r2 = pool.request('POST', '/echo', fields=req2_data, multipart_boundary=boundary,
preload_content=False, pool_timeout=0.001)
# This branch should generally bail here, but maybe someday it will
# work? Perhaps by some sort of magic. Consider it a TODO.
self.assertEqual(r2.read(first_chunk), resp2_data[:first_chunk])
self.assertEqual(r1.read(), resp_data[first_chunk:])
self.assertEqual(r2.read(), resp2_data[first_chunk:])
self.assertEqual(pool.num_requests, 2)
except EmptyPoolError:
self.assertEqual(r1.read(), resp_data[first_chunk:])
self.assertEqual(pool.num_requests, 1)
self.assertEqual(pool.num_connections, 1)
def test_for_double_release(self):
MAXSIZE=5
# Check default state
pool = HTTPConnectionPool(self.host, self.port, maxsize=MAXSIZE)
self.assertEqual(pool.num_connections, 0)
self.assertEqual(pool.pool.qsize(), MAXSIZE)
# Make an empty slot for testing
pool.pool.get()
self.assertEqual(pool.pool.qsize(), MAXSIZE-1)
# Check state after simple request
pool.urlopen('GET', '/')
self.assertEqual(pool.pool.qsize(), MAXSIZE-1)
# Check state without release
pool.urlopen('GET', '/', preload_content=False)
self.assertEqual(pool.pool.qsize(), MAXSIZE-2)
pool.urlopen('GET', '/')
self.assertEqual(pool.pool.qsize(), MAXSIZE-2)
# Check state after read
pool.urlopen('GET', '/').data
self.assertEqual(pool.pool.qsize(), MAXSIZE-2)
pool.urlopen('GET', '/')
self.assertEqual(pool.pool.qsize(), MAXSIZE-2)
def test_release_conn_parameter(self):
MAXSIZE=5
pool = HTTPConnectionPool(self.host, self.port, maxsize=MAXSIZE)
self.assertEqual(pool.pool.qsize(), MAXSIZE)
# Make request without releasing connection
pool.request('GET', '/', release_conn=False, preload_content=False)
self.assertEqual(pool.pool.qsize(), MAXSIZE-1)
## FIXME: This borks on OSX because sockets on invalid hosts refuse to timeout. :(
#def test_dns_error(self):
# pool = HTTPConnectionPool('thishostdoesnotexist.invalid', self.port, timeout=0.001)
#
# with self.assertRaises(MaxRetryError):
# pool.request('GET', '/test', retries=2)
if __name__ == '__main__':
unittest.main()
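# Illustrative, hypothetical sketch of the keep-alive behaviour exercised by
# test_keepalive_close above: a blocking pool of size 1 makes connection
# reuse (or the lack of it) observable. It assumes a reachable HTTP server
# at `host`/`port` that honours the Connection header; the helper is never
# invoked by the test suite and all names below are made up for the demo.
def _demo_keepalive_behaviour(host, port):
    from urllib3.connectionpool import HTTPConnectionPool as _Pool
    pool = _Pool(host, port, block=True, maxsize=1)
    # With keep-alive the pooled socket is reused, so num_connections does
    # not grow no matter how many requests are issued.
    pool.request('GET', '/', headers={'Connection': 'keep-alive'})
    pool.request('GET', '/', headers={'Connection': 'keep-alive'})
    reused = pool.num_connections  # expected to stay at 1 while reused
    # Asking the server to close the connection tears the socket down, so
    # the next request has to open a brand new connection.
    pool.request('GET', '/', headers={'Connection': 'close'})
    pool.request('GET', '/')
    return reused, pool.num_connections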
|
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2010,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the search network device command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestSearchNetworkDevice(TestBrokerCommand):
def testwithinterfacecsv(self):
command = ["search_network_device", "--network_device=ut3gd1r06.aqd-unittest.ms.com",
"--format=csv"]
out = self.commandtest(command)
ip = self.net["ut_net_mgmt"].usable[4]
self.matchoutput(out,
"ut3gd1r06.aqd-unittest.ms.com,%s,tor,ut3,ut,"
"generic,temp_switch,,xge49,%s" % (ip, ip.mac),
command)
def testwithoutinterfacecsv(self):
command = ["search_network_device", "--network_device=ut3gd1r01.aqd-unittest.ms.com",
"--format=csv"]
out = self.commandtest(command)
ip = self.net["tor_net_12"].usable[0]
self.matchoutput(out,
"ut3gd1r01.aqd-unittest.ms.com,%s,bor,ut3,ut,"
"hp,uttorswitch,SNgd1r01,xge49," % ip,
command)
def testbuilding(self):
command = ["search_network_device", "--building=ut"]
out = self.commandtest(command)
self.matchoutput(out, "ut3gd1r05.aqd-unittest.ms.com", command)
self.matchoutput(out, "ut3gd1r06.aqd-unittest.ms.com", command)
self.matchoutput(out, "switchinbuilding.aqd-unittest.ms.com", command)
def testbuildingexact(self):
command = ["search_network_device", "--building=ut", "--exact_location"]
out = self.commandtest(command)
self.matchoutput(out, "switchinbuilding.aqd-unittest.ms.com", command)
self.matchclean(out, "ut3gd1r05", command)
self.matchclean(out, "ut3gd1r06", command)
def testcityexact(self):
command = ["search_network_device", "--city=ny", "--exact_location"]
self.noouttest(command)
def testrack(self):
command = ["search_network_device", "--rack=ut4"]
out = self.commandtest(command)
self.matchoutput(out, "ut3gd1r05.aqd-unittest.ms.com", command)
self.matchclean(out, "ut3gd1r06.aqd-unittest.ms.com", command)
def testmodel(self):
command = ["search_network_device", "--model=uttorswitch"]
out = self.commandtest(command)
self.matchoutput(out, "ut3gd1r04.aqd-unittest.ms.com", command)
self.matchoutput(out, "ut3gd1r05.aqd-unittest.ms.com", command)
self.matchclean(out, "ut3gd1r06.aqd-unittest.ms.com", command)
def testvendor(self):
command = ["search_network_device", "--vendor=hp"]
out = self.commandtest(command)
self.matchoutput(out, "ut3gd1r04.aqd-unittest.ms.com", command)
self.matchoutput(out, "ut3gd1r05.aqd-unittest.ms.com", command)
self.matchclean(out, "ut3gd1r06.aqd-unittest.ms.com", command)
def testtype(self):
command = ["search_network_device", "--type=bor"]
out = self.commandtest(command)
self.matchoutput(out, "ut3gd1r04.aqd-unittest.ms.com", command)
self.matchclean(out, "ut3gd1r05.aqd-unittest.ms.com", command)
self.matchclean(out, "ut3gd1r06.aqd-unittest.ms.com", command)
def testserial(self):
command = ["search_network_device", "--serial=SNgd1r05_new"]
out = self.commandtest(command)
self.matchoutput(out, "ut3gd1r05.aqd-unittest.ms.com", command)
self.matchclean(out, "ut3gd1r04.aqd-unittest.ms.com", command)
def testserialandfullinfo(self):
command = ["search_network_device", "--serial=SNgd1r05_new", "--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "Switch: ut3gd1r05", command)
self.matchclean(out, "ut3gd1r04", command)
def testfullinfocsv(self):
command = ["search_network_device", "--serial=SNgd1r05_new", "--fullinfo",
"--format=csv"]
out = self.commandtest(command)
ip = self.net["ut_net_mgmt"].usable[0]
self.matchoutput(out,
"ut3gd1r05.aqd-unittest.ms.com,%s,tor,ut4,ut,"
"hp,uttorswitch,SNgd1r05_new,xge49," % ip,
command)
def testsearchswitchall(self):
command = ["search_network_device", "--all", "--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "Switch: ut3gd1r01", command)
self.matchoutput(out,
"Primary Name: ut3gd1r01.aqd-unittest.ms.com [%s]" %
self.net["tor_net_12"].usable[0],
command)
self.matchoutput(out, "Switch Type: bor", command)
self.matchoutput(out, "Rack: ut3", command)
self.matchoutput(out, "Vendor: hp Model: uttorswitch", command)
self.matchoutput(out, "Serial: SNgd1r01", command)
self.matchoutput(out, "Switch: ut3gd1r04", command)
self.matchoutput(out,
"Primary Name: ut3gd1r04.aqd-unittest.ms.com [%s]" %
self.net["ut10_eth1"].usable[1],
command)
self.matchoutput(out, "Switch Type: tor", command)
def testsearchswitchswitch(self):
command = ["search_network_device", "--network_device=ut3gd1r04.aqd-unittest.ms.com",
"--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "Switch: ut3gd1r04", command)
self.matchoutput(out,
"Primary Name: ut3gd1r04.aqd-unittest.ms.com [%s]" %
self.net["ut10_eth1"].usable[1],
command)
self.matchoutput(out, "Switch Type: bor", command)
self.matchoutput(out, "Rack: ut3", command)
self.matchoutput(out, "Vendor: hp Model: uttorswitch", command)
def testsearchswitchallcsv(self):
command = ["search_network_device", "--all", "--format=csv"]
out = self.commandtest(command)
ip = self.net["ut_net_mgmt"].usable[4]
self.matchoutput(out,
"ut3gd1r06.aqd-unittest.ms.com,%s,tor,ut3,ut,"
"generic,temp_switch,,xge49,%s" % (ip, ip.mac),
command)
ip = self.net["tor_net_12"].usable[0]
self.matchoutput(out,
"ut3gd1r01.aqd-unittest.ms.com,%s,bor,ut3,ut,"
"hp,uttorswitch,SNgd1r01,xge49," % ip,
command)
def testsearchswitchip(self):
ip = self.net["tor_net_0"].usable[0]
command = ["search_network_device", "--ip=%s" % ip]
out = self.commandtest(command)
self.matchoutput(out, "ut01ga1s02.aqd-unittest.ms.com", command)
def testsearchswitchipfullinfo(self):
ip = self.net["tor_net_0"].usable[0]
command = ["search_network_device", "--ip=%s" % ip, "--fullinfo"]
out = self.commandtest(command)
self.matchoutput(out, "Switch: ut01ga1s02", command)
self.matchoutput(out,
"Primary Name: ut01ga1s02.aqd-unittest.ms.com"
" [%s]" % ip, command)
self.matchoutput(out, "Switch Type: tor", command)
out = self.commandtest(command)
def testsearchswitchipcsv(self):
ip = self.net["tor_net_0"].usable[0]
command = ["search_network_device", "--ip=%s" % ip, "--format=csv"]
out = self.commandtest(command)
self.matchoutput(out, "ut01ga1s02.aqd-unittest.ms.com,%s,tor,ut8"
",ut,bnt,rs g8000,,xge49,%s" % (ip, ip.mac),
command)
def testsearchvlan(self):
command = ["search_network_device", "--vlan", "701"]
out = self.commandtest(command)
self.matchoutput(out, "ut01ga2s01.aqd-unittest.ms.com", command)
self.matchoutput(out, "ut01ga2s02.aqd-unittest.ms.com", command)
self.matchoutput(out, "ut01ga2s03.aqd-unittest.ms.com", command)
self.matchoutput(out, "ut01ga2s04.aqd-unittest.ms.com", command)
# Not a ToR switch
self.matchclean(out, "ut3gd1r01.aqd-unittest.ms.com", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestSearchNetworkDevice)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from oslo_log import log
from oslo_utils import timeutils
from watcher._i18n import _
from watcher.common import clients
from watcher.common import exception
from watcher.decision_engine.datasources import base
LOG = log.getLogger(__name__)
try:
from ceilometerclient import exc
HAS_CEILCLIENT = True
except ImportError:
HAS_CEILCLIENT = False
class CeilometerHelper(base.DataSourceBase):
NAME = 'ceilometer'
METRIC_MAP = dict(host_cpu_usage='compute.node.cpu.percent',
host_ram_usage='hardware.memory.used',
host_outlet_temp='hardware.ipmi.node.outlet_temperature',
host_inlet_temp='hardware.ipmi.node.temperature',
host_airflow='hardware.ipmi.node.airflow',
host_power='hardware.ipmi.node.power',
instance_cpu_usage='cpu_util',
instance_ram_usage='memory.resident',
instance_ram_allocated='memory',
instance_l3_cache_usage='cpu_l3_cache',
instance_root_disk_size='disk.root.size',
)
def __init__(self, osc=None):
""":param osc: an OpenStackClients instance"""
self.osc = osc if osc else clients.OpenStackClients()
self.ceilometer = self.osc.ceilometer()
LOG.warning("Ceilometer API is deprecated and Ceilometer Datasource "
"module is no longer maintained. We recommend to use "
"Gnocchi instead.")
@staticmethod
def format_query(user_id, tenant_id, resource_id,
user_ids, tenant_ids, resource_ids):
query = []
def query_append(query, _id, _ids, field):
if _id:
_ids = [_id]
for x_id in _ids:
query.append({"field": field, "op": "eq", "value": x_id})
query_append(query, user_id, (user_ids or []), "user_id")
query_append(query, tenant_id, (tenant_ids or []), "project_id")
query_append(query, resource_id, (resource_ids or []), "resource_id")
return query
def _timestamps(self, start_time, end_time):
def _format_timestamp(_time):
if _time:
if isinstance(_time, datetime.datetime):
return _time.isoformat()
return _time
return None
start_timestamp = _format_timestamp(start_time)
end_timestamp = _format_timestamp(end_time)
if ((start_timestamp is not None) and (end_timestamp is not None) and
(timeutils.parse_isotime(start_timestamp) >
timeutils.parse_isotime(end_timestamp))):
raise exception.Invalid(
_("Invalid query: %(start_time)s > %(end_time)s") % dict(
start_time=start_timestamp, end_time=end_timestamp))
return start_timestamp, end_timestamp
def build_query(self, user_id=None, tenant_id=None, resource_id=None,
user_ids=None, tenant_ids=None, resource_ids=None,
start_time=None, end_time=None):
"""Returns query built from given parameters.
This query can be then used for querying resources, meters and
statistics.
:param user_id: user_id, has a priority over list of ids
:param tenant_id: tenant_id, has a priority over list of ids
:param resource_id: resource_id, has a priority over list of ids
:param user_ids: list of user_ids
:param tenant_ids: list of tenant_ids
:param resource_ids: list of resource_ids
:param start_time: datetime from which measurements should be collected
:param end_time: datetime until which measurements should be collected
"""
query = self.format_query(user_id, tenant_id, resource_id,
user_ids, tenant_ids, resource_ids)
start_timestamp, end_timestamp = self._timestamps(start_time,
end_time)
if start_timestamp:
query.append({"field": "timestamp", "op": "ge",
"value": start_timestamp})
if end_timestamp:
query.append({"field": "timestamp", "op": "le",
"value": end_timestamp})
return query
def query_retry_reset(self, exception_instance):
if isinstance(exception_instance, exc.HTTPUnauthorized):
self.osc.reset_clients()
self.ceilometer = self.osc.ceilometer()
def list_metrics(self):
"""List the user's meters."""
meters = self.query_retry(f=self.ceilometer.meters.list)
if not meters:
return set()
else:
return meters
def check_availability(self):
status = self.query_retry(self.ceilometer.resources.list)
if status:
return 'available'
else:
return 'not available'
def query_sample(self, meter_name, query, limit=1):
return self.query_retry(f=self.ceilometer.samples.list,
meter_name=meter_name,
limit=limit,
q=query)
def statistic_aggregation(self, resource=None, resource_type=None,
meter_name=None, period=300, granularity=300,
aggregate='mean'):
end_time = datetime.datetime.utcnow()
start_time = end_time - datetime.timedelta(seconds=int(period))
meter = self._get_meter(meter_name)
if aggregate == 'mean':
aggregate = 'avg'
elif aggregate == 'count':
aggregate = 'avg'
LOG.warning('aggregate type count not supported by ceilometer,'
' replaced with mean.')
resource_id = resource.uuid
if resource_type == 'compute_node':
resource_id = "%s_%s" % (resource.hostname, resource.hostname)
query = self.build_query(
resource_id=resource_id, start_time=start_time, end_time=end_time)
statistic = self.query_retry(f=self.ceilometer.statistics.list,
meter_name=meter,
q=query,
period=period,
aggregates=[
{'func': aggregate}])
item_value = None
if statistic:
item_value = statistic[-1]._info.get('aggregate').get(aggregate)
if meter_name == 'host_airflow':
# Airflow from hardware.ipmi.node.airflow is reported as
# 1/10 th of actual CFM
item_value *= 10
return item_value
def statistic_series(self, resource=None, resource_type=None,
meter_name=None, start_time=None, end_time=None,
granularity=300):
raise NotImplementedError(
_('Ceilometer helper does not support statistic series method'))
def get_host_cpu_usage(self, resource, period,
aggregate, granularity=None):
return self.statistic_aggregation(
resource, 'compute_node', 'host_cpu_usage', period,
aggregate, granularity)
def get_host_ram_usage(self, resource, period,
aggregate, granularity=None):
return self.statistic_aggregation(
resource, 'compute_node', 'host_ram_usage', period,
aggregate, granularity)
def get_host_outlet_temp(self, resource, period,
aggregate, granularity=None):
return self.statistic_aggregation(
resource, 'compute_node', 'host_outlet_temp', period,
aggregate, granularity)
def get_host_inlet_temp(self, resource, period,
aggregate, granularity=None):
return self.statistic_aggregation(
resource, 'compute_node', 'host_inlet_temp', period,
aggregate, granularity)
def get_host_airflow(self, resource, period,
aggregate, granularity=None):
return self.statistic_aggregation(
resource, 'compute_node', 'host_airflow', period,
aggregate, granularity)
def get_host_power(self, resource, period,
aggregate, granularity=None):
return self.statistic_aggregation(
resource, 'compute_node', 'host_power', period,
aggregate, granularity)
def get_instance_cpu_usage(self, resource, period,
aggregate, granularity=None):
return self.statistic_aggregation(
resource, 'instance', 'instance_cpu_usage', period,
aggregate, granularity)
def get_instance_ram_usage(self, resource, period,
aggregate, granularity=None):
return self.statistic_aggregation(
resource, 'instance', 'instance_ram_usage', period,
aggregate, granularity)
def get_instance_ram_allocated(self, resource, period,
aggregate, granularity=None):
return self.statistic_aggregation(
resource, 'instance', 'instance_ram_allocated', period,
aggregate, granularity)
def get_instance_l3_cache_usage(self, resource, period,
aggregate, granularity=None):
return self.statistic_aggregation(
resource, 'instance', 'instance_l3_cache_usage', period,
aggregate, granularity)
def get_instance_root_disk_size(self, resource, period,
aggregate, granularity=None):
return self.statistic_aggregation(
resource, 'instance', 'instance_root_disk_size', period,
aggregate, granularity)
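# Illustrative, hypothetical sketch of how this helper is meant to be
# driven. It assumes a reachable Ceilometer endpoint plus a `node` object
# exposing `uuid` and `hostname` attributes (which is all
# statistic_aggregation() reads); the function and variable names below are
# made up for the demo and are not part of the Watcher API.
def _demo_ceilometer_helper(node):
    helper = CeilometerHelper()
    # build_query() turns ids and timestamps into the list-of-dicts filter
    # format expected by python-ceilometerclient, e.g.
    #   [{"field": "resource_id", "op": "eq", "value": node.uuid}]
    query = helper.build_query(resource_id=node.uuid)
    latest_cpu_sample = helper.query_sample('cpu_util', query, limit=1)
    # The get_* convenience methods all funnel into statistic_aggregation(),
    # translating the logical metric name through METRIC_MAP first.
    cpu_percent = helper.get_host_cpu_usage(node, period=600,
                                            aggregate='mean')
    return latest_cpu_sample, cpu_percent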
|
|
from collections import OrderedDict
import contextlib
import gym
import numpy as np
from typing import Dict, List, Any, Union
from ray.rllib.models.preprocessors import get_preprocessor, \
RepeatedValuesPreprocessor
from ray.rllib.models.repeated_values import RepeatedValues
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.view_requirement import ViewRequirement
from ray.rllib.utils.annotations import DeveloperAPI, PublicAPI
from ray.rllib.utils.framework import try_import_tf, try_import_torch, \
TensorType
from ray.rllib.utils.spaces.repeated import Repeated
from ray.rllib.utils.typing import ModelConfigDict, TensorStructType
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
@PublicAPI
class ModelV2:
"""Defines an abstract neural network model for use with RLlib.
Custom models should extend either TFModelV2 or TorchModelV2 instead of
this class directly.
Data flow:
obs -> forward() -> model_out
value_function() -> V(s)
"""
def __init__(self, obs_space: gym.spaces.Space,
action_space: gym.spaces.Space, num_outputs: int,
model_config: ModelConfigDict, name: str, framework: str):
"""Initializes a ModelV2 object.
This method should create any variables used by the model.
Args:
obs_space (gym.spaces.Space): Observation space of the target gym
env. This may have an `original_space` attribute that
specifies how to unflatten the tensor into a ragged tensor.
action_space (gym.spaces.Space): Action space of the target gym
env.
num_outputs (int): Number of output units of the model.
model_config (ModelConfigDict): Config for the model, documented
in ModelCatalog.
name (str): Name (scope) for the model.
framework (str): Either "tf" or "torch".
"""
self.obs_space: gym.spaces.Space = obs_space
self.action_space: gym.spaces.Space = action_space
self.num_outputs: int = num_outputs
self.model_config: ModelConfigDict = model_config
self.name: str = name or "default_model"
self.framework: str = framework
self._last_output = None
self.time_major = self.model_config.get("_time_major")
self.inference_view_requirements = {
SampleBatch.OBS: ViewRequirement(shift=0),
}
@PublicAPI
def get_initial_state(self) -> List[np.ndarray]:
"""Get the initial recurrent state values for the model.
Returns:
List[np.ndarray]: List of np.array objects containing the initial
hidden state of an RNN, if applicable.
Examples:
>>> def get_initial_state(self):
>>> return [
>>> np.zeros(self.cell_size, np.float32),
>>> np.zeros(self.cell_size, np.float32),
>>> ]
"""
return []
@PublicAPI
def forward(self, input_dict: Dict[str, TensorType],
state: List[TensorType],
seq_lens: TensorType) -> (TensorType, List[TensorType]):
"""Call the model with the given input tensors and state.
Any complex observations (dicts, tuples, etc.) will be unpacked by
__call__ before being passed to forward(). To access the flattened
observation tensor, refer to input_dict["obs_flat"].
This method can be called any number of times. In eager execution,
each call to forward() will eagerly evaluate the model. In symbolic
execution, each call to forward creates a computation graph that
operates over the variables of this model (i.e., shares weights).
Custom models should override this instead of __call__.
Args:
input_dict (dict): dictionary of input tensors, including "obs",
"obs_flat", "prev_action", "prev_reward", "is_training",
"eps_id", "agent_id", "infos", and "t".
state (list): list of state tensors with sizes matching those
returned by get_initial_state + the batch dimension
seq_lens (Tensor): 1d tensor holding input sequence lengths
Returns:
(outputs, state): The model output tensor of size
[BATCH, num_outputs], and the new RNN state.
Examples:
>>> def forward(self, input_dict, state, seq_lens):
>>> model_out, self._value_out = self.base_model(
... input_dict["obs"])
>>> return model_out, state
"""
raise NotImplementedError
@PublicAPI
def value_function(self) -> TensorType:
"""Returns the value function output for the most recent forward pass.
        Note that a `forward` call has to be performed first, before this
        method can return anything; calling this method therefore does not
        cause an extra forward pass through the network.
Returns:
value estimate tensor of shape [BATCH].
"""
raise NotImplementedError
@PublicAPI
def custom_loss(self, policy_loss: TensorType,
loss_inputs: Dict[str, TensorType]) -> TensorType:
"""Override to customize the loss function used to optimize this model.
This can be used to incorporate self-supervised losses (by defining
a loss over existing input and output tensors of this model), and
supervised losses (by defining losses over a variable-sharing copy of
this model's layers).
        You can find a runnable example in examples/custom_loss.py.
Args:
policy_loss (Union[List[Tensor],Tensor]): List of or single policy
loss(es) from the policy.
loss_inputs (dict): map of input placeholders for rollout data.
Returns:
Union[List[Tensor],Tensor]: List of or scalar tensor for the
customized loss(es) for this model.
"""
return policy_loss
@PublicAPI
def metrics(self) -> Dict[str, TensorType]:
"""Override to return custom metrics from your model.
The stats will be reported as part of the learner stats, i.e.,
info:
learner:
model:
key1: metric1
key2: metric2
Returns:
Dict of string keys to scalar tensors.
"""
return {}
def __call__(
self,
input_dict: Dict[str, TensorType],
state: List[Any] = None,
seq_lens: TensorType = None) -> (TensorType, List[TensorType]):
"""Call the model with the given input tensors and state.
This is the method used by RLlib to execute the forward pass. It calls
forward() internally after unpacking nested observation tensors.
Custom models should override forward() instead of __call__.
Args:
input_dict (dict): dictionary of input tensors, including "obs",
"prev_action", "prev_reward", "is_training"
state (list): list of state tensors with sizes matching those
returned by get_initial_state + the batch dimension
seq_lens (Tensor): 1d tensor holding input sequence lengths
Returns:
(outputs, state): The model output tensor of size
[BATCH, output_spec.size] or a list of tensors corresponding to
output_spec.shape_list, and a list of state tensors of
[BATCH, state_size_i].
"""
restored = input_dict.copy()
restored["obs"] = restore_original_dimensions(
input_dict["obs"], self.obs_space, self.framework)
if len(input_dict["obs"].shape) > 2:
restored["obs_flat"] = flatten(input_dict["obs"], self.framework)
else:
restored["obs_flat"] = input_dict["obs"]
with self.context():
res = self.forward(restored, state or [], seq_lens)
if ((not isinstance(res, list) and not isinstance(res, tuple))
or len(res) != 2):
raise ValueError(
"forward() must return a tuple of (output, state) tensors, "
"got {}".format(res))
outputs, state = res
try:
shape = outputs.shape
except AttributeError:
raise ValueError("Output is not a tensor: {}".format(outputs))
else:
if len(shape) != 2 or int(shape[1]) != self.num_outputs:
raise ValueError(
"Expected output shape of [None, {}], got {}".format(
self.num_outputs, shape))
if not isinstance(state, list):
raise ValueError("State output is not a list: {}".format(state))
self._last_output = outputs
return outputs, state
@PublicAPI
def from_batch(self, train_batch: SampleBatch,
is_training: bool = True) -> (TensorType, List[TensorType]):
"""Convenience function that calls this model with a tensor batch.
All this does is unpack the tensor batch to call this model with the
right input dict, state, and seq len arguments.
"""
input_dict = {
"obs": train_batch[SampleBatch.CUR_OBS],
"is_training": is_training,
}
if SampleBatch.PREV_ACTIONS in train_batch:
input_dict["prev_actions"] = train_batch[SampleBatch.PREV_ACTIONS]
if SampleBatch.PREV_REWARDS in train_batch:
input_dict["prev_rewards"] = train_batch[SampleBatch.PREV_REWARDS]
states = []
i = 0
while "state_in_{}".format(i) in train_batch:
states.append(train_batch["state_in_{}".format(i)])
i += 1
return self.__call__(input_dict, states, train_batch.get("seq_lens"))
def import_from_h5(self, h5_file: str) -> None:
"""Imports weights from an h5 file.
Args:
h5_file (str): The h5 file name to import weights from.
Example:
>>> trainer = MyTrainer()
>>> trainer.import_policy_model_from_h5("/tmp/weights.h5")
>>> for _ in range(10):
>>> trainer.train()
"""
raise NotImplementedError
@PublicAPI
def last_output(self) -> TensorType:
"""Returns the last output returned from calling the model."""
return self._last_output
@PublicAPI
def context(self) -> contextlib.AbstractContextManager:
"""Returns a contextmanager for the current forward pass."""
return NullContextManager()
@PublicAPI
def variables(self, as_dict: bool = False
) -> Union[List[TensorType], Dict[str, TensorType]]:
"""Returns the list (or a dict) of variables for this model.
Args:
as_dict(bool): Whether variables should be returned as dict-values
(using descriptive keys).
Returns:
Union[List[any],Dict[str,any]]: The list (or dict if `as_dict` is
True) of all variables of this ModelV2.
"""
raise NotImplementedError
@PublicAPI
def trainable_variables(
self, as_dict: bool = False
) -> Union[List[TensorType], Dict[str, TensorType]]:
"""Returns the list of trainable variables for this model.
Args:
as_dict(bool): Whether variables should be returned as dict-values
(using descriptive keys).
Returns:
Union[List[any],Dict[str,any]]: The list (or dict if `as_dict` is
True) of all trainable (tf)/requires_grad (torch) variables
of this ModelV2.
"""
raise NotImplementedError
@PublicAPI
def is_time_major(self) -> bool:
"""If True, data for calling this ModelV2 must be in time-major format.
        Returns:
bool: Whether this ModelV2 requires a time-major (TxBx...) data
format.
"""
return self.time_major is True
class NullContextManager:
"""No-op context manager"""
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
@DeveloperAPI
def flatten(obs: TensorType, framework: str) -> TensorType:
"""Flatten the given tensor."""
if framework in ["tf2", "tf", "tfe"]:
return tf1.keras.layers.Flatten()(obs)
elif framework == "torch":
assert torch is not None
return torch.flatten(obs, start_dim=1)
else:
raise NotImplementedError("flatten", framework)
@DeveloperAPI
def restore_original_dimensions(obs: TensorType,
obs_space: gym.spaces.Space,
tensorlib: Any = tf) -> TensorStructType:
"""Unpacks Dict and Tuple space observations into their original form.
This is needed since we flatten Dict and Tuple observations in transit
within a SampleBatch. Before sending them to the model though, we should
unflatten them into Dicts or Tuples of tensors.
Args:
obs (TensorType): The flattened observation tensor.
obs_space (gym.spaces.Space): The flattened obs space. If this has the
`original_space` attribute, we will unflatten the tensor to that
shape.
tensorlib: The library used to unflatten (reshape) the array/tensor.
Returns:
single tensor or dict / tuple of tensors matching the original
observation space.
"""
if hasattr(obs_space, "original_space"):
if tensorlib == "tf":
tensorlib = tf
elif tensorlib == "torch":
assert torch is not None
tensorlib = torch
return _unpack_obs(obs, obs_space.original_space, tensorlib=tensorlib)
else:
return obs
# Cache of preprocessors, in case the user calls _unpack_obs frequently.
_cache = {}
def _unpack_obs(obs: TensorType, space: gym.Space,
tensorlib: Any = tf) -> TensorStructType:
"""Unpack a flattened Dict or Tuple observation array/tensor.
Args:
obs: The flattened observation tensor, with last dimension equal to
the flat size and any number of batch dimensions. For example, for
Box(4,), the obs may have shape [B, 4], or [B, N, M, 4] in case
the Box was nested under two Repeated spaces.
space: The original space prior to flattening
tensorlib: The library used to unflatten (reshape) the array/tensor
"""
if (isinstance(space, gym.spaces.Dict)
or isinstance(space, gym.spaces.Tuple)
or isinstance(space, Repeated)):
if id(space) in _cache:
prep = _cache[id(space)]
else:
prep = get_preprocessor(space)(space)
# Make an attempt to cache the result, if enough space left.
if len(_cache) < 999:
_cache[id(space)] = prep
if len(obs.shape) < 2 or obs.shape[-1] != prep.shape[0]:
raise ValueError(
"Expected flattened obs shape of [..., {}], got {}".format(
prep.shape[0], obs.shape))
offset = 0
if tensorlib == tf:
batch_dims = [v.value for v in obs.shape[:-1]]
batch_dims = [-1 if v is None else v for v in batch_dims]
else:
batch_dims = list(obs.shape[:-1])
if isinstance(space, gym.spaces.Tuple):
assert len(prep.preprocessors) == len(space.spaces), \
(len(prep.preprocessors) == len(space.spaces))
u = []
for p, v in zip(prep.preprocessors, space.spaces):
obs_slice = obs[..., offset:offset + p.size]
offset += p.size
u.append(
_unpack_obs(
tensorlib.reshape(obs_slice,
batch_dims + list(p.shape)),
v,
tensorlib=tensorlib))
elif isinstance(space, gym.spaces.Dict):
assert len(prep.preprocessors) == len(space.spaces), \
(len(prep.preprocessors) == len(space.spaces))
u = OrderedDict()
for p, (k, v) in zip(prep.preprocessors, space.spaces.items()):
obs_slice = obs[..., offset:offset + p.size]
offset += p.size
u[k] = _unpack_obs(
tensorlib.reshape(obs_slice, batch_dims + list(p.shape)),
v,
tensorlib=tensorlib)
elif isinstance(space, Repeated):
assert isinstance(prep, RepeatedValuesPreprocessor), prep
child_size = prep.child_preprocessor.size
# The list lengths are stored in the first slot of the flat obs.
lengths = obs[..., 0]
# [B, ..., 1 + max_len * child_sz] -> [B, ..., max_len, child_sz]
with_repeat_dim = tensorlib.reshape(
obs[..., 1:], batch_dims + [space.max_len, child_size])
# Retry the unpack, dropping the List container space.
u = _unpack_obs(
with_repeat_dim, space.child_space, tensorlib=tensorlib)
return RepeatedValues(
u, lengths=lengths, max_len=prep._obs_space.max_len)
else:
assert False, space
return u
else:
return obs
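# Illustrative sketch of a round trip through the helpers above: flatten a
# Dict-space sample with RLlib's preprocessor, then recover the nested
# structure the way __call__() does before handing observations to
# forward(). It reuses the module-level imports (gym, np, get_preprocessor)
# and assumes the preprocessor's observation_space carries the
# `original_space` attribute that restore_original_dimensions() keys on;
# it is a demo only, not part of the ModelV2 API.
def _demo_restore_original_dimensions():
    space = gym.spaces.Dict({
        "pos": gym.spaces.Box(-1.0, 1.0, shape=(3,)),
        "vel": gym.spaces.Box(-1.0, 1.0, shape=(2,)),
    })
    prep = get_preprocessor(space)(space)
    # [B=1, 5]: "pos" and "vel" concatenated along the last axis.
    flat = np.expand_dims(prep.transform(space.sample()), 0)
    restored = restore_original_dimensions(flat, prep.observation_space,
                                           tensorlib=np)
    # restored should be an OrderedDict with "pos" of shape (1, 3) and
    # "vel" of shape (1, 2).
    return restored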
|
|
import autosar
import apx
import os
from copy import deepcopy
from apx.parser import apx_split_line, Parser
class Node:
"""
Represents an APX node
Example:
>>> import sys
>>> import apx
>>> node = apx.Node()
    >>> port1 = node.append(apx.ProvidePort('TestSignal1','C'))
    >>> port2 = node.append(apx.RequirePort('TestSignal2','S'))
>>> node.write(sys.stdout)
N"None"
P"TestSignal1"C
R"TestSignal2"S
"""
def __init__(self,name=None):
self.name=name
self.isFinalized = False
self.dataTypes = []
self.requirePorts=[]
self.providePorts=[]
self.dataTypeMap = {}
@classmethod
def from_autosar_swc(cls, swc, name=None, reverse=False):
assert(isinstance(swc, autosar.component.AtomicSoftwareComponent))
node = cls()
node.import_autosar_swc(swc, name=name)
return node
@classmethod
def from_text(cls, text):
return Parser().loads(text)
def _updateDataType(self, ws, port):
portInterface = ws.find(port.portInterfaceRef)
if isinstance(portInterface,autosar.portinterface.SenderReceiverInterface):
if len(portInterface.dataElements)==1:
dataType = ws.find(portInterface.dataElements[0].typeRef)
assert(dataType is not None)
if dataType.name not in self.dataTypeMap:
item = apx.AutosarDataType(ws,dataType, self)
item.id=len(self.dataTypes)
self.dataTypeMap[dataType.name]=item
self.dataTypes.append(item)
assert (item is not None)
return item
else:
item = self.dataTypeMap[dataType.name]
assert (item is not None)
return item
elif len(portInterface.dataElements)>1:
raise NotImplementedError('SenderReceiverInterface with more than 1 element not supported')
return None
def _calcAttributeFromAutosarPort(self,ws,port):
"""
returns string
"""
if (len(port.comspec)==1) and isinstance(port.comspec[0],autosar.component.DataElementComSpec):
if port.comspec[0].initValueRef is not None:
initValue = ws.find(port.comspec[0].initValueRef)
if initValue is None:
raise ValueError('invalid init value reference: '+port.comspec[0].initValueRef)
if isinstance(initValue, autosar.constant.Constant):
initValue=initValue.value
return "="+self._deriveInitValueFromAutosarConstant(initValue)
return None
def _deriveInitValueFromAutosarConstant(self,item):
if isinstance(item,autosar.constant.IntegerValue):
if (item.value>255):
return "0x%02X"%item.value
else:
return "%d"%item.value
elif isinstance(item,autosar.constant.StringValue):
return '"%s"'%item.value
elif isinstance(item, autosar.constant.BooleanValue):
return "1" if item.value else "0"
elif isinstance(item,autosar.constant.RecordValue):
tmp = [self._deriveInitValueFromAutosarConstant(x) for x in item.elements]
return "{"+','.join(tmp)+"}"
elif isinstance(item,autosar.constant.ArrayValue):
tmp = [self._deriveInitValueFromAutosarConstant(x) for x in item.elements]
return "{"+','.join(tmp)+"}"
else:
raise NotImplementedError(str(type(item)))
def import_autosar_swc(self, swc, ws=None, name=None):
assert(isinstance(swc, autosar.component.AtomicSoftwareComponent))
if name is None:
self.name=swc.name
else:
self.name = name
if ws is None:
ws=swc.rootWS()
for port in swc.providePorts:
self.add_autosar_port(port, ws)
for port in swc.requirePorts:
self.add_autosar_port(port, ws)
self.resolve_types()
return self
def add_autosar_port(self, port, ws=None):
"""
adds an autosar port to the node
        returns the newly added apx port object
"""
if ws is None:
ws=port.rootWS()
assert(ws is not None)
dataType=self._updateDataType(ws, port)
if dataType is not None:
if isinstance(port, autosar.component.RequirePort):
apx_port = apx.RequirePort(port.name, "T[%s]"%dataType.id, self._calcAttributeFromAutosarPort(ws, port))
return self.add_require_port(apx_port)
elif isinstance(port, autosar.component.ProvidePort):
apx_port = apx.ProvidePort(port.name, "T[%s]"%dataType.id, self._calcAttributeFromAutosarPort(ws, port))
return self.add_provide_port(apx_port)
else:
raise ValueError('invalid type '+str(type(port)))
def append(self, item):
"""
Adds the item to the node.
        Item can be a DataType, RequirePort, ProvidePort, autosar port or an APX text line (string)
        returns the added object (port or data type)
"""
if isinstance(item, apx.DataType):
return self.add_type(item)
if isinstance(item, apx.RequirePort):
return self.add_require_port(item)
elif isinstance(item, apx.ProvidePort):
return self.add_provide_port(item)
elif isinstance(item, autosar.component.Port):
return self.add_autosar_port(item)
elif isinstance(item, str):
parts = apx_split_line(item)
if len(parts) != 4:
raise ValueError("invalid APX string: '%s'"%item)
if parts[0]=='R':
newPort = apx.RequirePort(parts[1],parts[2],parts[3])
if newPort is not None:
return self.add_require_port(newPort)
else:
raise ValueError('apx.RequirePort() returned None')
elif parts[0]=='P':
newPort = apx.ProvidePort(parts[1],parts[2],parts[3])
if newPort is not None:
return self.add_provide_port(newPort)
else:
raise ValueError('apx.ProvidePort() returned None')
else:
raise ValueError(parts[0])
else:
raise ValueError(type(item))
def add_type(self, new_type):
if new_type.name not in self.dataTypeMap:
new_type.dsg.resolve_data_element(self.dataTypes)
self.dataTypeMap[new_type.name]=new_type
self.dataTypes.append(new_type)
return new_type
else:
existing_type = self.dataTypeMap[new_type.name]
self._verify_data_types_are_equal(existing_type, new_type)
return existing_type
def extend(self, other_node):
"""
Copies all port from other_node and adds them to this node
"""
for port in other_node.requirePorts+other_node.providePorts:
self.add_port_from_node(other_node, port)
return self
def add_require_port(self, port):
port.id = len(self.requirePorts)
if port.dsg.dataElement.isReference:
port.resolve_type(self.dataTypes)
self.requirePorts.append(port)
return port
def add_provide_port(self, port):
port.id = len(self.providePorts)
if port.dsg.dataElement.isReference:
port.resolve_type(self.dataTypes)
self.providePorts.append(port)
return port
def save_apx(self, output_dir='.', output_file = None, normalized=False):
"""
Saves node in the .apx file format
If normalized is True it uses the traditional type reference by ID.
If normalized is False it uses the newer type reference by name which is not fully supported yet by all clients.
If output_file is None (default) it generates a file based on the node name.
If output_file is None it generates the file in output_dir (defaults to current directory)
"""
if not self.isFinalized:
self.finalize_sorted()
if output_file is None:
output_file = os.path.normpath(os.path.join(output_dir, self.name+'.apx'))
with open(output_file, "w", newline='\n') as fp:
fp.write("APX/1.2\n") #APX Text header
self.write(fp, normalized)
fp.write("\n") #Add extra newline at end of file
def save_apx_normalized(self, output_dir='.', output_file = None):
self.save_apx(output_dir, output_file, True)
def dumps(self, normalized=False):
if not self.isFinalized:
self.finalize_sorted()
lines = self.lines(normalized)
text = 'APX/1.2\n'+'\n'.join(lines)+'\n'
return text
def dumps_normalized(self):
return self.dumps(True)
def write(self, fp, normalized=False):
"""
writes node as text in fp
"""
print('N"%s"'%self.name, file=fp)
for dataType in self.dataTypes:
print(str(dataType), file=fp)
for port in self.providePorts:
print(port.to_string(normalized), file=fp)
for port in self.requirePorts:
print(port.to_string(normalized), file=fp)
def lines(self, normalized=False):
"""
returns node as list of strings (one line at a time)
"""
lines = ['N"%s"'%self.name]
for dataType in self.dataTypes:
lines.append(str(dataType))
for port in self.providePorts:
lines.append(port.to_string(normalized))
for port in self.requirePorts:
lines.append(port.to_string(normalized))
return lines
def mirror(self, name=None):
"""
clones the node in a version where all provide and require ports are reversed
"""
if name is None:
name = self.name
mirror = Node(name)
mirror.dataTypes = deepcopy(self.dataTypes)
mirror.requirePorts = [port.mirror() for port in self.providePorts]
mirror.providePorts = [port.mirror() for port in self.requirePorts]
for dataType in mirror.dataTypes:
mirror.dataTypeMap[dataType.name]=dataType
mirror.resolve_types()
return mirror
def compact(self, name=None):
"""
clones the node in a version where all type definitions are removed
"""
if name is None:
name = self.name
compact = Node(name)
compact.requirePorts = [apx.RequirePort(port.name, port.dsg.resolve_data_element().to_string(normalized=True), str(port.attr) if port.attr is not None else None) for port in self.requirePorts]
compact.providePorts = [apx.ProvidePort(port.name, port.dsg.resolve_data_element().to_string(normalized=True), str(port.attr) if port.attr is not None else None) for port in self.providePorts]
return compact
def add_port_from_node(self, from_node, from_port, ignore_duplicates=False):
"""
Attempts to clone the port from the other node, including all its data types
If a port with from_port.name already exists it is ignored if ignore_duplicates is True,
otherwise it generates an error.
"""
if not isinstance(from_node, apx.Node):
raise ValueError('from_node argument must be of type apx.Node')
if not isinstance(from_port, apx.Port):
            raise ValueError('from_port argument must derive from type apx.Port')
port_list = self.providePorts if isinstance(from_port, apx.ProvidePort) else self.requirePorts
existing_port = self._check_duplicate_port(from_port)
if existing_port is not None:
return existing_port
to_port = from_port.clone()
from_data_element = from_port.dsg.dataElement
if from_data_element.typeCode == apx.REFERENCE_TYPE_CODE:
from_data_type = from_data_element.typeReference
if not isinstance(from_data_type, apx.base.DataType):
raise RuntimeError('Node.finalize() method must be called before this method can be used')
to_data_type = self.find(from_data_type.name)
if to_data_type is None:
to_data_type = self.add_data_type_from_node(from_node, from_data_type)
else:
self._verify_data_types_are_equal(to_data_type, from_data_type)
to_port.dsg.dataElement.typeReference = to_data_type
self.append(to_port)
return to_port
def add_data_type_from_node(self, from_node, from_data_type):
"""
Attempts to clone the data type from other node to this node
"""
if not isinstance(from_node, apx.Node):
raise ValueError('from_node argument must be of type apx.Node')
if not isinstance(from_data_type, apx.DataType):
raise ValueError('from_data_type argument must be of type apx.DataType')
from_data_element = from_data_type.dsg.dataElement
if (from_data_element.typeCode >= apx.UINT8_TYPE_CODE) and (from_data_element.typeCode < apx.RECORD_TYPE_CODE):
pass #no further action needed
elif (from_data_element.typeCode == apx.RECORD_TYPE_CODE):
for elem in from_data_element.elements:
if elem.typeCode == apx.REFERENCE_TYPE_CODE:
self.add_data_type_from_node(from_node, elem.typeReference)
else:
raise NotImplementedError(from_data_element.typeCode)
to_data_type = from_data_type.clone()
self.append(to_data_type)
return to_data_type
def find(self, name):
"""
        Finds a type or port by name.
        If name is a list, a list with one lookup result per element is returned.
"""
if isinstance(name, list):
result = []
for inner_name in name:
result.append(self._inner_find(inner_name))
return result
else:
return self._inner_find(name)
def _inner_find(self, name):
"""
finds type or port by name (internal implementation)
"""
for elem in self.dataTypes+self.requirePorts+self.providePorts:
if elem.name == name:
return elem
def compact_types(self):
"""
        Gives short sequential names (T0, T1, ...) to data types
"""
idx = 0
for type in self.dataTypes:
type.short_name = 'T' + str(idx)
idx += 1
def resolve_types(self):
"""
        Resolves all integer and string type references to their actual object counterparts
"""
for port in self.requirePorts+self.providePorts:
if port.dsg.dataElement.isReference:
port.resolve_type(self.dataTypes)
def finalize(self, sort=False, compact=False):
if not self.isFinalized:
self._sort_elements(sort)
self.resolve_types()
if compact:
self.compact_types()
self._set_type_ids()
self.isFinalized = True
return self
def finalize_sorted(self):
return self.finalize(sort=True)
def _verify_data_types_are_equal(self, existing_type, new_type):
existing_type_signature = existing_type.to_string(normalized=True)
new_type_signature = new_type.to_string(normalized=True)
if existing_type_signature != new_type_signature:
raise ValueError("Data type '{}' already exist but with different signature\nExpected: {}\nGot: {}".format(existing_type.name, existing_type_signature, new_type_signature))
def _check_duplicate_port(self, new_port):
for existing_port in self.providePorts+self.requirePorts:
if existing_port.name == new_port.name:
if isinstance(existing_port, apx.RequirePort) and isinstance(new_port, apx.ProvidePort):
raise ValueError("Cannot add provide-port with same name. Port '{}' already exists as require-port.".format(new_port.name))
elif isinstance(existing_port, apx.ProvidePort) and isinstance(new_port, apx.RequirePort):
raise ValueError("Cannot add require-port with same name. Port '{}' already exists as provide-port.".format(new_port.name))
else:
self._verify_ports_are_equal(existing_port, new_port)
return existing_port
return None
def _verify_ports_are_equal(self, existing_port, new_port):
existing_port_signature = existing_port.to_string(normalized=True)
new_port_signature = new_port.to_string(normalized=True)
if existing_port_signature != new_port_signature:
raise ValueError("Port '{}' already exist but with different signature\nExpected: {}\nGot: {}".format(existing_port.name, existing_port_signature, new_port_signature))
def _sort_elements(self, sort_all):
kfunc = lambda x: x.name
if len(self.dataTypes) > 0:
self.dataTypes = sorted(self.dataTypes, key=kfunc)
if sort_all:
if len(self.requirePorts) > 0:
self.requirePorts = sorted(self.requirePorts, key=kfunc)
if len(self.providePorts) > 0:
self.providePorts = sorted(self.providePorts, key=kfunc)
def _set_type_ids(self):
for i,data_type in enumerate(self.dataTypes):
data_type.id = i
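# Illustrative sketch of an end-to-end use of Node: build a node from
# explicit port objects (mirroring the doctest at the top of the class),
# dump it as APX text and derive the mirrored view a peer would declare.
# Signal names and init values are made up for the demo; this helper is
# not part of the py-apx API.
def _demo_node_roundtrip():
    node = apx.Node('DemoNode')
    node.append(apx.ProvidePort('EngineSpeed', 'S', '=65535'))
    node.append(apx.RequirePort('VehicleMode', 'C', '=7'))
    # dumps() finalizes the node (sorting and resolving type references)
    # and returns the APX/1.2 text representation.
    text = node.dumps()
    # mirror() swaps provide and require ports, which is what the other
    # side of the connection would use.
    peer = node.mirror('DemoNode_Client')
    return text, peer.dumps()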
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.linux import ip_lib
from neutron.common import exceptions
from neutron.tests import base
NETNS_SAMPLE = [
'12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc']
LINK_SAMPLE = [
'1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \\'
'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00',
'2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP '
'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff'
'\ alias openvswitch',
'3: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN '
'\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff',
'4: gw-ddc717df-49: <BROADCAST,MULTICAST> mtu 1500 qdisc noop '
'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff']
ADDR_SAMPLE = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
ADDR_SAMPLE2 = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
GATEWAY_SAMPLE1 = ("""
default via 10.35.19.254 metric 100
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE2 = ("""
default via 10.35.19.254 metric 100
""")
GATEWAY_SAMPLE3 = ("""
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE4 = ("""
default via 10.35.19.254
""")
DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2")
SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n"
"10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2")
SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n"
"10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1")
class TestSubProcessBase(base.BaseTestCase):
def setUp(self):
super(TestSubProcessBase, self).setUp()
self.execute_p = mock.patch('neutron.agent.linux.utils.execute')
self.execute = self.execute_p.start()
self.addCleanup(self.execute_p.stop)
def test_execute_wrapper(self):
ip_lib.SubProcessBase._execute('o', 'link', ('list',), 'sudo')
self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'],
root_helper='sudo')
def test_execute_wrapper_int_options(self):
ip_lib.SubProcessBase._execute([4], 'link', ('list',))
self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'],
root_helper=None)
def test_execute_wrapper_no_options(self):
ip_lib.SubProcessBase._execute([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
root_helper=None)
def test_run_no_namespace(self):
base = ip_lib.SubProcessBase('sudo')
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'link', 'list'],
root_helper=None)
def test_run_namespace(self):
base = ip_lib.SubProcessBase('sudo', 'ns')
base._run([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
root_helper='sudo')
def test_as_root_namespace(self):
base = ip_lib.SubProcessBase('sudo', 'ns')
base._as_root([], 'link', ('list',))
self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns',
'ip', 'link', 'list'],
root_helper='sudo')
def test_as_root_no_root_helper(self):
base = ip_lib.SubProcessBase()
self.assertRaises(exceptions.SudoRequired,
base._as_root,
[], 'link', ('list',))
class TestIpWrapper(base.BaseTestCase):
def setUp(self):
super(TestIpWrapper, self).setUp()
self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute')
self.execute = self.execute_p.start()
self.addCleanup(self.execute_p.stop)
def test_get_devices(self):
self.execute.return_value = '\n'.join(LINK_SAMPLE)
retval = ip_lib.IPWrapper('sudo').get_devices()
self.assertEqual(retval,
[ip_lib.IPDevice('lo'),
ip_lib.IPDevice('eth0'),
ip_lib.IPDevice('br-int'),
ip_lib.IPDevice('gw-ddc717df-49')])
self.execute.assert_called_once_with('o', 'link', ('list',),
'sudo', None)
def test_get_devices_malformed_line(self):
self.execute.return_value = '\n'.join(LINK_SAMPLE + ['gibberish'])
retval = ip_lib.IPWrapper('sudo').get_devices()
self.assertEqual(retval,
[ip_lib.IPDevice('lo'),
ip_lib.IPDevice('eth0'),
ip_lib.IPDevice('br-int'),
ip_lib.IPDevice('gw-ddc717df-49')])
self.execute.assert_called_once_with('o', 'link', ('list',),
'sudo', None)
def test_get_namespaces(self):
self.execute.return_value = '\n'.join(NETNS_SAMPLE)
retval = ip_lib.IPWrapper.get_namespaces('sudo')
self.assertEqual(retval,
['12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc'])
self.execute.assert_called_once_with('', 'netns', ('list',),
root_helper='sudo')
def test_add_tuntap(self):
ip_lib.IPWrapper('sudo').add_tuntap('tap0')
self.execute.assert_called_once_with('', 'tuntap',
('add', 'tap0', 'mode', 'tap'),
'sudo', None)
def test_add_veth(self):
ip_lib.IPWrapper('sudo').add_veth('tap0', 'tap1')
self.execute.assert_called_once_with('', 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1'),
'sudo', None)
def test_add_veth_with_namespaces(self):
ns2 = 'ns2'
with mock.patch.object(ip_lib.IPWrapper, 'ensure_namespace') as en:
ip_lib.IPWrapper('sudo').add_veth('tap0', 'tap1', namespace2=ns2)
en.assert_has_calls([mock.call(ns2)])
self.execute.assert_called_once_with('', 'link',
('add', 'tap0', 'type', 'veth',
'peer', 'name', 'tap1',
'netns', ns2),
'sudo', None)
def test_get_device(self):
dev = ip_lib.IPWrapper('sudo', 'ns').device('eth0')
self.assertEqual(dev.root_helper, 'sudo')
self.assertEqual(dev.namespace, 'ns')
self.assertEqual(dev.name, 'eth0')
def test_ensure_namespace(self):
with mock.patch.object(ip_lib, 'IPDevice') as ip_dev:
ip = ip_lib.IPWrapper('sudo')
with mock.patch.object(ip.netns, 'exists') as ns_exists:
ns_exists.return_value = False
ip.ensure_namespace('ns')
self.execute.assert_has_calls(
[mock.call([], 'netns', ('add', 'ns'), 'sudo', None)])
ip_dev.assert_has_calls([mock.call('lo', 'sudo', 'ns'),
mock.call().link.set_up()])
def test_ensure_namespace_existing(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd:
ip_ns_cmd.exists.return_value = True
ns = ip_lib.IPWrapper('sudo').ensure_namespace('ns')
self.assertFalse(self.execute.called)
self.assertEqual(ns.namespace, 'ns')
def test_namespace_is_empty_no_devices(self):
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = []
self.assertTrue(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_namespace_is_empty(self):
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'get_devices') as get_devices:
get_devices.return_value = [mock.Mock()]
self.assertFalse(ip.namespace_is_empty())
get_devices.assert_called_once_with(exclude_loopback=True)
def test_garbage_collect_namespace_does_not_exist(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = False
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
self.assertFalse(ip.garbage_collect_namespace())
ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')])
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.return_value.mock_calls)
self.assertEqual(mock_is_empty.mock_calls, [])
def test_garbage_collect_namespace_existing_empty_ns(self):
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = True
self.assertTrue(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call().exists('ns'),
mock.call().delete('ns')]
ip_ns_cmd_cls.assert_has_calls(expected)
def test_garbage_collect_namespace_existing_not_empty(self):
lo_device = mock.Mock()
lo_device.name = 'lo'
tap_device = mock.Mock()
tap_device.name = 'tap1'
with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
ip_ns_cmd_cls.return_value.exists.return_value = True
ip = ip_lib.IPWrapper('sudo', 'ns')
with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
mock_is_empty.return_value = False
self.assertFalse(ip.garbage_collect_namespace())
mock_is_empty.assert_called_once_with()
expected = [mock.call(ip),
mock.call().exists('ns')]
self.assertEqual(ip_ns_cmd_cls.mock_calls, expected)
self.assertNotIn(mock.call().delete('ns'),
ip_ns_cmd_cls.mock_calls)
def test_add_device_to_namespace(self):
dev = mock.Mock()
ip_lib.IPWrapper('sudo', 'ns').add_device_to_namespace(dev)
dev.assert_has_calls([mock.call.link.set_netns('ns')])
def test_add_device_to_namespace_is_none(self):
dev = mock.Mock()
ip_lib.IPWrapper('sudo').add_device_to_namespace(dev)
self.assertEqual(dev.mock_calls, [])
class TestIPDevice(base.BaseTestCase):
def test_eq_same_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap0')
self.assertEqual(dev1, dev2)
def test_eq_diff_name(self):
dev1 = ip_lib.IPDevice('tap0')
dev2 = ip_lib.IPDevice('tap1')
self.assertNotEqual(dev1, dev2)
def test_eq_same_namespace(self):
dev1 = ip_lib.IPDevice('tap0', 'ns1')
dev2 = ip_lib.IPDevice('tap0', 'ns1')
self.assertEqual(dev1, dev2)
def test_eq_diff_namespace(self):
dev1 = ip_lib.IPDevice('tap0', 'sudo', 'ns1')
dev2 = ip_lib.IPDevice('tap0', 'sudo', 'ns2')
self.assertNotEqual(dev1, dev2)
def test_eq_other_is_none(self):
dev1 = ip_lib.IPDevice('tap0', 'sudo', 'ns1')
self.assertNotEqual(dev1, None)
def test_str(self):
self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0')
class TestIPCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPCommandBase, self).setUp()
self.ip = mock.Mock()
self.ip.root_helper = 'sudo'
self.ip.namespace = 'namespace'
self.ip_cmd = ip_lib.IpCommandBase(self.ip)
self.ip_cmd.COMMAND = 'foo'
def test_run(self):
self.ip_cmd._run('link', 'show')
self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))])
def test_run_with_options(self):
self.ip_cmd._run('link', options='o')
self.ip.assert_has_calls([mock.call._run('o', 'foo', ('link', ))])
def test_as_root(self):
self.ip_cmd._as_root('link')
self.ip.assert_has_calls(
[mock.call._as_root([], 'foo', ('link', ), False)])
def test_as_root_with_options(self):
self.ip_cmd._as_root('link', options='o')
self.ip.assert_has_calls(
[mock.call._as_root('o', 'foo', ('link', ), False)])
class TestIPDeviceCommandBase(base.BaseTestCase):
def setUp(self):
super(TestIPDeviceCommandBase, self).setUp()
self.ip_dev = mock.Mock()
self.ip_dev.name = 'eth0'
self.ip_dev.root_helper = 'sudo'
self.ip_dev._execute = mock.Mock(return_value='executed')
self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev)
self.ip_cmd.COMMAND = 'foo'
def test_name_property(self):
self.assertEqual(self.ip_cmd.name, 'eth0')
class TestIPCmdBase(base.BaseTestCase):
def setUp(self):
super(TestIPCmdBase, self).setUp()
self.parent = mock.Mock()
self.parent.name = 'eth0'
self.parent.root_helper = 'sudo'
def _assert_call(self, options, args):
self.parent.assert_has_calls([
mock.call._run(options, self.command, args)])
def _assert_sudo(self, options, args, force_root_namespace=False):
self.parent.assert_has_calls(
[mock.call._as_root(options, self.command, args,
force_root_namespace)])
class TestIpLinkCommand(TestIPCmdBase):
def setUp(self):
super(TestIpLinkCommand, self).setUp()
self.parent._run.return_value = LINK_SAMPLE[1]
self.command = 'link'
self.link_cmd = ip_lib.IpLinkCommand(self.parent)
def test_set_address(self):
self.link_cmd.set_address('aa:bb:cc:dd:ee:ff')
self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff'))
def test_set_mtu(self):
self.link_cmd.set_mtu(1500)
self._assert_sudo([], ('set', 'eth0', 'mtu', 1500))
def test_set_up(self):
self.link_cmd.set_up()
self._assert_sudo([], ('set', 'eth0', 'up'))
def test_set_down(self):
self.link_cmd.set_down()
self._assert_sudo([], ('set', 'eth0', 'down'))
def test_set_netns(self):
self.link_cmd.set_netns('foo')
self._assert_sudo([], ('set', 'eth0', 'netns', 'foo'))
self.assertEqual(self.parent.namespace, 'foo')
def test_set_name(self):
self.link_cmd.set_name('tap1')
self._assert_sudo([], ('set', 'eth0', 'name', 'tap1'))
self.assertEqual(self.parent.name, 'tap1')
def test_set_alias(self):
self.link_cmd.set_alias('openvswitch')
self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch'))
def test_delete(self):
self.link_cmd.delete()
self._assert_sudo([], ('delete', 'eth0'))
def test_address_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd')
def test_mtu_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.mtu, 1500)
def test_qdisc_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qdisc, 'mq')
def test_qlen_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.qlen, 1000)
def test_alias_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.alias, 'openvswitch')
def test_state_property(self):
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.state, 'UP')
def test_settings_property(self):
expected = {'mtu': 1500,
'qlen': 1000,
'state': 'UP',
'qdisc': 'mq',
'brd': 'ff:ff:ff:ff:ff:ff',
'link/ether': 'cc:dd:ee:ff:ab:cd',
'alias': 'openvswitch'}
self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
self.assertEqual(self.link_cmd.attributes, expected)
self._assert_call('o', ('show', 'eth0'))
class TestIpAddrCommand(TestIPCmdBase):
def setUp(self):
super(TestIpAddrCommand, self).setUp()
self.parent.name = 'tap0'
self.command = 'addr'
self.addr_cmd = ip_lib.IpAddrCommand(self.parent)
def test_add_address(self):
self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255')
self._assert_sudo([4],
('add', '192.168.45.100/24', 'brd', '192.168.45.255',
'scope', 'global', 'dev', 'tap0'))
def test_add_address_scoped(self):
self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255',
scope='link')
self._assert_sudo([4],
('add', '192.168.45.100/24', 'brd', '192.168.45.255',
'scope', 'link', 'dev', 'tap0'))
def test_del_address(self):
self.addr_cmd.delete(4, '192.168.45.100/24')
self._assert_sudo([4],
('del', '192.168.45.100/24', 'dev', 'tap0'))
def test_flush(self):
self.addr_cmd.flush()
self._assert_sudo([], ('flush', 'tap0'))
def test_list(self):
expected = [
dict(ip_version=4, scope='global',
dynamic=False, cidr='172.16.77.240/24',
broadcast='172.16.77.255'),
dict(ip_version=6, scope='global',
dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64',
broadcast='::'),
dict(ip_version=6, scope='global',
dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64',
broadcast='::'),
dict(ip_version=6, scope='global',
dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64',
broadcast='::'),
dict(ip_version=6, scope='global',
dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64',
broadcast='::'),
dict(ip_version=6, scope='link',
dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64',
broadcast='::')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
self.parent._run = mock.Mock(return_value=test_case)
self.assertEqual(self.addr_cmd.list(), expected)
self._assert_call([], ('show', 'tap0'))
def test_list_filtered(self):
expected = [
dict(ip_version=4, scope='global',
dynamic=False, cidr='172.16.77.240/24',
broadcast='172.16.77.255')]
test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
for test_case in test_cases:
output = '\n'.join(test_case.split('\n')[0:4])
self.parent._run.return_value = output
self.assertEqual(self.addr_cmd.list('global',
filters=['permanent']), expected)
self._assert_call([], ('show', 'tap0', 'permanent', 'scope',
'global'))
class TestIpRouteCommand(TestIPCmdBase):
def setUp(self):
super(TestIpRouteCommand, self).setUp()
self.parent.name = 'eth0'
self.command = 'route'
self.route_cmd = ip_lib.IpRouteCommand(self.parent)
def test_add_gateway(self):
gateway = '192.168.45.100'
metric = 100
self.route_cmd.add_gateway(gateway, metric)
self._assert_sudo([],
('replace', 'default', 'via', gateway,
'metric', metric,
'dev', self.parent.name))
def test_del_gateway(self):
gateway = '192.168.45.100'
self.route_cmd.delete_gateway(gateway)
self._assert_sudo([],
('del', 'default', 'via', gateway,
'dev', self.parent.name))
def test_get_gateway(self):
test_cases = [{'sample': GATEWAY_SAMPLE1,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE2,
'expected': {'gateway': '10.35.19.254',
'metric': 100}},
{'sample': GATEWAY_SAMPLE3,
'expected': None},
{'sample': GATEWAY_SAMPLE4,
'expected': {'gateway': '10.35.19.254'}}]
for test_case in test_cases:
self.parent._run = mock.Mock(return_value=test_case['sample'])
self.assertEqual(self.route_cmd.get_gateway(),
test_case['expected'])
def test_pullup_route(self):
# interface is not the first in the list - requires
# deleting and creating existing entries
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10')
self._assert_sudo([], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2'))
self._assert_sudo([], ('append', '10.0.0.0/24', 'proto', 'kernel',
'src', '10.0.0.1', 'dev', 'qr-23380d11-d2'))
def test_pullup_route_first(self):
# interface is first in the list - no changes
output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2]
def pullup_side_effect(self, *args):
result = output.pop(0)
return result
self.parent._run = mock.Mock(side_effect=pullup_side_effect)
self.route_cmd.pullup_route('tap1d7888a7-10')
# Check two calls - device get and subnet get
self.assertEqual(len(self.parent._run.mock_calls), 2)
class TestIpNetnsCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNetnsCommand, self).setUp()
self.command = 'netns'
self.netns_cmd = ip_lib.IpNetnsCommand(self.parent)
def test_add_namespace(self):
ns = self.netns_cmd.add('ns')
self._assert_sudo([], ('add', 'ns'), force_root_namespace=True)
self.assertEqual(ns.namespace, 'ns')
def test_delete_namespace(self):
with mock.patch('neutron.agent.linux.utils.execute'):
self.netns_cmd.delete('ns')
self._assert_sudo([], ('delete', 'ns'), force_root_namespace=True)
def test_namespace_exists(self):
retval = '\n'.join(NETNS_SAMPLE)
self.parent._as_root.return_value = retval
self.assertTrue(
self.netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'))
self._assert_sudo('o', ('list',), force_root_namespace=True)
    def test_namespace_does_not_exist(self):
retval = '\n'.join(NETNS_SAMPLE)
self.parent._as_root.return_value = retval
self.assertFalse(
self.netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb'))
self._assert_sudo('o', ('list',), force_root_namespace=True)
def test_execute(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.linux.utils.execute') as execute:
self.netns_cmd.execute(['ip', 'link', 'list'])
execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip',
'link', 'list'],
root_helper='sudo',
check_exit_code=True)
def test_execute_env_var_prepend(self):
self.parent.namespace = 'ns'
with mock.patch('neutron.agent.linux.utils.execute') as execute:
env = dict(FOO=1, BAR=2)
self.netns_cmd.execute(['ip', 'link', 'list'], env)
execute.assert_called_once_with(
['ip', 'netns', 'exec', 'ns', 'env', 'FOO=1', 'BAR=2',
'ip', 'link', 'list'],
root_helper='sudo', check_exit_code=True)
class TestDeviceExists(base.BaseTestCase):
def test_device_exists(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = LINK_SAMPLE[1]
self.assertTrue(ip_lib.device_exists('eth0'))
_execute.assert_called_once_with('o', 'link', ('show', 'eth0'))
def test_device_does_not_exist(self):
with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute:
_execute.return_value = ''
_execute.side_effect = RuntimeError
self.assertFalse(ip_lib.device_exists('eth0'))
|
|
#!/usr/bin/env python
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Samsung Electronics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Trace a subunit stream in reasonable detail and high accuracy."""
import argparse
import functools
import os
import re
import sys
import mimeparse
import subunit
import testtools
DAY_SECONDS = 60 * 60 * 24
FAILS = []
RESULTS = {}
class Starts(testtools.StreamResult):
def __init__(self, output):
super(Starts, self).__init__()
self._output = output
def startTestRun(self):
self._neednewline = False
self._emitted = set()
def status(self, test_id=None, test_status=None, test_tags=None,
runnable=True, file_name=None, file_bytes=None, eof=False,
mime_type=None, route_code=None, timestamp=None):
super(Starts, self).status(
test_id, test_status,
test_tags=test_tags, runnable=runnable, file_name=file_name,
file_bytes=file_bytes, eof=eof, mime_type=mime_type,
route_code=route_code, timestamp=timestamp)
if not test_id:
if not file_bytes:
return
if not mime_type or mime_type == 'test/plain;charset=utf8':
mime_type = 'text/plain; charset=utf-8'
primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
content_type = testtools.content_type.ContentType(
primary, sub, parameters)
content = testtools.content.Content(
content_type, lambda: [file_bytes])
text = content.as_text()
if text and text[-1] not in '\r\n':
self._neednewline = True
self._output.write(text)
elif test_status == 'inprogress' and test_id not in self._emitted:
if self._neednewline:
self._neednewline = False
self._output.write('\n')
worker = ''
for tag in test_tags or ():
if tag.startswith('worker-'):
worker = '(' + tag[7:] + ') '
if timestamp:
timestr = timestamp.isoformat()
else:
timestr = ''
self._output.write('%s: %s%s [start]\n' %
(timestr, worker, test_id))
self._emitted.add(test_id)
def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
"""Clean up the test name for display.
    By default we strip out the tags in the test because they don't help us
    in identifying the test that is run to its result.
    Make it possible to strip out the testscenarios information (not to
    be confused with tempest scenarios); however, that's often needed to
    identify generated negative tests.
"""
if strip_tags:
tags_start = name.find('[')
tags_end = name.find(']')
if tags_start > 0 and tags_end > tags_start:
newname = name[:tags_start]
newname += name[tags_end + 1:]
name = newname
if strip_scenarios:
tags_start = name.find('(')
tags_end = name.find(')')
if tags_start > 0 and tags_end > tags_start:
newname = name[:tags_start]
newname += name[tags_end + 1:]
name = newname
return name
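# Illustrative self-check of cleanup_test_name; the test ids below are made-up
# examples, not real tests, and this helper is never invoked by main().
def _cleanup_test_name_examples():
    assert (cleanup_test_name(
        'tempest.api.test_foo.TestFoo.test_bar[id-1234,smoke]') ==
        'tempest.api.test_foo.TestFoo.test_bar')
    assert (cleanup_test_name(
        'tests.test_foo.TestFoo.test_bar(scenario-1)',
        strip_tags=False, strip_scenarios=True) ==
        'tests.test_foo.TestFoo.test_bar')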
def get_duration(timestamps):
start, end = timestamps
if not start or not end:
duration = ''
else:
delta = end - start
duration = '%d.%06ds' % (
delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
return duration
def find_worker(test):
for tag in test['tags']:
if tag.startswith('worker-'):
return int(tag[7:])
return 'NaN'
# Print out stdout/stderr if it exists, always
def print_attachments(stream, test, all_channels=False):
"""Print out subunit attachments.
Print out subunit attachments that contain content. This
runs in 2 modes, one for successes where we print out just stdout
and stderr, and an override that dumps all the attachments.
"""
channels = ('stdout', 'stderr')
for name, detail in test['details'].items():
# NOTE(sdague): the subunit names are a little crazy, and actually
# are in the form pythonlogging:'' (with the colon and quotes)
name = name.split(':')[0]
if detail.content_type.type == 'test':
detail.content_type.type = 'text'
if (all_channels or name in channels) and detail.as_text():
title = "Captured %s:" % name
stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
# indent attachment lines 4 spaces to make them visually
# offset
for line in detail.as_text().split('\n'):
stream.write(" %s\n" % line)
def show_outcome(stream, test, print_failures=False, failonly=False):
global RESULTS
status = test['status']
# TODO(sdague): ask lifeless why on this?
if status == 'exists':
return
worker = find_worker(test)
name = cleanup_test_name(test['id'])
duration = get_duration(test['timestamps'])
if worker not in RESULTS:
RESULTS[worker] = []
RESULTS[worker].append(test)
# don't count the end of the return code as a fail
if name == 'process-returncode':
return
if status == 'fail':
FAILS.append(test)
stream.write('{%s} %s [%s] ... FAILED\n' % (
worker, name, duration))
if not print_failures:
print_attachments(stream, test, all_channels=True)
elif not failonly:
if status == 'success':
stream.write('{%s} %s [%s] ... ok\n' % (
worker, name, duration))
print_attachments(stream, test)
elif status == 'skip':
stream.write('{%s} %s ... SKIPPED: %s\n' % (
worker, name, test['details']['reason'].as_text()))
else:
stream.write('{%s} %s [%s] ... %s\n' % (
worker, name, duration, test['status']))
if not print_failures:
print_attachments(stream, test, all_channels=True)
stream.flush()
def print_fails(stream):
"""Print summary failure report.
Currently unused, however there remains debate on inline vs. at end
reporting, so leave the utility function for later use.
"""
if not FAILS:
return
stream.write("\n==============================\n")
stream.write("Failed %s tests - output below:" % len(FAILS))
stream.write("\n==============================\n")
for f in FAILS:
stream.write("\n%s\n" % f['id'])
stream.write("%s\n" % ('-' * len(f['id'])))
print_attachments(stream, f, all_channels=True)
stream.write('\n')
def count_tests(key, value):
count = 0
for k, v in RESULTS.items():
for item in v:
if key in item:
if re.search(value, item[key]):
count += 1
return count
def run_time():
runtime = 0.0
for k, v in RESULTS.items():
for test in v:
runtime += float(get_duration(test['timestamps']).strip('s'))
return runtime
def worker_stats(worker):
tests = RESULTS[worker]
num_tests = len(tests)
delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
return num_tests, delta
def print_summary(stream):
stream.write("\n======\nTotals\n======\n")
stream.write("Run: %s in %s sec.\n" % (count_tests('status', '.*'),
run_time()))
stream.write(" - Passed: %s\n" % count_tests('status', 'success'))
stream.write(" - Skipped: %s\n" % count_tests('status', 'skip'))
stream.write(" - Failed: %s\n" % count_tests('status', 'fail'))
# we could have no results, especially as we filter out the process-codes
if RESULTS:
stream.write("\n==============\nWorker Balance\n==============\n")
for w in range(max(RESULTS.keys()) + 1):
if w not in RESULTS:
stream.write(
" - WARNING: missing Worker %s! "
"Race in testr accounting.\n" % w)
else:
num, time = worker_stats(w)
stream.write(" - Worker %s (%s tests) => %ss\n" %
(w, num, time))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--no-failure-debug', '-n', action='store_true',
dest='print_failures', help='Disable printing failure '
'debug information in realtime')
parser.add_argument('--fails', '-f', action='store_true',
dest='post_fails', help='Print failure debug '
                        'information after the stream is processed')
parser.add_argument('--failonly', action='store_true',
dest='failonly', help="Don't print success items",
default=(
os.environ.get('TRACE_FAILONLY', False)
is not False))
return parser.parse_args()
def main():
args = parse_args()
stream = subunit.ByteStreamToStreamResult(
sys.stdin, non_subunit_name='stdout')
starts = Starts(sys.stdout)
outcomes = testtools.StreamToDict(
functools.partial(show_outcome, sys.stdout,
print_failures=args.print_failures,
failonly=args.failonly
))
summary = testtools.StreamSummary()
result = testtools.CopyStreamResult([starts, outcomes, summary])
result.startTestRun()
try:
stream.run(result)
finally:
result.stopTestRun()
if count_tests('status', '.*') == 0:
print("The test run didn't actually run any tests")
return 1
if args.post_fails:
print_fails(sys.stdout)
print_summary(sys.stdout)
return (0 if summary.wasSuccessful() else 1)
if __name__ == '__main__':
sys.exit(main())
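# Typical invocation (an assumption based on the stdin handling above rather
# than a documented CLI contract): pipe a subunit v2 stream into this script,
# e.g. ``testr run --subunit | python trace.py --fails``, or replay a saved
# stream with ``python trace.py < run.subunit``.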
|
|
"""
readligo.py
Version 0.2
April 21, 2016
Jonah Kanner, Roy Williams, and Alan Weinstein
Updates in this version:
* Should now work with both Python 2 and Python 3
This module provides tools for reading LIGO data
files. Data along with supporting documentation
can be downloaded from the losc web site:
https://losc.ligo.org
Some possible use cases are shown below.
Example #0:
To load all data from a single file:
strain, time, dq = rl.loaddata('ligo_data/H-H1_LOSC_4_V1-842653696-4096.hdf5', 'H1')
Example #1:
segList = getsegs(842657792, 842658792, 'H1')
for (start, stop) in segList:
strain, meta, dq = getstrain(start, stop, 'H1')
# -- Analysis code here
...
This default configuration assumes that the needed LIGO data
files are available in the current working directory or a
subdirectory. LIGO data between the input GPS times is loaded
into STRAIN. META is a dictionary of gps start, gps stop, and the
sample time. DQ is a dictionary of data quality flags.
Example #2
segList = SegmentList('H1_segs.txt')
In Example 2, 'H1_segs.txt' is a segment list downloaded from the
LOSC web site using the Timeline application. This may be used in the same
manner as segList in example 1.
Example #3
filelist = FileList(directory='/home/ligodata')
segList = getsegs(842657792, 842658792, 'H1', filelist=filelist)
for start, stop in segList:
strain, meta, dq = getstrain(start, stop, 'H1', filelist=filelist)
# -- Analysis code here
In this example, the first command searches the indicated directory and
sub-directories for LIGO data files. This list of data files is then
used to construct a segment list and load the requested data.
-- SEGMENT LISTS --
Segment lists may be downloaded from the LOSC web site
using the Timeline Query Form or constructed directly
from the data files.
Read in a segment list downloaded from the Timeline
application on the LOSC web site with SegmentList:
>> seglist = SegmentList('H1_segs.txt')
OR
Construct a segment list directly from the LIGO
data files with getsegs():
>> seglist = getsegs(842657792, 842658792, 'H1', flag='DATA', filelist=None)
"""
import numpy as np
import os
import fnmatch
def read_frame(filename, ifo, readstrain=True):
"""
Helper function to read frame files
"""
try:
import Fr
except:
from pylal import Fr
if ifo is None:
raise TypeError("""To read GWF data, ifo must be 'H1', 'H2', or 'L1'.
def loaddata(filename, ifo=None):""")
#-- Read strain channel
strain_name = ifo + ':LOSC-STRAIN'
if readstrain:
sd = Fr.frgetvect(filename, strain_name)
strain = sd[0]
gpsStart = sd[1]
ts = sd[3][0]
else:
ts = 1
strain = 0
#-- Read DQ channel
dq_name = ifo + ':LOSC-DQMASK'
qd = Fr.frgetvect(filename, dq_name)
gpsStart = qd[1]
qmask = np.array(qd[0])
dq_ts = qd[3][0]
shortnameList_wbit = qd[5].split()
shortnameList = [name.split(':')[1] for name in shortnameList_wbit]
#-- Read Injection channel
inj_name = ifo + ':LOSC-INJMASK'
injdata = Fr.frgetvect(filename, inj_name)
injmask = injdata[0]
injnamelist_bit = injdata[5].split()
injnamelist = [name.split(':')[1] for name in injnamelist_bit]
return strain, gpsStart, ts, qmask, shortnameList, injmask, injnamelist
def read_hdf5(filename, readstrain=True):
"""
Helper function to read HDF5 files
"""
import h5py
dataFile = h5py.File(filename, 'r')
#-- Read the strain
if readstrain:
strain = dataFile['strain']['Strain'][...]
else:
strain = 0
ts = dataFile['strain']['Strain'].attrs['Xspacing']
#-- Read the DQ information
dqInfo = dataFile['quality']['simple']
qmask = dqInfo['DQmask'][...]
shortnameArray = dqInfo['DQShortnames'].value
shortnameList = list(shortnameArray)
# -- Read the INJ information
injInfo = dataFile['quality/injections']
injmask = injInfo['Injmask'][...]
injnameArray = injInfo['InjShortnames'].value
injnameList = list(injnameArray)
#-- Read the meta data
meta = dataFile['meta']
gpsStart = meta['GPSstart'].value
dataFile.close()
return strain, gpsStart, ts, qmask, shortnameList, injmask, injnameList
def loaddata(filename, ifo=None, tvec=True, readstrain=True):
"""
The input filename should be a LOSC .hdf5 file or a LOSC .gwf
    file. The file type will be determined from the extension.
The detector should be H1, H2, or L1.
The return value is:
STRAIN, TIME, CHANNEL_DICT
STRAIN is a vector of strain values
TIME is a vector of time values to match the STRAIN vector
unless the flag tvec=False. In that case, TIME is a
dictionary of meta values.
CHANNEL_DICT is a dictionary of data quality channels
"""
# -- Check for zero length file
if os.stat(filename).st_size == 0:
return None, None, None
file_ext = os.path.splitext(filename)[1]
if (file_ext.upper() == '.GWF'):
strain, gpsStart, ts, qmask, shortnameList, injmask, injnameList = read_frame(filename, ifo, readstrain)
else:
strain, gpsStart, ts, qmask, shortnameList, injmask, injnameList = read_hdf5(filename, readstrain)
#-- Create the time vector
gpsEnd = gpsStart + len(qmask)
if tvec:
time = np.arange(gpsStart, gpsEnd, ts)
else:
meta = {}
meta['start'] = gpsStart
meta['stop'] = gpsEnd
meta['dt'] = ts
#-- Create 1 Hz DQ channel for each DQ and INJ channel
channel_dict = {} #-- 1 Hz, mask
    slice_dict = {} #-- sampling freq. of strain, a list of slices
final_one_hz = np.zeros(qmask.shape, dtype='int32')
for flag in shortnameList:
bit = shortnameList.index(flag)
# Special check for python 3
if isinstance(flag, bytes): flag = flag.decode("utf-8")
channel_dict[flag] = (qmask >> bit) & 1
for flag in injnameList:
bit = injnameList.index(flag)
# Special check for python 3
if isinstance(flag, bytes): flag = flag.decode("utf-8")
channel_dict[flag] = (injmask >> bit) & 1
#-- Calculate the DEFAULT channel
try:
channel_dict['DEFAULT'] = ( channel_dict['DATA'] )
except:
print("Warning: Failed to calculate DEFAULT data quality channel")
if tvec:
return strain, time, channel_dict
else:
return strain, meta, channel_dict
def dq2segs(channel, gps_start):
"""
This function takes a DQ CHANNEL (as returned by loaddata or getstrain) and
the GPS_START time of the channel and returns a segment
list. The DQ Channel is assumed to be a 1 Hz channel.
    Returns a list of segment GPS start and stop times.
"""
#-- Check if the user input a dictionary
if type(channel) == dict:
try:
channel = channel['DEFAULT']
except:
print("ERROR: Could not find DEFAULT channel in dictionary")
raise
#-- Create the segment list
segments = dq_channel_to_seglist(channel, fs=1)
t0 = gps_start
segList = [(int(seg.start+t0), int(seg.stop+t0)) for seg in segments]
return SegmentList(segList)
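# Minimal sketch (never called here) of combining loaddata and dq2segs; the
# file name is taken from Example #0 in the module docstring and must exist
# locally for the call to succeed.
def _dq2segs_example():
    strain, meta, dq = loaddata(
        'ligo_data/H-H1_LOSC_4_V1-842653696-4096.hdf5', 'H1', tvec=False)
    return dq2segs(dq, meta['start'])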
def dq_channel_to_seglist(channel, fs=4096):
"""
WARNING:
    This function is designed to work with the output of the low level function
    LOADDATA, not the output from the main data loading function GETSTRAIN.
    Takes a data quality 1 Hz channel, as returned by
    loaddata, and returns a segment list. The segment
    list is really a list of slices for the
    associated strain vector.
If CHANNEL is a dictionary instead of a single channel,
an attempt is made to return a segment list for the DEFAULT
channel.
Returns a list of slices which can be used directly with the
strain and time outputs of LOADDATA.
"""
#-- Check if the user input a dictionary
if type(channel) == dict:
try:
channel = channel['DEFAULT']
except:
print("ERROR: Could not find DEFAULT channel in dictionary")
raise
# -- Create the segment list
condition = channel > 0
boundaries = np.where(np.diff(condition) == True)[0]
# -- Need to +1 due to how np.diff works
boundaries = boundaries + 1
# if the array "begins" True, we need to complete the first segment
if condition[0]:
boundaries = np.append(0,boundaries)
# if the array "ends" True, we need to complete the last segment
if condition[-1]:
boundaries = np.append(boundaries,len(condition))
# -- group the segment boundaries two by two
    segments = boundaries.reshape((len(boundaries) // 2, 2))
# -- Account for sampling frequency and return a slice
segment_list = [slice(start*fs, stop*fs) for (start,stop) in segments]
return segment_list
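# Illustrative sketch (never called here): convert the 1 Hz 'DATA' quality
# channel returned by loaddata into slices of the 4096 Hz strain vector.
# The file name comes from Example #0 in the module docstring.
def _dq_channel_to_seglist_example():
    strain, time, dq = loaddata(
        'ligo_data/H-H1_LOSC_4_V1-842653696-4096.hdf5', 'H1')
    segments = dq_channel_to_seglist(dq['DATA'], fs=4096)
    # Each entry is a slice that can be applied directly to strain or time.
    return [strain[seg] for seg in segments]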
class FileList():
"""
Class for lists of LIGO data files.
When a FileList instance is created, DIRECTORY will
be searched for LIGO data files. Sub-directories
will be searched as well. By default, the current
working directory is searched.
"""
def __init__(self, directory=None, cache=None):
# -- Set default directory
if directory is None:
if os.path.isdir('/archive/losc/strain-gwf'):
directory='/archive/losc/strain-gwf'
else:
directory='.'
print("Using data directory {0} ...".format(directory))
self.directory = directory
self.cache = cache
if cache is None:
self.list = self.searchdir(directory)
else:
self.readcache()
def searchdir(self, directory='.'):
frameList = []
hdfList = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, '*.gwf'):
frameList.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.hdf5'):
hdfList.append(os.path.join(root, filename))
return frameList + hdfList
def writecache(self, cacheName):
outfile = open(cacheName, 'w')
for file in self.list:
outfile.write(file + '\n')
outfile.close()
def readcache(self):
infile = open(self.cache, 'r')
self.list = infile.read().split()
infile.close()
def findfile(self, gps, ifo):
start_gps = gps - (gps % 4096)
filenamelist = fnmatch.filter(self.list, '*' + '-' + ifo + '*' + '-' + str(start_gps) + '-' + '*')
if len(filenamelist) == 0:
print("WARNING! No file found for GPS {0} and IFO {1}".format(gps, ifo))
return None
else:
return filenamelist[0]
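# Minimal usage sketch, illustrative only; the directory is borrowed from
# Example #3 in the module docstring and is assumed to hold LOSC data files.
def _filelist_example():
    filelist = FileList(directory='/home/ligodata')
    return filelist.findfile(842657792, 'H1')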
def getstrain(start, stop, ifo, filelist=None):
"""
START should be the starting gps time of the data to be loaded.
STOP should be the end gps time of the data to be loaded.
IFO should be 'H1', 'H2', or 'L1'.
FILELIST is an optional argument that is a FileList() instance.
The return value is (strain, meta, dq)
STRAIN: The data as a strain time series
META: A dictionary of meta data, especially the start time, stop time,
and sample time
DQ: A dictionary of the data quality flags
"""
if filelist is None:
filelist = FileList()
# -- Check if this is a science segment
segList = getsegs(start, stop, ifo, flag='DATA', filelist=filelist)
sl = segList.seglist
if (sl[0][0] == start) and (sl[0][1] == stop):
pass
else:
raise TypeError("""Error in getstrain.
Requested times include times where the data file was not found
or instrument not in SCIENCE mode.
Use readligo.getsegs() to construct a segment list.
The science mode segment list for the requested time range is:
{0}""".format(segList))
# -- Construct list of expected file start times
first = start - (start % 4096)
gpsList = np.arange(first, stop, 4096)
m_strain = np.array([])
m_dq = None
# -- Loop over needed files
for time in gpsList:
filename = filelist.findfile(time, ifo)
print("Loading {0}".format(filename))
#-- Read in data
strain, meta, dq = loaddata(filename, ifo, tvec=False)
if len(m_strain) == 0:
m_start = meta['start']
dt = meta['dt']
m_stop = meta['stop']
m_strain = np.append(m_strain, strain)
if m_dq is None:
m_dq = dq
else:
for key in dq.keys():
m_dq[key] = np.append(m_dq[key], dq[key])
# -- Trim the data
    # -- Use integer indices so the slicing works under both Python 2 and 3
    lndx = int(np.abs(start - m_start)*(1.0/dt))
    rndx = int(np.abs(stop - m_start)*(1.0/dt))
    m_strain = m_strain[lndx:rndx]
    for key in m_dq.keys():
        m_dq[key] = m_dq[key][int(lndx*dt):int(rndx*dt)]
meta['start'] = start
meta['stop'] = stop
meta['dt'] = dt
return m_strain, meta, m_dq
class SegmentList():
def __init__(self, filename, numcolumns=3):
if type(filename) is str:
if numcolumns == 4:
number, start, stop, duration = np.loadtxt(filename, dtype='int',unpack=True)
elif numcolumns == 2:
start, stop = np.loadtxt(filename, dtype='int',unpack=True)
elif numcolumns == 3:
start, stop, duration = np.loadtxt(filename, dtype='int',unpack=True)
            self.seglist = list(zip(start, stop))
elif type(filename) is list:
self.seglist = filename
else:
raise TypeError("SegmentList() expects the name of a segmentlist file from the LOSC website Timeline")
def __repr__(self):
return 'SegmentList( {0} )'.format(self.seglist)
def __iter__(self):
return iter(self.seglist)
def __getitem__(self, key):
return self.seglist[key]
def getsegs(start, stop, ifo, flag='DATA', filelist=None):
"""
Method for constructing a segment list from
LOSC data files. By default, the method uses
files in the current working directory to
construct a segment list.
If a FileList is passed in the flag FILELIST,
then those files will be searched for segments
passing the DQ flag passed as the FLAG argument.
"""
if filelist is None:
filelist = FileList()
# -- Construct list of expected file start times
first = start - (start % 4096)
gpsList = np.arange(first, stop, 4096)
m_dq = None
# -- Initialize segment list
segList = []
# -- Loop over needed files
for time in gpsList:
filename = filelist.findfile(time, ifo)
#-- Read in data
if filename is None:
print("WARNING! No file found with GPS start time {0}".format(time))
print("Segment list may contain errors due to missing files.")
continue
else:
try:
strain, meta, dq = loaddata(filename, ifo, tvec=False, readstrain=False)
except:
print("WARNING! Failed to load file {0}".format(filename))
print("Segment list may contain errors due to corrupt files.")
continue
if dq is None:
print("Warning! Found zero length file {0}".format(filename))
print("Segment list may contain errors.")
continue
#-- Add segments to list on a file-by-file basis
chan = dq[flag]
indxlist = dq_channel_to_seglist(chan, fs=1.0)
i_start = meta['start']
i_seglist = [(indx.start+i_start, indx.stop+i_start) for indx in indxlist]
i_seglist = [(int(begin), int(end)) for begin, end in i_seglist]
segList = segList + i_seglist
# -- Sort segments
segList.sort()
    # -- Merge contiguous segments
for i in range(0, len(segList)-1):
seg1 = segList[i]
seg2 = segList[i+1]
if seg1[1] == seg2[0]:
segList[i] = None
segList[i+1] = (seg1[0], seg2[1])
# -- Remove placeholder segments
segList = [seg for seg in segList if seg is not None]
# -- Trim segment list to fit within requested start/stop times
for seg in segList:
idx = segList.index(seg)
if (seg[1] < start):
segList[idx] = None
elif (seg[0] > stop):
segList[idx] = None
elif (seg[0] < start) and (seg[1] > stop):
segList[idx] = (start, stop)
elif (seg[0] < start):
segList[idx] = (start, seg[1])
elif (seg[1] > stop):
segList[idx] = (seg[0], stop)
# -- Remove placeholder segments
segList = [seg for seg in segList if seg is not None]
return SegmentList(segList)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `file_read_backwards` module."""
import itertools
import os
import tempfile
import unittest
from collections import deque
from file_read_backwards.file_read_backwards import FileReadBackwards
from file_read_backwards.file_read_backwards import supported_encodings
from file_read_backwards.buffer_work_space import new_lines
# doing this xrange/range dance so that we don't need to add additional dependencies of future or six modules
try:
xrange
except NameError:
xrange = range
created_files = set()
def helper_write(t, s, encoding="utf-8"):
"""A helper method to write out string s in specified encoding."""
t.write(s.encode(encoding))
def helper_create_temp_file(generator=None, encoding='utf-8'):
global created_files
if generator is None:
generator = ("line {}!\n".format(i) for i in xrange(42))
temp_file = tempfile.NamedTemporaryFile(delete=False)
for line in generator:
helper_write(temp_file, line, encoding)
temp_file.close()
print('Wrote file {}'.format(temp_file.name))
created_files.add(temp_file)
return temp_file
def helper_destroy_temp_file(temp_file):
temp_file.close()
os.unlink(temp_file.name)
def helper_destroy_temp_files():
global created_files
while created_files:
helper_destroy_temp_file(created_files.pop())
def tearDownModule():
helper_destroy_temp_files()
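# Illustrative sketch of the API under test (the path is hypothetical):
# FileReadBackwards yields lines last-to-first with the newline stripped,
# which is why the tests below appendleft() into a deque to restore order.
def _file_read_backwards_example(path='/tmp/example.log'):
    lines = deque()
    with FileReadBackwards(path, encoding='utf-8') as frb:
        for line in frb:
            lines.appendleft(line)
    return lines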
class TestFileReadBackwards(unittest.TestCase):
"""Class that contains various test cases for actual FileReadBackwards usage."""
@classmethod
def setUpClass(cls):
cls.empty_file = helper_create_temp_file(generator=(_ for _ in []))
cls.long_file = helper_create_temp_file()
@classmethod
def tearDownClass(cls):
helper_destroy_temp_files()
def test_with_completely_empty_file(self):
"""Test with a completely empty file."""
f = FileReadBackwards(self.empty_file.name)
expected_lines = deque()
lines_read = deque()
for l in f:
lines_read.appendleft(l)
self.assertEqual(expected_lines, lines_read)
def test_file_with_a_single_new_line_char_with_different_encodings(self):
"""Test a file with a single new line character."""
for encoding, new_line in itertools.product(supported_encodings, new_lines):
temp_file = helper_create_temp_file((l for l in [new_line]), encoding=encoding)
f = FileReadBackwards(temp_file.name)
expected_lines = deque([""])
lines_read = deque()
for l in f:
lines_read.appendleft(l)
self.assertEqual(
expected_lines,
lines_read,
msg="Test with {0} encoding with {1!r} as newline".format(encoding, new_line))
def test_file_with_one_line_of_text_with_accented_char_followed_by_a_new_line(self):
"""Test a file with a single line of text with accented char followed by a new line."""
b = b'Caf\xc3\xa9' # accented e in utf-8
s = b.decode("utf-8")
for new_line in new_lines:
temp_file = helper_create_temp_file((l for l in [s, new_line]))
f = FileReadBackwards(temp_file.name)
expected_lines = deque([s])
lines_read = deque()
for l in f:
                lines_read.appendleft(l)
self.assertEqual(expected_lines, lines_read, msg="Test with {0!r} as newline".format(new_line))
def test_file_with_one_line_of_text_followed_by_a_new_line_with_different_encodings(self):
"""Test a file with just one line of text followed by a new line."""
for encoding, new_line in itertools.product(supported_encodings, new_lines):
temp_file = helper_create_temp_file((l for l in ["something{0}".format(new_line)]), encoding=encoding)
f = FileReadBackwards(temp_file.name)
expected_lines = deque(["something"])
lines_read = deque()
for l in f:
lines_read.appendleft(l)
self.assertEqual(
expected_lines,
lines_read,
msg="Test with {0} encoding with {1!r} as newline".format(encoding, new_line))
def test_file_with_varying_number_of_new_lines_and_some_text_in_chunk_size(self):
"""Test a file with varying number of new lines and text of size custom chunk_size."""
chunk_size = 3
s = "t"
for number_of_new_lines in xrange(21):
for new_line in new_lines: # test with variety of new lines
temp_file = helper_create_temp_file((l for l in [new_line * number_of_new_lines, s * chunk_size]))
f = FileReadBackwards(temp_file.name, chunk_size=chunk_size)
expected_lines = deque()
for _ in xrange(number_of_new_lines):
expected_lines.append("")
expected_lines.append(s * chunk_size)
lines_read = deque()
for l in f:
lines_read.appendleft(l)
self.assertEqual(
expected_lines,
lines_read,
msg="Test with {0} of new line {1!r} followed by {2} of {3!r}".format(number_of_new_lines, new_line,
chunk_size, s))
def test_file_with_new_lines_and_some_accented_characters_in_chunk_size(self):
"""Test a file with many new lines and a random text of size custom chunk_size."""
chunk_size = 3
b = b'\xc3\xa9'
s = b.decode("utf-8")
for number_of_new_lines in xrange(21):
for new_line in new_lines: # test with variety of new lines
temp_file = helper_create_temp_file((l for l in [new_line * number_of_new_lines, s * chunk_size]))
f = FileReadBackwards(temp_file.name, chunk_size=chunk_size)
expected_lines = deque()
for _ in xrange(number_of_new_lines):
expected_lines.append("")
expected_lines.append(s * chunk_size)
lines_read = deque()
for l in f:
lines_read.appendleft(l)
self.assertEqual(
expected_lines,
lines_read,
msg="Test with {0} of new line {1!r} followed by {2} of \\xc3\\xa9".format(number_of_new_lines,
new_line, chunk_size))
def test_unsupported_encoding(self):
"""Test when users pass in unsupported encoding, NotImplementedError should be thrown."""
with self.assertRaises(NotImplementedError):
_ = FileReadBackwards(self.empty_file.name, encoding="not-supported-encoding") # noqa: F841
def test_file_with_one_line_of_text_readline(self):
"""Test a file with a single line of text followed by a new line."""
s = "Line0"
for new_line in new_lines:
temp_file = helper_create_temp_file((l for l in [s, new_line]))
with FileReadBackwards(temp_file.name) as fp:
l = fp.readline()
expected_line = s + os.linesep
self.assertEqual(l, expected_line)
# the file contains only 1 line
second_line = fp.readline()
expected_second_line = ""
self.assertEqual(second_line, expected_second_line)
def test_file_with_two_lines_of_text_readline(self):
"""Test a file with a two lines of text followed by a new line."""
line0 = "Line0"
line1 = "Line1"
for new_line in new_lines:
line0_with_n = "{}{}".format(line0, new_line)
line1_with_n = "{}{}".format(line1, new_line)
temp_file = helper_create_temp_file((l for l in [line0_with_n, line1_with_n]))
with FileReadBackwards(temp_file.name) as fp:
l = fp.readline()
expected_line = line1 + os.linesep
self.assertEqual(l, expected_line)
second_line = fp.readline()
expected_second_line = line0 + os.linesep
self.assertEqual(second_line, expected_second_line)
# EOF
third_line = fp.readline()
expected_third_line = ""
self.assertEqual(third_line, expected_third_line)
class TestFileReadBackwardsAsContextManager(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.temp_file = helper_create_temp_file()
@classmethod
def tearDownClass(cls):
helper_destroy_temp_files()
def test_behaves_as_classic(self):
with FileReadBackwards(self.temp_file.name) as f:
lines_read = deque()
for l in f:
lines_read.appendleft(l)
f2 = FileReadBackwards(self.temp_file.name)
lines_read2 = deque()
for l2 in f2:
lines_read2.appendleft(l2)
self.assertEqual(
lines_read,
lines_read2,
msg="The Context Manager way should behave exactly the same way as without using one."
)
class TestFileReadBackwardsCloseFunctionality(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.temp_file = helper_create_temp_file()
@classmethod
def tearDownClass(cls):
helper_destroy_temp_files()
def test_close_on_iterator(self):
with FileReadBackwards(self.temp_file.name) as f:
it = iter(f)
for count, i in enumerate(it):
if count == 2:
break
self.assertFalse(it.closed, msg="The fp should not be closed when not exhausted")
it.close()
self.assertTrue(it.closed, msg="Calling close() on the iterator should close it")
def test_not_creating_new_iterator(self):
with FileReadBackwards(self.temp_file.name) as f:
it1 = iter(f)
it2 = iter(f)
self.assertTrue(it1 is it2, msg="FileReadBackwards will return the same iterator")
def test_close_on_iterator_exhausted(self):
with FileReadBackwards(self.temp_file.name) as f:
it = iter(f)
for _ in it:
pass
self.assertTrue(it.closed, msg="The fp should be closed automatically when the iterator is exhausted.")
def test_close_on_reader_exit(self):
with FileReadBackwards(self.temp_file.name) as f:
it = iter(f)
self.assertTrue(it.closed,
msg="Iterator created by a reader should have its fp closed when the reader gets closed.")
def test_close_on_reader_explicitly(self):
f = FileReadBackwards(self.temp_file.name)
it = iter(f)
self.assertFalse(it.closed, msg="Iterator should not have its fp closed at this point.")
f.close()
self.assertTrue(it.closed,
msg="Iterator created by a reader should have its fp closed when the reader closes it.")
def test_close_on_reader_with_already_closed_iterator(self):
with FileReadBackwards(self.temp_file.name) as f:
it = iter(f)
it.close()
self.assertTrue(it.closed, msg="It should be okay to close (through the reader) an already closed iterator.")
def test_cannot_iterate_when_closed(self):
with FileReadBackwards(self.temp_file.name) as f:
it = iter(f)
it.close()
for _ in it:
self.fail(msg="An iterator should be exhausted when closed.")
|
|
# -*- coding: utf-8 -*-
"""
common.py
~~~~~~~~~~~~
This module implements the common and helper functions for the OneView REST API
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
__title__ = 'common'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2015) Hewlett Packard Enterprise ' \
' Development LP'
__license__ = 'MIT'
__status__ = 'Development'
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
# Looking for a switch type, using filters:
# https://<appliance>/rest/switch-types?filter="partNumber = '455880-B21'"
uri = {
#------------------------------------
# Settings
#------------------------------------
'globalSettings': '/rest/global-settings',
'vol-tmplate-policy': '/rest/global-settings/StorageVolumeTemplateRequired',
'eulaStatus': '/rest/appliance/eula/status',
'eulaSave': '/rest/appliance/eula/save',
'serviceAccess': '/rest/appliance/settings/enableServiceAccess',
'service': '/rest/appliance/settings/serviceaccess',
'applianceNetworkInterfaces': '/rest/appliance/network-interfaces',
'healthStatus': '/rest/appliance/health-status',
'version': '/rest/version',
'supportDump': '/rest/appliance/support-dumps',
'backups': '/rest/backups',
'archive': '/rest/backups/archive',
'dev-read-community-str': '/rest/appliance/device-read-community-string',
'licenses': '/rest/licenses',
'nodestatus': '/rest/appliance/nodeinfo/status',
'nodeversion': '/rest/appliance/nodeinfo/version',
'shutdown': '/rest/appliance/shutdown',
'trap': '/rest/appliance/trap-destinations',
'restores': '/rest/restores',
'domains': '/rest/domains',
'schema': '/rest/domains/schema',
'progress': '/rest/appliance/progress',
'appliance-firmware': '/rest/appliance/firmware/image',
'fw-pending': '/rest/appliance/firmware/pending',
#------------------------------------
# Security
#------------------------------------
'activeSessions': '/rest/active-user-sessions',
'loginSessions': '/rest/login-sessions',
'users': '/rest/users',
'userRole': '/rest/users/role',
'changePassword': '/rest/users/changePassword',
'roles': '/rest/roles',
'category-actions': '/rest/authz/category-actions',
'role-category-actions': '/rest/authz/role-category-actions',
'validator': '/rest/authz/validator',
#------------------------------------
# Facilities
#------------------------------------
'datacenters': '/rest/datacenters',
'powerDevices': '/rest/power-devices',
'powerDevicesDiscover': '/rest/power-devices/discover',
'racks': '/rest/racks',
#------------------------------------
# Systems
#------------------------------------
'servers': '/rest/server-hardware',
'server-hardware-types': '/rest/server-hardware-types',
'enclosures': '/rest/enclosures',
'enclosureGroups': '/rest/enclosure-groups',
'enclosurePreview': '/rest/enclosure-preview',
'fwUpload': '/rest/firmware-bundles',
'fwDrivers': '/rest/firmware-drivers',
#------------------------------------
# Connectivity
#------------------------------------
'conn': '/rest/connections',
'ct': '/rest/connection-templates',
'enet': '/rest/ethernet-networks',
'fcnet': '/rest/fc-networks',
'nset': '/rest/network-sets',
'li': '/rest/logical-interconnects',
'lig': '/rest/logical-interconnect-groups',
'ic': '/rest/interconnects',
'ictype': '/rest/interconnect-types',
'uplink-sets': '/rest/uplink-sets',
'ld': '/rest/logical-downlinks',
'idpool': '/rest/id-pools',
'vmac-pool': '/rest/id-pools/vmac',
'vwwn-pool': '/rest/id-pools/vwwn',
'vsn-pool': '/rest/id-pools/vsn',
#------------------------------------
# Server Profiles
#------------------------------------
'profiles': '/rest/server-profiles',
'profile-templates': '/rest/server-profile-templates',
'profile-networks': '/rest/server-profiles/available-networks',
'profile-networks-schema': '/rest/server-profiles/available-networks/schema',
'profile-available-servers': '/rest/server-profiles/available-servers',
'profile-available-servers-schema': '/rest/server-profiles/available-servers/schema',
'profile-available-storage-system': '/rest/server-profiles/available-storage-system',
'profile-available-storage-systems': '/rest/server-profiles/available-storage-systems',
'profile-available-targets': '/rest/server-profiles/available-targets',
'profile-messages-schema': '/rest/server-profiles/messages/schema',
'profile-ports': '/rest/server-profiles/profile-ports',
'profile-ports-schema': '/rest/server-profiles/profile-ports/schema',
'profile-schema': '/rest/server-profiles/schema',
#------------------------------------
# Health
#------------------------------------
'alerts': '/rest/alerts',
'events': '/rest/events',
'audit-logs': '/rest/audit-logs',
'audit-logs-download': '/rest/audit-logs/download',
#------------------------------------
# Certificates
#------------------------------------
'certificates': '/rest/certificates',
'ca': '/rest/certificates/ca',
'crl': '/rest/certificates/ca/crl',
'rabbitmq-kp': '/rest/certificates/client/rabbitmq/keypair',
'rabbitmq': '/rest/certificates/client/rabbitmq',
'cert-https': '/rest/certificates/https',
#------------------------------------
# Searching and Indexing
#------------------------------------
'resource': '/rest/index/resources',
'association': '/rest/index/associations',
'tree': '/rest/index/trees',
'search-suggestion': '/rest/index/search-suggestions',
# 'GetAllNetworks': ('/index/rest/index/resources'
# '?sort=name:asc&category=fc-networks'
# '&category=networks&start=0&count=-1'),
# 'GetEthNetworks': ('/index/rest/index/resources'
# '?sort=name:asc&category=networks&start=0&count=-1'),
# 'GetFcNetworks': ('/index/rest/index/resources'
# '?sort=name:asc&category=fc-networks&start=0&count=-1'),
#------------------------------------
# Logging and Tracking
#------------------------------------
'task': '/rest/tasks',
#------------------------------------
# Storage
#------------------------------------
'storage-pools': '/rest/storage-pools',
'storage-systems': '/rest/storage-systems',
'storage-volumes': '/rest/storage-volumes',
'vol-templates': '/rest/storage-volume-templates',
'connectable-vol': '/rest/storage-volume-templates/connectable-volume-templates',
'attachable-volumes': '/rest/storage-volumes/attachable-volumes',
#------------------------------------
# FC-SANS
#------------------------------------
'device-managers': '/rest/fc-sans/device-managers',
'managed-sans': '/rest/fc-sans/managed-sans',
'providers': '/rest/fc-sans/providers',
#------------------------------------
# Uncategorized
#------------------------------------
'unmanaged-devices': '/rest/unmanaged-devices',
}
############################################################################
# Utility to print resource to standard output
############################################################################
def print_entity(entity):
if not entity:
return
if 'name' in entity:
print(('name: ', entity['name']))
if isinstance(entity, dict):
for key, value in list(entity.items()):
print(('\t', key, ' = ', value))
elif hasattr(entity, '__iter__'):
for item in entity:
print(('\t', item))
else:
print(('\t', entity))
def print_task_tuple(entities):
print('Task/Entity Tuples:')
for indx, (task, entity) in enumerate(entities):
print((indx, ') Entry'))
try:
print (('\tTask: ', task['name'], task['taskState'],
task['taskStatus'], task['uri']))
except KeyError:
print('\tTask: n/a')
try:
print(('\tResource: ', entity['name'], entity['uri']))
except KeyError:
print('\tResource: n/a')
def get_members(mlist):
if not mlist:
return []
if not mlist['members']:
return []
return mlist['members']
def get_member(mlist):
if not mlist:
return None
if not mlist['members']:
return None
return mlist['members'][0]
############################################################################
# Create default Resource Instances
############################################################################
def make_user_dict(name, password, enabled, fullName, emailAddress,
officePhone, mobilePhone, roles=[]):
return {
'userName': name,
'password': password,
'fullName': fullName,
'emailAddress': emailAddress,
'officePhone': officePhone,
'mobilePhone': mobilePhone,
'enabled': enabled,
'type': 'UserAndRoles',
'roles': roles}
def make_Bandwidth(typicalBandwidth=2500, maximumBandwidth=10000):
""" Create an Bandwidth dictionary
Args:
typicalBandwidth:
The transmit throughput (mbps) that should be allocated to this
            connection. For FlexFabric connections, this value must not exceed
the maximum bandwidth of the selected network
maximumBandwidth:
Maximum transmit throughput (mbps) allowed on this connection. The
value is limited by the maximum throughput of the network link and
maximumBandwidth of the selected network.
Returns: dict
"""
return {'maximumBandwidth': maximumBandwidth,
'typicalBandwidth': typicalBandwidth
}
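# Quick illustration (values arbitrary, in mbps per the docstring): callers
# typically embed this dict in a connection definition.
def _example_bandwidth():
    return make_Bandwidth(typicalBandwidth=2000, maximumBandwidth=8000)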
def make_network_set(name, networkUris=[]):
""" Create an network-set dictionary
Args:
name:
Name of the Network Set
networkUris:
A set of Ethernet network URIs that will be members of this network
set. NOTE: all Ethernet networks in a network set must have unique
VLAN IDs.
Returns: dict
"""
return {
'name': name,
'type': 'network-set',
'nativeNetworkUri': None,
'networkUris': networkUris[:],
'connectionTemplateUri': None}
def make_ethernet_networkV3(name, description=None, ethernetNetworkType=None,
purpose='General', privateNetwork=False,
smartLink=True, vlanId=0):
""" Create an ethernet-networkV3 dictionary
Args:
name:
Name of the Ethernet Network
description:
            Brief description of the Ethernet Network
vlanId:
The Virtual LAN (VLAN) identification number assigned to the
network. The VLAN ID is optional when ethernetNetworkType is
Untagged or Tunnel. Multiple Ethernet networks can be defined
with the same VLAN ID, but all Ethernet networks in an uplink set
or network set must have unique VLAN IDs. The VLAN ID cannot be
changed once the network has been created.
purpose:
A description of the network's role within the logical
interconnect. Values: 'FaultTolerance', 'General', 'Management',
or 'VMMigration'
smartLink:
When enabled, the network is configured so that, within a logical
interconnect, all uplinks that carry the network are monitored.
If all uplinks lose their link to external interconnects, all
            corresponding downlink (server) ports which connect to the network
are forced into an unlinked state. This allows a server side NIC
teaming driver to automatically failover to an alternate path.
privateNetwork:
When enabled, the network is configured so that all downlink
(server) ports connected to the network are prevented from
communicating with each other within the logical interconnect.
Servers on the network only communicate with each other through an
external L3 router that redirects the traffic back to the logical
interconnect.
ethernetNetworkType:
The type of Ethernet network. It is optional. If this field is
missing or its value is Tagged, you must supply a valid vlanId;
if this value is Untagged or Tunnel, please either ignore vlanId
or specify vlanId equals 0. Values: 'NotApplicable', 'Tagged',
'Tunnel', 'Unknown', or 'Untagged'.
Returns: dict
"""
return {
'name': name,
'type': 'ethernet-networkV3',
'purpose': purpose,
'connectionTemplateUri': None,
'vlanId': vlanId,
'smartLink': smartLink,
'ethernetNetworkType': ethernetNetworkType,
'privateNetwork': privateNetwork}
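# Illustrative sketches only (names and VLAN id are made up): a Tagged network
# needs a non-zero vlanId, while a Tunnel network leaves vlanId at 0, matching
# the ethernetNetworkType rules described in the docstring above.
def _example_ethernet_networks():
    tagged = make_ethernet_networkV3('net-vlan-100',
                                     ethernetNetworkType='Tagged',
                                     vlanId=100)
    tunnel = make_ethernet_networkV3('net-tunnel',
                                     ethernetNetworkType='Tunnel')
    return tagged, tunnel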
def make_fc_networkV2(name, autoLoginRedistribution=True, description=None,
fabricType='FabricAttach', linkStabilityTime=30,
managedSanUri=None):
""" Create an ethernet-networkV3 dictionary
Args:
name:
Name of the Fibre Channel Network
autoLoginRedistribution:
Used for load balancing when logins are not evenly distributed
over the Fibre Channel links, such as when an uplink that was
previously down becomes available.
description:
            Brief description of the Fibre Channel Network
fabricType:
The supported Fibre Channel access method. Values: 'FabricAttach',
or 'DirectAttach'.
linkStabilityTime:
The time interval, expressed in seconds, to wait after a link that
was previously offline becomes stable, before automatic
redistribution occurs within the fabric. This value is not
effective if autoLoginRedistribution is false.
managedSanUri:
The managed SAN URI that is associated with this Fibre Channel
network. This value should be null for Direct Attach Fibre Channel
networks and may be null for Fabric Attach Fibre Channel networks.
Returns: dict
"""
return {
'name': name,
'type': 'fc-networkV2',
'connectionTemplateUri': None,
'fabricType': fabricType,
'autoLoginRedistribution': autoLoginRedistribution,
'linkStabilityTime': linkStabilityTime,
'managedSanUri': managedSanUri}
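# Illustrative usage of make_fc_networkV2 (a sketch; the name is hypothetical
# and the remaining arguments keep their defaults):
#
#   fc_san_a = make_fc_networkV2('SAN-A', fabricType='FabricAttach',
#                                linkStabilityTime=30)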
def make_interconnect_map_template():
return {
'interconnectMapEntryTemplates':
[{'logicalLocation': {
'locationEntries':
[{'type': 'Bay', 'relativeValue': N},
{'type': 'Enclosure', 'relativeValue': 1}]},
'permittedInterconnectTypeUri': None,
'logicalDownlinkUri': None
} for N in range(1, 9)], }
def make_enet_settings(name,
enableIgmpSnooping=False,
igmpIdleTimeoutInterval=260,
enableFastMacCacheFailover=True,
macRefreshInterval=5,
enableNetworkLoopProtection=True):
return {
'type': 'EthernetInterconnectSettings',
'name': name,
'enableIgmpSnooping': enableIgmpSnooping,
'igmpIdleTimeoutInterval': igmpIdleTimeoutInterval,
'enableFastMacCacheFailover': enableFastMacCacheFailover,
'macRefreshInterval': macRefreshInterval,
'enableNetworkLoopProtection': enableNetworkLoopProtection,
'interconnectType': 'Ethernet'
# 'description': null,
}
def make_storage_vol_templateV3(name,
capacity,
shareable,
storagePoolUri,
state='Normal',
description='',
storageSystemUri=None,
snapshotPoolUri=None,
provisionType='Thin'):
return {
'provisioning': {
'shareable': shareable,
'provisionType': provisionType,
'capacity': capacity,
'storagePoolUri': storagePoolUri},
'name': name,
'state': state,
'description': description,
'storageSystemUri': storageSystemUri,
'snapshotPoolUri': snapshotPoolUri,
'type': 'StorageVolumeTemplateV3'
}
def make_storage_volume(name,
capacity,
shareable,
storagePoolUri,
description='',
provisionType='Thin'):
return {
'name': name,
'description': description,
'provisioningParameters': {
'shareable': shareable,
'provisionType': provisionType,
'requestedCapacity': capacity,
'storagePoolUri': storagePoolUri},
'type': 'StorageVolumeTemplate'
}
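# Illustrative usage of the two storage helpers above (a sketch; the storage
# pool URI is a placeholder, and the capacity value is purely illustrative;
# use whatever units the target API expects):
#
#   pool_uri = '/rest/storage-pools/<pool-id>'   # hypothetical URI
#   tmpl = make_storage_vol_templateV3('gold-template', 1024 * 1024 * 1024,
#                                      shareable=False,
#                                      storagePoolUri=pool_uri)
#   vol = make_storage_volume('vol-01', 1024 * 1024 * 1024,
#                             shareable=False, storagePoolUri=pool_uri)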
def make_connectionInfo_dict(hostname, port, user, passwd, ssl=True):
return {'connectionInfo': [
{'name': 'Host',
'value': hostname},
{'name': 'Port',
'value': port},
{'name': 'Username',
'value': user},
{'name': 'Password',
'value': passwd},
{'name': 'UseSsl',
'value': ssl}]
}
def make_LogicalInterconnectGroupV2(name, ethernetSettings=[]):
return {
'name': name,
'type': 'logical-interconnect-groupV2',
'interconnectMapTemplate': make_interconnect_map_template(),
'uplinkSets': [], # call make_uplink_template
'stackingMode': 'Enclosure',
'ethernetSettings': ethernetSettings,
# 'telemetryConfiguration': None,
# 'snmpConfiguration' : None,
# 'description': None
}
def make_LogicalInterconnectGroupV3(name, ethernetSettings=[],
enclosureType='C7000'):
return {
'name': name,
'type': 'logical-interconnect-groupV3',
'interconnectMapTemplate': make_interconnect_map_template(),
'uplinkSets': [], # call make_uplink_template
'stackingMode': 'Enclosure',
'ethernetSettings': ethernetSettings,
# 'telemetryConfiguration': None,
# 'snmpConfiguration' : None,
# 'description': None
}
def make_EthernetSettingsV2(enableFastMacCacheFailover=True,
enableIgmpSnooping=False,
enableNetworkLoopProtection=True,
enablePauseFloodProtection=True,
igmpIdleTimeoutInterval=260,
macRefreshInterval=5):
    return {
'enableFastMacCacheFailover': enableFastMacCacheFailover,
'enableIgmpSnooping': enableIgmpSnooping,
'enableNetworkLoopProtection': enableNetworkLoopProtection,
'enablePauseFloodProtection': enablePauseFloodProtection,
'igmpIdleTimeoutInterval': igmpIdleTimeoutInterval,
'macRefreshInterval': macRefreshInterval,
'type': 'EthernetInterconnectSettingsV2'
}
def make_EthernetSettingsV3(enableFastMacCacheFailover=True,
enableIgmpSnooping=False,
enableNetworkLoopProtection=True,
enablePauseFloodProtection=True,
enableRichTLV=False,
igmpIdleTimeoutInterval=260,
macRefreshInterval=5):
    return {
'enableFastMacCacheFailover': enableFastMacCacheFailover,
'enableIgmpSnooping': enableIgmpSnooping,
'enableNetworkLoopProtection': enableNetworkLoopProtection,
'enablePauseFloodProtection': enablePauseFloodProtection,
'igmpIdleTimeoutInterval': igmpIdleTimeoutInterval,
'macRefreshInterval': macRefreshInterval,
'type': 'EthernetInterconnectSettingsV3'
}
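# Illustrative usage combining the LIG and Ethernet-settings helpers above
# (a sketch; the group name is hypothetical). The helpers simply pass
# ethernetSettings through, so whether a single settings object or a list is
# expected depends on the API version being targeted.
#
#   enet = make_EthernetSettingsV3(enableIgmpSnooping=True)
#   lig = make_LogicalInterconnectGroupV3('lig-prod', ethernetSettings=enet)
#   # lig['uplinkSets'] can then be populated with make_UplinkSetGroupV2
#   # results (defined further below).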
def make_trapdestinations_dict(trapDestination,
communityString='public',
enetTrapCategories=['Other',
'PortStatus',
'PortThresholds'],
fcTrapCategories=['Other', 'PortStatus'],
trapFormat='SNMPv1',
trapSeverities=['Critical',
'Info',
'Major',
'Minor',
'Normal',
'Unknown',
'Warning'],
vcmTrapCategories=['Legacy']):
    return {
'trapDestination': trapDestination,
'communityString': communityString,
'enetTrapCategories': enetTrapCategories,
'fcTrapCategories': fcTrapCategories,
'trapFormat': trapFormat,
'trapSeverities': trapSeverities,
'vcmTrapCategories': vcmTrapCategories
}
def make_snmpconfiguration_dict(enabled=False,
readCommunity='public',
snmpAccess=[],
systemContact=None,
trapDestinations=[]):
    return {
'enabled': enabled,
'readCommunity': readCommunity,
'snmpAccess': snmpAccess,
'systemContact': systemContact,
'trapDestinations': trapDestinations,
}
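# Illustrative usage of the SNMP helpers above (a sketch; the trap destination
# address is a documentation-range placeholder):
#
#   trap = make_trapdestinations_dict('192.0.2.10', trapFormat='SNMPv1')
#   snmp = make_snmpconfiguration_dict(enabled=True, readCommunity='public',
#                                      trapDestinations=[trap])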
def set_iobay_occupancy(switchMap, bays, stype):
for location in switchMap['interconnectMapEntryTemplates']:
entries = location['logicalLocation']['locationEntries']
if [x for x in entries if x['type'] == 'Bay' and x['relativeValue']
in bays]:
location['permittedInterconnectTypeUri'] = stype
def get_iobay_entry(interconnectMap, bay):
if not interconnectMap:
return
for iobay_entry in interconnectMap['interconnectMapEntryTemplates']:
entries = iobay_entry['logicalLocation']['locationEntries']
for entry in entries:
if entry['type'] == 'Bay':
if bay == entry['relativeValue']:
return iobay_entry
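# Illustrative usage of the interconnect-map helpers above (a sketch; the
# permitted interconnect type URI is a placeholder):
#
#   icm_map = make_interconnect_map_template()
#   set_iobay_occupancy(icm_map, bays=[1, 2],
#                       stype='/rest/interconnect-types/<type-id>')
#   bay1 = get_iobay_entry(icm_map, 1)   # entry template for bay 1, or None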
def make_UplinkSetGroupV2(name,
ethernetNetworkType='Tagged',
lacpTimer='Long',
logicalPortConfigInfos=[],
mode='Auto',
nativeNetworkUri=None,
networkType='Ethernet',
networkUris=[]):
if networkType == 'Ethernet':
return {'name': name,
'ethernetNetworkType': ethernetNetworkType,
'lacpTimer': lacpTimer,
'networkUris': networkUris,
'networkType': networkType,
'mode': mode,
'primaryPort': None,
'logicalPortConfigInfos': logicalPortConfigInfos,
'nativeNetworkUri': nativeNetworkUri,
}
if networkType == 'FibreChannel':
return {'name': name,
'ethernetNetworkType': 'NotApplicable',
'networkUris': networkUris,
'logicalPortConfigInfos': logicalPortConfigInfos,
'networkType': 'FibreChannel', # Ethernet or FibreChannel
'mode': mode,
}
raise Exception('networkType must be Ethernet or FibreChannel.')
def make_port_config_info(enclosure, bay, port, speed='Auto'):
return {'logicalLocation': {
'locationEntries':
[{'type': 'Enclosure', 'relativeValue': enclosure},
{'type': 'Bay', 'relativeValue': bay},
{'type': 'Port', 'relativeValue': port}]
},
'desiredSpeed': speed
}
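# Illustrative usage of make_UplinkSetGroupV2 together with
# make_port_config_info (a sketch; the network URI and the
# enclosure/bay/port numbers are placeholders):
#
#   ports = [make_port_config_info(1, 1, 1), make_port_config_info(1, 2, 1)]
#   uplink = make_UplinkSetGroupV2(
#       'uplink-prod',
#       networkUris=['/rest/ethernet-networks/<id>'],
#       logicalPortConfigInfos=ports)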
def make_EnclosureGroupV200(associatedLIGs, name,
powerMode='RedundantPowerSupply'):
""" Create an EnclosureGroupV200 dictionary
Args:
associatedLIGs:
            The logical interconnect group resource (as returned by the
            appliance); its URI and interconnect map entries are used to
            populate the enclosure group's interconnect bay mappings.
name:
The name of the enclosure group.
stackingMode:
Stacking mode of the enclosure group. Currently only the Enclosure
mode is supported. Values are 'Enclosure', 'MultiEnclosure',
            'None', or 'SwitchPairs'.
powerMode:
Power mode of the enclosure group. Values are 'RedundantPowerFeed'
or 'RedundantPowerSupply'.
Returns: dict
"""
ligUri = associatedLIGs['uri']
icms = associatedLIGs['interconnectMapTemplate']['interconnectMapEntryTemplates']
ligs = []
    # With the 200 API, the LIG URI can only be assigned if the LIG contains a
    # definition of the interconnect bay; i.e. if the LIG only defines ICMs 1
    # and 2, then bays 3 - 8 must be set to None. For example:
# 'interconnectBayMappings': [{'interconnectBay': 1,
# 'logicalInterconnectGroupUri': '/rest/logical-interconnect-groups/f8371e33-6d07-4477-9b63-cf8400242059'},
# {'interconnectBay': 2,
# 'logicalInterconnectGroupUri': '/rest/logical-interconnect-groups/f8371e33-6d07-4477-9b63-cf8400242059'}]}
# {'interconnectBay': 3,
# 'logicalInterconnectGroupUri': None},
# {'interconnectBay': 4,
# 'logicalInterconnectGroupUri': None},
# ...
for N in range(1, 9):
if N > len(icms):
ligs.append({'interconnectBay': N,
'logicalInterconnectGroupUri': None})
else:
ligs.append({'interconnectBay': N,
'logicalInterconnectGroupUri': ligUri})
return {
'name': name,
'type': 'EnclosureGroupV200',
'stackingMode': 'Enclosure',
'powerMode': powerMode,
'enclosureCount': 1,
'enclosureTypeUri': "/rest/enclosure-types/c7000",
'interconnectBayMappingCount': 8,
'interconnectBayMappings': ligs
}
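# Illustrative usage of make_EnclosureGroupV200 (a sketch): `lig` is assumed
# to be a logical interconnect group resource previously retrieved from the
# appliance, i.e. a dict carrying the 'uri' and 'interconnectMapTemplate'
# keys consumed above.
#
#   eg = make_EnclosureGroupV200(lig, 'eg-prod')
#   eg['interconnectBayMappings']   # bays beyond those defined in the LIG
#                                   # are mapped to None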
def make_enclosure_dict(host, user, passwd, egroup, state="",
licenseIntent='OneView',
firmwareBaseLineUri=None, force=False, forcefw=False):
return {
'hostname': host,
'username': user,
'password': passwd,
'force': force,
'enclosureGroupUri': egroup,
'firmwareBaselineUri': firmwareBaseLineUri,
'updateFirmwareOn': 'EnclosureOnly',
'forceInstallFirmware': forcefw,
'state': state,
'licensingIntent': licenseIntent}
def make_monitored_enclosure_dict(host, user, passwd, state='Monitored',
licenseIntent='OneViewStandard', force=False):
return {
'hostname': host,
'username': user,
'password': passwd,
'force': force,
'state': state,
'licensingIntent': licenseIntent}
def make_storage_system_dict(mdom, udom, mports, uports):
return {
'type': 'StorageSystem',
'managedDomain': mdom,
'unmanagedDomains': udom[:],
'managedPorts': mports[:],
'unmanagedPorts': uports[:],
}
def make_ProfileConnectionV4(cid, name, networkUri, profileTemplateConnection,
connectionBoot=None, functionType='Ethernet',
mac=None, macType='Virtual', portId='Auto',
requestedMbps=None, wwnn=None, wwpn=None,
wwpnType='Virtual'):
""" Create a ProfileConnectionV4 dictionary
Args:
connectionBoot:
            ConnectionBoot dictionary that describes server boot management.
functionType:
The function of the connection, either 'Ethernet' or 'FibreChannel'
cid:
A unique identifier for this connection. When creating or editing a
profile, an id is automatically assigned if the attribute is
omitted or 0 is specified. When editing a profile, a connection is
created if the id does not identify an existing connection.
mac:
The MAC address that is currently programmed on the FlexNic. The
value can be a virtual MAC, user defined MAC or physical MAC read
from the device. It cannot be modified after the connection is
created.
macType:
Specifies the type of MAC address to be programmed into the IO
Devices. The value can be 'Virtual', 'Physical' or 'UserDefined'.
It cannot be modified after the connection is created.
name:
A string used to identify the respective connection. The connection
name is case insensitive, limited to 63 characters and must be
unique within the profile.
networkUri:
Identifies the network or network set to be connected. Use GET
/rest/server-profiles/available-networks to retrieve the list of
available Ethernet networks, Fibre Channel networks and network
sets that are available along with their respective ports.
profileTemplateConnection:
Specifies if the connection list is to be used in defining a server
profile template.
portId:
Identifies the port (FlexNIC) used for this connection, for
example 'Flb 1:1-a'. The port can be automatically selected by
specifying 'Auto', 'None', or a physical port when creating or
            editing the connection. If 'Auto' is specified, a port that
            provides access to the selected network (networkUri) will be
            selected. A physical port (e.g. 'Flb 1:2') can be specified if the
            choice of a specific FlexNIC on the physical port is not important.
            If 'None' is specified, the connection will not be configured on
            the server hardware. When omitted, portId defaults to 'Auto'. Use
            /rest/server-profiles/profile-ports to retrieve the list of
            available ports.
requestedMbps:
The transmit throughput (mbps) that should be allocated to this
connection. For FlexFabric connections, this value must not exceed
the maximum bandwidth of the selected network (networkUri). If
omitted, this value defaults to the typical bandwidth value of the
selected network. The sum of the requestedBW values for the
connections (FlexNICs) on an adapter port cannot exceed the
capacity of the network link. For Virtual Connect Fibre Channel
connections, the available discrete values are based on the adapter
and the Fibre Channel interconnect module.
wwnn:
The node WWN address that is currently programmed on the FlexNic.
The value can be a virtual WWNN, user defined WWNN or physical WWNN
read from the device. It cannot be modified after the connection
is created.
wwpn:
The port WWN address that is currently programmed on the FlexNIC.
The value can be a virtual WWPN, user defined WWPN or the physical
WWPN read from the device. It cannot be modified after the
connection is created.
wwpnType:
            Specifies the type of WWN address to be programmed on the FlexNIC.
The value can be 'Virtual', 'Physical' or 'UserDefined'. It cannot
be modified after the connection is created. If the WWPN, WWNN,
MAC, connection's macType and connection's wwpnType are omitted in
the FC connection, then the connection's macType and connection's
wwpnType are set to the profile's default macType and profile's
default wwnnType.
Returns: dict
"""
if profileTemplateConnection:
return {
'boot': connectionBoot,
'functionType': functionType,
'id': cid,
'name': name,
'networkUri': networkUri,
'portId': portId,
'requestedMbps': requestedMbps,
}
    else:
return {
'boot': connectionBoot,
'functionType': functionType,
'id': cid,
'mac': mac,
'macType': macType,
'name': name,
'networkUri': networkUri,
'portId': portId,
'requestedMbps': requestedMbps,
'wwnn': wwnn,
'wwpn': wwpn,
'wwpnType': wwpnType,
}
def make_ConnectionBoot(priority='Primary',
arrayWwpn=None,
lun=None):
""" Create a ConnectionBoot dictionary
Args:
priority:
Indicates the boot priority for this device. PXE and Fibre Channel
connections are treated separately; an Ethernet connection and a
Fibre Channel connection can both be marked as Primary. The 'order'
attribute controls ordering among the different device types.
Choices are 'NotBootable', 'Primary', or 'Secondary'
arrayWwpn:
The wwpn of the target device that provides access to the Boot
Volume, 16 HEX digits as a string.
lun:
The LUN of the boot volume presented by the target device. The
value can be either 1 to 3 decimal digits in the range 0 to 255 or
13 to 16 HEX digits as a string.
Returns: dict
"""
if arrayWwpn is None and lun is None:
return {
'priority': priority}
else:
return {
'priority': priority,
'targets': make_BootTarget(arrayWwpn, lun)}
def make_BootTarget(arrayWwpn=None, lun=None):
""" Create a BootTarget dictionary
Args:
arrayWwpn:
The wwpn of the target device that provides access to the Boot
Volume, 16 HEX digits as a string.
lun:
The LUN of the boot volume presented by the target device. The
value can be either 1 to 3 decimal digits in the range 0 to 255 or
13 to 16 HEX digits as a string.
Returns: dict
"""
return [{'arrayWwpn': arrayWwpn,
'lun': lun}]
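# Illustrative usage chaining the boot and connection helpers above (a sketch;
# the WWPN, LUN and Fibre Channel network URI are placeholders):
#
#   boot = make_ConnectionBoot(priority='Primary',
#                              arrayWwpn='5001438004C8E5D0', lun='0')
#   conn = make_ProfileConnectionV4(1, 'fc-boot-a',
#                                   '/rest/fc-networks/<id>',
#                                   profileTemplateConnection=False,
#                                   connectionBoot=boot,
#                                   functionType='FibreChannel')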
def make_ServerProfileTemplateV1(name=None,
description=None,
serverProfileDescription=None,
serverHardwareTypeUri=None,
enclosureGroupUri=None,
affinity=None,
hideUnusedFlexNics=None,
profileConnectionV4=None):
"""
Create a ServerProfileTemplateV1 dictionary for use with the V200 API
Args:
name:
Unique name of the Server Profile Template
description:
Description of the Server Profile Template
serverProfileDescription:
The description of the server profiles created from this template.
serverHardwareTypeUri:
Identifies the server hardware type for which the Server Profile
was designed. The serverHardwareTypeUri is determined when the
profile is created.
enclosureGroupUri:
Identifies the enclosure group for which the Server Profile Template
was designed. The enclosureGroupUri is determined when the profile
template is created and cannot be modified.
affinity:
This identifies the behavior of the server profile when the server
hardware is removed or replaced. This can be set to 'Bay' or
'BayAndServer'.
hideUnusedFlexNics:
This setting controls the enumeration of physical functions that do
not correspond to connections in a profile.
profileConnectionV4:
An array of profileConnectionV4
Returns: dict
"""
return {
'type': 'ServerProfileTemplateV1',
'name': name,
'description': description,
'serverProfileDescription': serverProfileDescription,
'serverHardwareTypeUri': serverHardwareTypeUri,
'enclosureGroupUri': enclosureGroupUri,
'affinity': affinity,
'hideUnusedFlexNics': hideUnusedFlexNics,
'connections': profileConnectionV4}
def make_ServerProfileV5(affinity='Bay',
biosSettings=None,
bootSettings=None,
bootModeSetting=None,
profileConnectionV4=None,
description=None,
firmwareSettingsV3=None,
hideUnusedFlexNics=True,
localStorageSettingsV3=None,
macType='Virtual',
name=None,
sanStorageV3=None,
serialNumber=None,
serialNumberType='Physical',
serverHardwareTypeUri=None,
serverHardwareUri=None,
serverProfileTemplateUri=None,
uuid=None,
wwnType='Virtual'):
""" Create a ServerProfileV5 dictionary for use with the V200 API
Args:
affinity:
This identifies the behavior of the server profile when the server
hardware is removed or replaced. This can be set to 'Bay' or
'BayAndServer'.
biosSettings:
Dictionary that describes Server BIOS settings
bootSettings:
Dictionary that indicates that the server will attempt to boot from
this connection. This object can only be specified if
"boot.manageBoot" is set to 'true'
bootModeSetting:
            Dictionary that describes the boot mode settings to be configured on
Gen9 and newer servers.
profileConnectionV4:
            Array of ProfileConnectionV4
description:
Description of the Server Profile
firmwareSettingsV3:
            FirmwareSettingsV3 dictionary that defines the firmware baseline
            and management.
hideUnusedFlexNics:
This setting controls the enumeration of physical functions that do
not correspond to connections in a profile.
localStorageSettingsV3:
            Dictionary that describes the local storage settings.
macType:
Specifies the type of MAC address to be programmed into the IO
devices. The value can be 'Virtual', 'Physical' or 'UserDefined'.
name:
Unique name of the Server Profile
sanStorageV3:
            Dictionary that describes the SAN storage settings.
serialNumber:
A 10-byte value that is exposed to the Operating System as the
server hardware's Serial Number. The value can be a virtual serial
number, user defined serial number or physical serial number read
from the server's ROM. It cannot be modified after the profile is
created.
serialNumberType:
Specifies the type of Serial Number and UUID to be programmed into
the server ROM. The value can be 'Virtual', 'UserDefined', or
'Physical'. The serialNumberType defaults to 'Virtual' when
serialNumber or uuid are not specified. It cannot be modified
after the profile is created.
serverHardwareTypeUri:
Identifies the server hardware type for which the Server Profile
was designed. The serverHardwareTypeUri is determined when the
profile is created.
serverHardwareUri:
Identifies the server hardware to which the server profile is
currently assigned, if applicable
serverProfileTemplateUri:
Identifies the Server profile template the Server Profile is based
on.
uuid:
A 36-byte value that is exposed to the Operating System as the
server hardware's UUID. The value can be a virtual uuid, user
defined uuid or physical uuid read from the server's ROM. It
cannot be modified after the profile is created.
wwnType:
Specifies the type of WWN address to be programmed into the IO
devices. The value can be 'Virtual', 'Physical' or 'UserDefined'.
It cannot be modified after the profile is created.
Returns: dict
"""
return {
'affinity': affinity,
'bios': biosSettings,
'boot': bootSettings,
'bootMode': bootModeSetting,
'connections': profileConnectionV4,
'description': description,
'firmware': firmwareSettingsV3,
'hideUnusedFlexNics': hideUnusedFlexNics,
'localStorage': localStorageSettingsV3,
'macType': macType,
'name': name,
'sanStorage': sanStorageV3,
'serialNumber': serialNumber,
'serialNumberType': serialNumberType,
'serverHardwareTypeUri': serverHardwareTypeUri,
'serverHardwareUri': serverHardwareUri,
'serverProfileTemplateUri': serverProfileTemplateUri,
'type': 'ServerProfileV5',
'uuid': uuid,
'wwnType': wwnType
}
def make_FirmwareSettingsV3(firmwareUri,
firmwareInstallType,
manageFirmware=True,
forceInstallFirmware=False):
""" Create a FirmwareSettingsV3 dictionary for use with the V200 API
Args:
firmwareUri:
Identifies the firmware baseline to be applied to the server
hardware.
firmwareInstallType:
FirmwareAndOSDrivers:
Updates the firmware and OS drivers without powering down the
server hardware using HP Smart Update Tools.
FirmwareOnly:
Updates the firmware without powering down the server hardware
                using HP Smart Update Tools.
FirmwareOnlyOfflineMode:
Manages the firmware through HP OneView. Selecting this option
requires the server hardware to be powered down.
manageFirmware:
Indicates that the server firmware is configured using the server
profile
forceInstallFirmware:
Force installation of firmware even if same or newer version is
installed.
Returns: dict
"""
    # 'firmwareInstallType' mirrors the documented argument above; verify the
    # attribute name against the API version in use.
    return {'firmwareBaselineUri': firmwareUri,
            'firmwareInstallType': firmwareInstallType,
            'manageFirmware': manageFirmware,
            'forceInstallFirmware': forceInstallFirmware
            }
def make_BiosSettings(manageBios=True, overriddenSettings=[]):
return {'manageBios': manageBios,
'overriddenSettings': overriddenSettings
}
def make_BootSettings(order, manageBoot=False):
""" Create a BootSettings dictionary for use with ServerProfileV5
Args:
manageBoot:
Indicates whether the boot order is configured using the server
profile.
order:
Defines the order in which boot will be attempted on the available
devices as an array of strings: 'CD', 'USB', 'HardDisk', 'PXE'
Returns: dict
"""
return {'manageBoot': manageBoot,
'order': order
}
def make_BootModeSetting(manageMode, mode, pxeBootPolicy):
""" Create a BootModeSetting dictionary (only with Gen9 and newer)
Args:
manageMode:
Boolean value indicates whether the boot mode is configured using
the server profile.
mode:
The environment used for server boot operations. Supported values
are: 'UEFI', 'UEFIOptimized', or 'BIOS'.
pxeBootPolicy:
Defines the filtering or priority of the PXE boot options for each
enabled NIC port. This field is required only when the "mode" is
set to "UEFI" or "UEFIOptimized". Possible values are:
'Auto': No change from current server setting
'IPv4': Only IPv4 entries will be allowed in the boot order.
'IPv6': Only IPv6 entries will be allowed in the boot order.
'IPv4ThenIPv6': both IPv4 and IPv6 entries will be present in
the boot order with IPV4 entries coming first.
'IPv6ThenIPv4': both IPv4 and IPv6 entries will be present in
the boot order with IPv6 entries coming first.
Returns: dict
"""
return {'manageMode': manageMode,
'mode': mode,
'pxeBootPolicy': pxeBootPolicy
}
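# Illustrative usage assembling a server profile from the helpers above
# (a sketch; the server hardware URI is a placeholder and only a subset of
# the available profile fields is populated):
#
#   boot = make_BootSettings(['HardDisk', 'PXE'], manageBoot=True)
#   boot_mode = make_BootModeSetting(True, 'UEFI', 'IPv4')
#   profile = make_ServerProfileV5(
#       name='profile-01',
#       serverHardwareUri='/rest/server-hardware/<id>',
#       bootSettings=boot,
#       bootModeSetting=boot_mode)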
def make_LocalStorageSettingsV3(controllers):
""" Create a LocalStorageSettingsV3 dictionary
Args:
controllers:
Array of LocalStorageEmbeddedController
Returns: dict
"""
return {'controllers': controllers}
def make_LocalStorageEmbeddedController(importConfiguration, initialize,
LogicalDrives, managed, mode,
slotNumber='0'):
""" Create a LocalStorageEmbeddedController dictionary
Args:
importConfiguration:
Boolean, should the logical drives in the current configuration be
imported.
        initialize:
            Boolean, should the controller be initialized before configuration.
        LogicalDrives:
            Array of LogicalDrivesV3
        managed:
            Boolean value determines if the controller is managed by OneView
        mode:
            Determines the mode of operation of the controller. The controller
            mode can be RAID or HBA.
        slotNumber:
            The PCI slot number used by the controller. This value will always
            be set to '0', as only the embedded controller is supported in the
            current version.
    Returns: dict
    """
    # NOTE: key names mirror the documented parameters above; verify them
    # against the target API version.
    return {'importConfiguration': importConfiguration,
            'initialize': initialize,
            'logicalDrives': LogicalDrives,
            'managed': managed,
            'mode': mode,
            'slotNumber': slotNumber
            }
def make_LogicalDriveV3(bootable, driveName, driveTechnology,
numPhysicalDrives, raidLevel):
""" Create a LocalDriveV3 dictionary
Args:
bootable:
Indicates if the logical drive is bootable or not.
driveName:
The name of the logical drive.
driveTechnology:
Defines the interface type for drives that will be used to build
the logical drive. Supported values depend on the local storage
capabilities of the selected server hardware type.
numPhysicalDrives:
The number of physical drives to be used to build the logical
drive. The provided values must be consistent with the selected
RAID level and cannot exceed the maximum supported number of
drives for the selected server hardware type.
raidLevel:
The RAID level of the logical drive.
Returns: dict
"""
return {'bootable': bootable,
'driveName': driveName,
'driveTechnology': driveTechnology,
'numPhysicalDrives': numPhysicalDrives,
'raidLevel': raidLevel
}
def make_SanStorageV3(hostOSType, manageSanStorage, volumeAttachments):
""" Create a SanStorageV3 dictionary
Args:
hostOSType:
The operating system type of the host. To retrieve the list of
supported host OS types, issue a REST Get request using the
/rest/storage-systems/host-types API.
manageSanStorage:
Boolean, identifies if SAN is managed in the server profile.
volumeAttachments:
Array of VolumeAttachmentV2
Returns: dict
"""
return {'hostOSType': hostOSType,
'manageSanStorage': manageSanStorage,
'volumeAttachments': [volumeAttachments],
}
def make_VolumeAttachmentV2(lun=None,
lunType='Auto',
permanent=False,
storagePaths=[],
volumeName=None,
volumeProvisionType='Thin',
volumeProvisionedCapacityBytes=None,
volumeShareable=False,
volumeStoragePoolUri=None,
volumeStorageSystemUri=None,
volumeUri=None):
""" Create a VolumeAttachmentV2 dictionary
Args:
lun:
The logical unit number.
lunType:
The logical unit number type: 'Auto' or 'Manual'.
permanent:
If true, indicates that the volume will persist when the profile is
deleted. If false, then the volume will be deleted when the profile
is deleted.
storagePaths:
Array of StoragePathV2
volumeName:
The name of the volume. Required when creating a volume.
volumeProvisionType:
The provisioning type of the new volume: 'Thin' or 'Thick'. This
attribute is required when creating a volume.
volumeProvisionedCapacityBytes:
The requested provisioned capacity of the storage volume in bytes.
This attribute is required when creating a volume.
volumeShareable:
Identifies whether the storage volume is shared or private. If
false, then the volume will be private. If true, then the volume
will be shared. This attribute is required when creating a volume.
volumeStoragePoolUri:
The URI of the storage pool associated with this volume
attachment's volume.
volumeStorageSystemUri:
The URI of the storage system associated with this volume
attachment.
volumeUri:
The URI of the storage volume associated with this volume
attachment.
Returns: dict
"""
if volumeProvisionedCapacityBytes:
volAttach = {'id': None,
'lunType': lunType,
'permanent': permanent,
'volumeName': volumeName,
'volumeUri': None,
'volumeProvisionType': volumeProvisionType,
'volumeProvisionedCapacityBytes': volumeProvisionedCapacityBytes,
'volumeShareable': volumeShareable,
'volumeStoragePoolUri': volumeStoragePoolUri,
'volumeStorageSystemUri': None,
'storagePaths': storagePaths,
}
else:
volAttach = {'id': None,
'lunType': lunType,
'volumeUri': volumeUri,
'volumeStoragePoolUri': volumeStoragePoolUri,
'volumeStorageSystemUri': volumeStorageSystemUri,
'storagePaths': storagePaths,
}
if lunType == 'Manual':
volAttach['lun'] = lun
return volAttach
def make_ephemeral_volume_dict(lun, lunType, volumeUri, volumeStoragePoolUri,
volumeStorageSystemUri, storagePaths,
permanent=True, volumeId=None):
return {'id': volumeId,
'lun': lun,
'lunType': lunType,
'volumeUri': volumeUri,
'volumeStoragePoolUri': volumeStoragePoolUri,
'volumeStorageSystemUri': volumeStorageSystemUri,
'storagePaths': storagePaths,
}
def make_StoragePathV2(connectionId=None, isEnabled=True,
storageTargetType='Auto', storageTargets=[]):
""" Create a StoragePathV2 dictionary
Args:
connectionId:
The ID of the connection associated with this storage path. Use
GET /rest/server-profiles/available-networks to retrieve the list
of available networks
isEnabled:
Identifies if the storage path is enabled.
storageTargetType:
If set to 'Auto', the storage system will automatically identify
the storage targets. In this case, set the storageTargets field to
an empty array. If set to 'TargetPorts', the storage targets can
be manually specified in the storageTargets field using
comma-separated strings.
storageTargets:
Array of WWPNs of the targets on the storage system. If
storageTargetType is set to Auto, the storage system will
automatically select the target ports, in which case the
storageTargets field is not needed and should be set to an empty
            array. If storageTargetType is set to TargetPorts, then the
storageTargets field should be an array of comma-separated strings
representing the WWPNs intended to be used to connect with the
storage system.
Returns: dict
"""
return {'connectionId': connectionId,
'isEnabled': isEnabled,
'storageTargetType': storageTargetType,
'storageTargets': storageTargets
}
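# Illustrative usage building a SAN storage definition from the helpers above
# (a sketch; URIs, the capacity value and the host OS type string are
# placeholders):
#
#   path = make_StoragePathV2(connectionId=1, storageTargetType='Auto')
#   attach = make_VolumeAttachmentV2(
#       volumeName='boot-vol',
#       volumeProvisionedCapacityBytes=10737418240,
#       volumeStoragePoolUri='/rest/storage-pools/<id>',
#       storagePaths=[path])
#   san = make_SanStorageV3('Windows 2012', True, attach)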
def make_powerstate_dict(state, control):
return {'powerState': state,
'powerControl': control}
def make_ls_firmware_dict(action, sppUri, force='true'):
return {'command': action, 'sppUri': sppUri, 'force': force}
#def get_entities(uri):
# return self._get_members(self.get(uri))
def make_eula_dict(supportAccess):
return {'supportAccess': supportAccess}
def make_initial_password_change_dict(userName, oldPassword, newPassword):
return {
'userName': userName,
'oldPassword': oldPassword,
'newPassword': newPassword}
def make_appliance_network_config_dict(hostName,
macAddress,
newApp1Ipv4Addr=None,
newIpv4Subnet=None,
newIpv4Gateway=None,
newSearchDomain1=None,
newSearchDomain2=None,
ipv4Type='DHCP',
ipv6Type='DHCP'):
# Only DHCP enable for now. Need more attributes for static
if ipv4Type == 'DHCP':
return {'applianceNetworks': [{
'confOneNode': True,
'hostname': hostName,
'macAddress': macAddress,
'ipv4Type': ipv4Type,
'ipv6Type': ipv6Type}]
}
if ipv4Type == 'STATIC':
return {
'applianceNetworks': [{
'confOneNode': True,
'hostname': hostName,
'macAddress': macAddress,
'ipv4Type': ipv4Type,
'ipv6Type': ipv6Type,
'app1Ipv4Addr': newApp1Ipv4Addr,
'ipv4Subnet': newIpv4Subnet,
'ipv4Gateway': newIpv4Gateway,
# 'searchDomains': [newSearchDomain1, newSearchDomain2]
'searchDomains': []
}]
}
raise Exception('ipv4Type must be STATIC or DHCP.')
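# Illustrative usage of make_appliance_network_config_dict (a sketch; the host
# name, MAC address and IP values are documentation-range placeholders):
#
#   dhcp_cfg = make_appliance_network_config_dict('oneview.example.com',
#                                                 '00:11:22:33:44:55')
#   static_cfg = make_appliance_network_config_dict(
#       'oneview.example.com', '00:11:22:33:44:55', ipv4Type='STATIC',
#       newApp1Ipv4Addr='192.0.2.20', newIpv4Subnet='255.255.255.0',
#       newIpv4Gateway='192.0.2.1')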
def make_audit_log_dict(dateTimeStamp='',
componentId='',
organizationId='',
userId='',
domain='',
sourceIp='',
result='SUCCESS',
action='DONE',
objectType='',
objectTypeDescriptor='',
severity='INFO',
taskId='',
msg=''):
return {
'componentId': componentId,
'organizationId': organizationId,
'userId': userId,
'domain': domain,
'sourceIp': sourceIp,
'result': result,
'action': action,
'objectType': objectType,
'objectTypeDescriptor': objectTypeDescriptor,
'severity': severity,
'taskId': taskId,
'msg': msg}
def make_event_dict(severity='Unknown',
description='',
eventTypeID='',
eventDetails=None,
healthCategory='None',
urgency='None'):
return {
'severity': severity,
'description': description,
'eventTypeID': eventTypeID,
'eventDetails': eventDetails,
'healthCategory': healthCategory,
'type': 'EventResourceV2',
'urgency': urgency}
def make_event_detail_dict(eventItemName='',
eventItemValue=''):
return {
'eventItemName': eventItemName,
'eventItemValue': eventItemValue}
def make_user_modify_dict(userName,
password=None,
currentPassword=None,
replaceRoles=None,
roles=None,
emailAddress=None,
officePhone=None,
mobilePhone=None,
enabled=None,
fullName=None):
userDict = {'userName': userName}
if password is not None and currentPassword is not None:
userDict['password'] = password
userDict['currentPassword'] = currentPassword
if replaceRoles is not None:
userDict['replaceRoles'] = replaceRoles
if roles is not None:
userDict['roles'] = roles
if emailAddress is not None:
userDict['emailAddress'] = emailAddress
if officePhone is not None:
userDict['officePhone'] = officePhone
if mobilePhone is not None:
userDict['mobilePhone'] = mobilePhone
if enabled is not None:
userDict['enabled'] = enabled
if fullName is not None:
userDict['fullName'] = fullName
return userDict
def make_update_alert_dict(alertState=None,
assignedToUser=None,
eTag=None):
alertDict = {}
if alertState is not None:
alertDict['alertState'] = alertState
if assignedToUser is not None:
alertDict['assignedToUser'] = assignedToUser
if eTag is not None:
alertDict['eTag'] = eTag
return alertDict
def make_server_dict(hostname,
username,
password,
force=False,
licensingIntent='OneView',
configurationState='Managed'):
return {
'hostname': hostname,
'username': username,
'password': password,
'force': force,
'licensingIntent': licensingIntent,
'configurationState': configurationState}
def make_rack_dict(name, sn, thermal, height, depth, width, uheight):
return {
'name': name,
'serialNumber': sn,
'thermalLimit': thermal,
'height': height,
'depth': depth,
'width': width,
'uHeight': uheight}
def make_datacenter_dict(name, coolingCapacity, coolingMultiplier, currency,
costPerKilowattHour, defaultPowerLineVoltage,
width, depth, deratingType, deratingPercentage):
return {
'name': name,
'coolingCapacity': coolingCapacity,
'coolingMultiplier': coolingMultiplier,
'currency': currency,
'costPerKilowattHour': costPerKilowattHour,
'defaultPowerLineVoltage': defaultPowerLineVoltage,
'depth': depth,
'width': width,
'deratingType': deratingType,
'deratingPercentage': deratingPercentage,
'contents': []}
def make_powerdevice_dict(name, deviceType, feedIdentifier, lineVoltage,
model, partNumber, phaseType, ratedCapacity,
serialNumber):
return {
'name': name,
'deviceType': deviceType,
'feedIdentifier': feedIdentifier,
'lineVoltage': lineVoltage,
'model': model,
'partNumber': partNumber,
'phaseType': phaseType,
'ratedCapacity': ratedCapacity,
'serialNumber': serialNumber}
def make_alertMap_dict(notes, etag, state='Active', user='None',
urgency='None'):
return {
'alertState': state,
'assignedToUser': user,
'alertUrgency': urgency,
'notes': notes,
'eTag': etag
}
class pages(object):
def __init__(self, page, connection):
self._con = connection
self.currentPage = page
def __iter__(self):
return self
def __next__(self):
if self._con._nextPage is not None:
self.currentPage = self._con.getNextPage()
return self.currentPage
else:
raise StopIteration
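# Illustrative usage of the pages iterator (a sketch): `con` is assumed to be
# a connection object exposing `_nextPage` and `getNextPage()` as used above,
# and `first_page` the first page of a paginated GET. Iteration yields the
# pages *after* the one passed to the constructor, and each page is assumed
# to carry the usual 'members' list.
#
#   pager = pages(first_page, con)
#   members = list(first_page.get('members', []))
#   for page in pager:
#       members.extend(page.get('members', []))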
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.reduce_<AGGREGATE> ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import ragged
from tensorflow.python.platform import googletest
_MAX_INT32 = dtypes.int32.max
_MIN_INT32 = dtypes.int32.min
_NAN = np.nan
def mean(*values):
return 1.0 * sum(values) / len(values)
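# A minimal sketch of what the parameterized cases below exercise, using the
# docstring RaggedTensor that appears throughout this file:
#
#   rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
#   ragged.reduce_sum(rt, axis=0)    # -> [15, 12, 4]
#   ragged.reduce_mean(rt, axis=0)   # -> [3.75, 4.0, 4.0]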
class RaggedReduceOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters(
#=========================================================================
# Docstring examples. RaggedTensor for testing is:
# [[3, 1, 4],
# [1, 5, ],
# [9, ],
# [2, 6 ]]
#=========================================================================
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[15, 12, 4] # = [3+1+9+2, 1+5+6, 4]
),
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=-2,
expected=[15, 12, 4] # = [3+1+9+2, 1+5+6, 4]
),
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=1,
expected=[8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6]
),
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=-1,
expected=[8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6]
),
dict(
ragged_reduce_op=ragged.reduce_prod,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[54, 30, 4] # = [3*1*9*2, 1*5*6, 4]
),
dict(
ragged_reduce_op=ragged.reduce_prod,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=1,
expected=[12, 5, 9, 12] # = [3*1*4, 1*5, 9, 2*6]
),
dict(
ragged_reduce_op=ragged.reduce_min,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[1, 1, 4] # = [min(3, 1, 9, 2), min(1, 5, 6), 4]
),
dict(
ragged_reduce_op=ragged.reduce_min,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=1,
expected=[1, 1, 9, 2] # = [min(3, 1, 4), min(1, 5), 9, min(2, 6)]
),
dict(
ragged_reduce_op=ragged.reduce_max,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[9, 6, 4] # = [max(3, 1, 9, 2), max(1, 5, 6), 4]
),
dict(
ragged_reduce_op=ragged.reduce_max,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=1,
expected=[4, 5, 9, 6] # = [max(3, 1, 4), max(1, 5), 9, max(2, 6)]
),
dict(
ragged_reduce_op=ragged.reduce_mean,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[3.75, 4, 4] # = [mean(3, 1, 9, 2), mean(1, 5, 6), 4]
),
dict(
ragged_reduce_op=ragged.reduce_any,
rt_input=[[True, True], [True, True, False, True], [False, True]],
axis=0,
expected=[True, True, False, True]),
dict(
ragged_reduce_op=ragged.reduce_any,
rt_input=[[True, True], [True, True, False, True], [False, True]],
axis=1,
expected=[True, True, True]),
dict(
ragged_reduce_op=ragged.reduce_all,
rt_input=[[True, True], [True, True, False, True], [False, True]],
axis=0,
expected=[False, True, False, True]),
dict(
ragged_reduce_op=ragged.reduce_all,
rt_input=[[True, True], [True, True, False, True], [False, True]],
axis=1,
expected=[True, False, False]),
#=========================================================================
# Examples with the following RaggedTensor (ragged_rank=1):
# [[0, 1, 2, 3],
# [4 ],
# [ ],
# [5, 6 ],
# [7 ],
# [8, 9 ]]
#=========================================================================
# axis=None
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9),
dict(
ragged_reduce_op=ragged.reduce_prod,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=0 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9),
dict(
ragged_reduce_op=ragged.reduce_min,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=min(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)),
dict(
ragged_reduce_op=ragged.reduce_max,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=max(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)),
dict(
ragged_reduce_op=ragged.reduce_mean,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=mean(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)),
# axis=0
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[0 + 4 + 5 + 7 + 8, 1 + 6 + 9, 2, 3]),
dict(
ragged_reduce_op=ragged.reduce_prod,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[0 * 4 * 5 * 7 * 8, 1 * 6 * 9, 2, 3]),
dict(
ragged_reduce_op=ragged.reduce_min,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[min(0, 4, 5, 7, 8), min(1, 6, 9), 2, 3]),
dict(
ragged_reduce_op=ragged.reduce_max,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[max(0, 4, 5, 7, 8), max(1, 6, 9), 2, 3]),
dict(
ragged_reduce_op=ragged.reduce_mean,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[mean(0, 4, 5, 7, 8),
mean(1, 6, 9), 2, 3]),
# axis=1
# Note: we don't test mean here because it gives a NaN, and this will
# cause assertEqual to fail (since NaN != NaN). See testMeanNan().
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=1,
expected=[0 + 1 + 2 + 3, 4, 0, 5 + 6, 7, 8 + 9]),
dict(
ragged_reduce_op=ragged.reduce_prod,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=1,
expected=[0 * 1 * 2 * 3, 4, 1, 5 * 6, 7, 8 * 9]),
dict(
ragged_reduce_op=ragged.reduce_min,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=1,
expected=[min(0, 1, 2, 3), 4, _MAX_INT32,
min(5, 6), 7,
min(8, 9)]),
dict(
ragged_reduce_op=ragged.reduce_max,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=1,
expected=[max(0, 1, 2, 3), 4, _MIN_INT32,
max(5, 6), 7,
max(8, 9)]),
#=========================================================================
# Examples with ragged_rank=2:
# [[[1, 2], [ ], [3, 4, 5]],
# [[6, 7], [ ], [8 ]],
# [ ],
# [[9 ] ]]
#=========================================================================
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[],
expected=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]]),
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=None,
expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])),
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=0,
expected=[[1 + 6 + 9, 2 + 7], [], [3 + 8, 4, 5]]),
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=1,
expected=[[1 + 3, 2 + 4, 5], [6 + 8, 7], [], [9]]),
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=2,
expected=[[1 + 2, 0, 3 + 4 + 5], [6 + 7, 0, 8], [], [9]]),
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[0, 1],
expected=[1 + 3 + 6 + 8 + 9, 2 + 4 + 7, 5]),
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[0, 2],
expected=[1 + 6 + 9 + 2 + 7, 0, 3 + 8 + 4 + 5]),
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[1, 2],
expected=[1 + 2 + 3 + 4 + 5, 6 + 7 + 8, 0, 9]),
dict(
ragged_reduce_op=ragged.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[0, 1, 2],
expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])),
#=========================================================================
# Examples for ragged_reduce_mean ragged_rank=2:
# [[[1, 2], [3, 4, 5]],
# [[6, 7], [8 ]],
# [[9 ] ]]
#=========================================================================
dict(
ragged_reduce_op=ragged.reduce_mean,
rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]],
axis=0,
expected=[[mean(1, 6, 9), mean(2, 7)], [mean(3, 8), 4, 5]]),
dict(
ragged_reduce_op=ragged.reduce_mean,
rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]],
axis=1,
expected=[[mean(1, 3), mean(2, 4), 5], [mean(6, 8), 7], [9]]),
dict(
ragged_reduce_op=ragged.reduce_mean,
rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]],
axis=2,
expected=[[mean(1, 2), mean(3, 4, 5)], [mean(6, 7), 8], [9]]),
)
def testReduce(self, ragged_reduce_op, rt_input, axis, expected):
rt_input = ragged.constant(rt_input)
reduced = ragged_reduce_op(rt_input, axis)
with self.test_session():
self.assertEqual(reduced.eval().tolist(), expected)
def assertEqualWithNan(self, actual, expected):
"""Like assertEqual, but NaN==NaN."""
self.assertTrue(
((actual == expected) | (np.isnan(actual) & np.isnan(expected))).all())
def testMeanNan(self):
rt_as_list = [[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]]
expected = (
np.array([0 + 1 + 2 + 3, 4, 0, 5 + 6, 7, 8 + 9]) / np.array(
[4, 1, 0, 2, 1, 2]))
rt_input = ragged.constant(rt_as_list)
reduced = ragged.reduce_mean(rt_input, axis=1)
with self.test_session():
self.assertEqualWithNan(reduced.eval(), expected)
def testMeanWithTensorInputs(self):
tensor = [[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]]
expected = [2.0, 20.0]
reduced = ragged.reduce_mean(tensor, axis=1)
with self.test_session():
self.assertAllEqual(reduced.eval(), expected)
def testErrors(self):
rt_input = ragged.constant([[1, 2, 3], [4, 5]])
axis = array_ops.placeholder_with_default(constant_op.constant([0]), None)
self.assertRaisesRegexp(ValueError,
r'axis must be known at graph construction time.',
ragged.reduce_sum, rt_input, axis)
self.assertRaisesRegexp(TypeError,
r'axis must be an int; got str.*',
ragged.reduce_sum, rt_input, ['x'])
if __name__ == '__main__':
googletest.main()
|
|
"""Support for the DirecTV receivers."""
import logging
import requests
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL, MEDIA_TYPE_MOVIE, MEDIA_TYPE_TVSHOW,
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON)
from homeassistant.const import (
CONF_DEVICE, CONF_HOST, CONF_NAME, CONF_PORT, STATE_OFF, STATE_PAUSED,
STATE_PLAYING)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_MEDIA_CURRENTLY_RECORDING = 'media_currently_recording'
ATTR_MEDIA_RATING = 'media_rating'
ATTR_MEDIA_RECORDED = 'media_recorded'
ATTR_MEDIA_START_TIME = 'media_start_time'
DEFAULT_DEVICE = '0'
DEFAULT_NAME = "DirecTV Receiver"
DEFAULT_PORT = 8080
SUPPORT_DTV = SUPPORT_PAUSE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_NEXT_TRACK | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_PLAY
SUPPORT_DTV_CLIENT = SUPPORT_PAUSE | \
SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_NEXT_TRACK | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_PLAY
DATA_DIRECTV = 'data_directv'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_DEVICE, default=DEFAULT_DEVICE): cv.string,
})
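# Example configuration.yaml entry for this platform (a sketch; the host is a
# documentation-range placeholder and `device` keeps its default of '0'):
#
#   media_player:
#     - platform: directv
#       host: 192.0.2.30
#       name: Living Room DirecTV
#       port: 8080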
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the DirecTV platform."""
known_devices = hass.data.get(DATA_DIRECTV, set())
hosts = []
if CONF_HOST in config:
_LOGGER.debug("Adding configured device %s with client address %s ",
config.get(CONF_NAME), config.get(CONF_DEVICE))
hosts.append([
config.get(CONF_NAME), config.get(CONF_HOST),
config.get(CONF_PORT), config.get(CONF_DEVICE)
])
elif discovery_info:
host = discovery_info.get('host')
name = 'DirecTV_{}'.format(discovery_info.get('serial', ''))
# Attempt to discover additional RVU units
_LOGGER.debug("Doing discovery of DirecTV devices on %s", host)
from DirectPy import DIRECTV
dtv = DIRECTV(host, DEFAULT_PORT)
try:
resp = dtv.get_locations()
except requests.exceptions.RequestException as ex:
# Bail out and just go forward with uPnP data
# Make sure that this device is not already configured
# Comparing based on host (IP) and clientAddr.
_LOGGER.debug("Request exception %s trying to get locations", ex)
resp = {
'locations': [{
'locationName': name,
'clientAddr': DEFAULT_DEVICE
}]
}
_LOGGER.debug("Known devices: %s", known_devices)
for loc in resp.get("locations") or []:
if "locationName" not in loc or "clientAddr" not in loc:
continue
# Make sure that this device is not already configured
# Comparing based on host (IP) and clientAddr.
if (host, loc["clientAddr"]) in known_devices:
_LOGGER.debug("Discovered device %s on host %s with "
"client address %s is already "
"configured",
str.title(loc["locationName"]),
host, loc["clientAddr"])
else:
_LOGGER.debug("Adding discovered device %s with"
" client address %s",
str.title(loc["locationName"]),
loc["clientAddr"])
hosts.append([str.title(loc["locationName"]), host,
DEFAULT_PORT, loc["clientAddr"]])
dtvs = []
for host in hosts:
dtvs.append(DirecTvDevice(*host))
hass.data.setdefault(DATA_DIRECTV, set()).add((host[1], host[3]))
add_entities(dtvs)
class DirecTvDevice(MediaPlayerDevice):
"""Representation of a DirecTV receiver on the network."""
def __init__(self, name, host, port, device):
"""Initialize the device."""
from DirectPy import DIRECTV
self.dtv = DIRECTV(host, port, device)
self._name = name
self._is_standby = True
self._current = None
self._last_update = None
self._paused = None
self._last_position = None
self._is_recorded = None
self._is_client = device != '0'
self._assumed_state = None
self._available = False
self._first_error_timestamp = None
if self._is_client:
_LOGGER.debug("Created DirecTV client %s for device %s",
self._name, device)
else:
_LOGGER.debug("Created DirecTV device for %s", self._name)
def update(self):
"""Retrieve latest state."""
_LOGGER.debug("%s: Updating status", self.entity_id)
try:
self._available = True
self._is_standby = self.dtv.get_standby()
if self._is_standby:
self._current = None
self._is_recorded = None
self._paused = None
self._assumed_state = False
self._last_position = None
self._last_update = None
else:
self._current = self.dtv.get_tuned()
if self._current['status']['code'] == 200:
self._first_error_timestamp = None
self._is_recorded = self._current.get('uniqueId')\
is not None
self._paused = self._last_position == \
self._current['offset']
self._assumed_state = self._is_recorded
self._last_position = self._current['offset']
self._last_update = dt_util.utcnow() if not self._paused \
or self._last_update is None else self._last_update
else:
# If an error is received then only set to unavailable if
# this started at least 1 minute ago.
log_message = "{}: Invalid status {} received".format(
self.entity_id,
self._current['status']['code']
)
if self._check_state_available():
_LOGGER.debug(log_message)
else:
_LOGGER.error(log_message)
except requests.RequestException as ex:
_LOGGER.error("%s: Request error trying to update current status: "
"%s", self.entity_id, ex)
self._check_state_available()
except Exception as ex:
_LOGGER.error("%s: Exception trying to update current status: %s",
self.entity_id, ex)
self._available = False
if not self._first_error_timestamp:
self._first_error_timestamp = dt_util.utcnow()
raise
def _check_state_available(self):
"""Set to unavailable if issue been occurring over 1 minute."""
if not self._first_error_timestamp:
self._first_error_timestamp = dt_util.utcnow()
else:
tdelta = dt_util.utcnow() - self._first_error_timestamp
if tdelta.total_seconds() >= 60:
self._available = False
return self._available
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
attributes = {}
if not self._is_standby:
attributes[ATTR_MEDIA_CURRENTLY_RECORDING] =\
self.media_currently_recording
attributes[ATTR_MEDIA_RATING] = self.media_rating
attributes[ATTR_MEDIA_RECORDED] = self.media_recorded
attributes[ATTR_MEDIA_START_TIME] = self.media_start_time
return attributes
@property
def name(self):
"""Return the name of the device."""
return self._name
# MediaPlayerDevice properties and methods
@property
def state(self):
"""Return the state of the device."""
if self._is_standby:
return STATE_OFF
# For recorded media we can determine if it is paused or not.
# For live media we're unable to determine and will always return
# playing instead.
if self._paused:
return STATE_PAUSED
return STATE_PLAYING
@property
def available(self):
"""Return if able to retrieve information from DVR or not."""
return self._available
@property
def assumed_state(self):
"""Return if we assume the state or not."""
return self._assumed_state
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
if self._is_standby:
return None
return self._current['programId']
@property
def media_content_type(self):
"""Return the content type of current playing media."""
if self._is_standby:
return None
if 'episodeTitle' in self._current:
return MEDIA_TYPE_TVSHOW
return MEDIA_TYPE_MOVIE
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
if self._is_standby:
return None
return self._current['duration']
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._is_standby:
return None
return self._last_position
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
if self._is_standby:
return None
return self._last_update
@property
def media_title(self):
"""Return the title of current playing media."""
if self._is_standby:
return None
return self._current['title']
@property
def media_series_title(self):
"""Return the title of current episode of TV show."""
if self._is_standby:
return None
return self._current.get('episodeTitle')
@property
def media_channel(self):
"""Return the channel current playing media."""
if self._is_standby:
return None
return "{} ({})".format(
self._current['callsign'], self._current['major'])
@property
def source(self):
"""Name of the current input source."""
if self._is_standby:
return None
return self._current['major']
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_DTV_CLIENT if self._is_client else SUPPORT_DTV
@property
def media_currently_recording(self):
"""If the media is currently being recorded or not."""
if self._is_standby:
return None
return self._current['isRecording']
@property
def media_rating(self):
"""TV Rating of the current playing media."""
if self._is_standby:
return None
return self._current['rating']
@property
def media_recorded(self):
"""If the media was recorded or live."""
if self._is_standby:
return None
return self._is_recorded
@property
def media_start_time(self):
"""Start time the program aired."""
if self._is_standby:
return None
return dt_util.as_local(
dt_util.utc_from_timestamp(self._current['startTime']))
def turn_on(self):
"""Turn on the receiver."""
if self._is_client:
raise NotImplementedError()
_LOGGER.debug("Turn on %s", self._name)
self.dtv.key_press('poweron')
def turn_off(self):
"""Turn off the receiver."""
if self._is_client:
raise NotImplementedError()
_LOGGER.debug("Turn off %s", self._name)
self.dtv.key_press('poweroff')
def media_play(self):
"""Send play command."""
_LOGGER.debug("Play on %s", self._name)
self.dtv.key_press('play')
def media_pause(self):
"""Send pause command."""
_LOGGER.debug("Pause on %s", self._name)
self.dtv.key_press('pause')
def media_stop(self):
"""Send stop command."""
_LOGGER.debug("Stop on %s", self._name)
self.dtv.key_press('stop')
def media_previous_track(self):
"""Send rewind command."""
_LOGGER.debug("Rewind on %s", self._name)
self.dtv.key_press('rew')
def media_next_track(self):
"""Send fast forward command."""
_LOGGER.debug("Fast forward on %s", self._name)
self.dtv.key_press('ffwd')
def play_media(self, media_type, media_id, **kwargs):
"""Select input source."""
if media_type != MEDIA_TYPE_CHANNEL:
_LOGGER.error("Invalid media type %s. Only %s is supported",
media_type, MEDIA_TYPE_CHANNEL)
return
_LOGGER.debug("Changing channel on %s to %s", self._name, media_id)
self.dtv.tune_channel(media_id)
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'ClearIsisProcessRpc.Input.Instance' : {
'meta_info' : _MetaInfoClass('ClearIsisProcessRpc.Input.Instance',
False,
[
_MetaInfoClassMember('instance-identifier', ATTRIBUTE, 'str' , None, None,
[], [],
''' IS-IS process instance identifier
''',
'instance_identifier',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'instance',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisProcessRpc.Input' : {
'meta_info' : _MetaInfoClass('ClearIsisProcessRpc.Input',
False,
[
_MetaInfoClassMember('instance', REFERENCE_CLASS, 'Instance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act', 'ClearIsisProcessRpc.Input.Instance',
[], [],
''' Clear data from single IS-IS instance
''',
'instance',
'Cisco-IOS-XR-isis-act', False),
_MetaInfoClassMember('process', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Clear all IS-IS data structures
''',
'process',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'input',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisProcessRpc' : {
'meta_info' : _MetaInfoClass('ClearIsisProcessRpc',
False,
[
_MetaInfoClassMember('input', REFERENCE_CLASS, 'Input' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act', 'ClearIsisProcessRpc.Input',
[], [],
''' ''',
'input',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'clear-isis-process',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisRouteRpc.Input.Instance' : {
'meta_info' : _MetaInfoClass('ClearIsisRouteRpc.Input.Instance',
False,
[
_MetaInfoClassMember('instance-identifier', ATTRIBUTE, 'str' , None, None,
[], [],
''' IS-IS process instance identifier
''',
'instance_identifier',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'instance',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisRouteRpc.Input' : {
'meta_info' : _MetaInfoClass('ClearIsisRouteRpc.Input',
False,
[
_MetaInfoClassMember('instance', REFERENCE_CLASS, 'Instance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act', 'ClearIsisRouteRpc.Input.Instance',
[], [],
''' Clear data from single IS-IS instance
''',
'instance',
'Cisco-IOS-XR-isis-act', False),
_MetaInfoClassMember('route', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Clear IS-IS routes
''',
'route',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'input',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisRouteRpc' : {
'meta_info' : _MetaInfoClass('ClearIsisRouteRpc',
False,
[
_MetaInfoClassMember('input', REFERENCE_CLASS, 'Input' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act', 'ClearIsisRouteRpc.Input',
[], [],
''' ''',
'input',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'clear-isis-route',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisStatRpc.Input.Instance' : {
'meta_info' : _MetaInfoClass('ClearIsisStatRpc.Input.Instance',
False,
[
_MetaInfoClassMember('instance-identifier', ATTRIBUTE, 'str' , None, None,
[], [],
''' IS-IS process instance identifier
''',
'instance_identifier',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'instance',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisStatRpc.Input.Statistics' : {
'meta_info' : _MetaInfoClass('ClearIsisStatRpc.Input.Statistics',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'statistics',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisStatRpc.Input' : {
'meta_info' : _MetaInfoClass('ClearIsisStatRpc.Input',
False,
[
_MetaInfoClassMember('instance', REFERENCE_CLASS, 'Instance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act', 'ClearIsisStatRpc.Input.Instance',
[], [],
''' Clear data from single IS-IS instance
''',
'instance',
'Cisco-IOS-XR-isis-act', False),
_MetaInfoClassMember('statistics', REFERENCE_CLASS, 'Statistics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act', 'ClearIsisStatRpc.Input.Statistics',
[], [],
''' Clear IS-IS protocol statistics
''',
'statistics',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'input',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisStatRpc' : {
'meta_info' : _MetaInfoClass('ClearIsisStatRpc',
False,
[
_MetaInfoClassMember('input', REFERENCE_CLASS, 'Input' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act', 'ClearIsisStatRpc.Input',
[], [],
''' ''',
'input',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'clear-isis-stat',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisDistRpc.Input.Instance' : {
'meta_info' : _MetaInfoClass('ClearIsisDistRpc.Input.Instance',
False,
[
_MetaInfoClassMember('instance-identifier', ATTRIBUTE, 'str' , None, None,
[], [],
''' IS-IS process instance identifier
''',
'instance_identifier',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'instance',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisDistRpc.Input' : {
'meta_info' : _MetaInfoClass('ClearIsisDistRpc.Input',
False,
[
_MetaInfoClassMember('instance', REFERENCE_CLASS, 'Instance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act', 'ClearIsisDistRpc.Input.Instance',
[], [],
''' Reset BGP-LS topology from single IS-IS instance
''',
'instance',
'Cisco-IOS-XR-isis-act', False),
_MetaInfoClassMember('distribution', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Reset BGP-LS topology distribution
''',
'distribution',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'input',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisDistRpc' : {
'meta_info' : _MetaInfoClass('ClearIsisDistRpc',
False,
[
_MetaInfoClassMember('input', REFERENCE_CLASS, 'Input' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act', 'ClearIsisDistRpc.Input',
[], [],
''' ''',
'input',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'clear-isis-dist',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisRpc.Input.Instance' : {
'meta_info' : _MetaInfoClass('ClearIsisRpc.Input.Instance',
False,
[
_MetaInfoClassMember('instance-identifier', ATTRIBUTE, 'str' , None, None,
[], [],
''' IS-IS process instance identifier
''',
'instance_identifier',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'instance',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisRpc.Input.RtTypeEnum' : _MetaInfoEnum('RtTypeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act',
{
'AFI-ALL-MULTICAST':'AFI_ALL_MULTICAST',
'AFI-ALL-SAFI-ALL':'AFI_ALL_SAFI_ALL',
'AFI-ALL-UNICAST':'AFI_ALL_UNICAST',
'IPv4-MULTICAST':'IPv4_MULTICAST',
'IPv4-SAFI-ALL':'IPv4_SAFI_ALL',
'IPv4-UNICAST':'IPv4_UNICAST',
'IPv6-MULTICAST':'IPv6_MULTICAST',
'IPv6-SAFI-ALL':'IPv6_SAFI_ALL',
'IPv6-UNICAST':'IPv6_UNICAST',
}, 'Cisco-IOS-XR-isis-act', _yang_ns._namespaces['Cisco-IOS-XR-isis-act']),
'ClearIsisRpc.Input' : {
'meta_info' : _MetaInfoClass('ClearIsisRpc.Input',
False,
[
_MetaInfoClassMember('instance', REFERENCE_CLASS, 'Instance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act', 'ClearIsisRpc.Input.Instance',
[], [],
''' Clear data from single IS-IS instance
''',
'instance',
'Cisco-IOS-XR-isis-act', False),
_MetaInfoClassMember('rt-type', REFERENCE_ENUM_CLASS, 'RtTypeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act', 'ClearIsisRpc.Input.RtTypeEnum',
[], [],
''' Clear data for these route types
''',
'rt_type',
'Cisco-IOS-XR-isis-act', False),
_MetaInfoClassMember('route', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Clear IS-IS routes
''',
'route',
'Cisco-IOS-XR-isis-act', False),
_MetaInfoClassMember('topology', ATTRIBUTE, 'str' , None, None,
[], [],
''' Topology table information
''',
'topology',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'input',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
'ClearIsisRpc' : {
'meta_info' : _MetaInfoClass('ClearIsisRpc',
False,
[
_MetaInfoClassMember('input', REFERENCE_CLASS, 'Input' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act', 'ClearIsisRpc.Input',
[], [],
''' ''',
'input',
'Cisco-IOS-XR-isis-act', False),
],
'Cisco-IOS-XR-isis-act',
'clear-isis',
_yang_ns._namespaces['Cisco-IOS-XR-isis-act'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_isis_act'
),
},
}
_meta_table['ClearIsisProcessRpc.Input.Instance']['meta_info'].parent = _meta_table['ClearIsisProcessRpc.Input']['meta_info']
_meta_table['ClearIsisProcessRpc.Input']['meta_info'].parent = _meta_table['ClearIsisProcessRpc']['meta_info']
_meta_table['ClearIsisRouteRpc.Input.Instance']['meta_info'].parent = _meta_table['ClearIsisRouteRpc.Input']['meta_info']
_meta_table['ClearIsisRouteRpc.Input']['meta_info'].parent = _meta_table['ClearIsisRouteRpc']['meta_info']
_meta_table['ClearIsisStatRpc.Input.Instance']['meta_info'].parent = _meta_table['ClearIsisStatRpc.Input']['meta_info']
_meta_table['ClearIsisStatRpc.Input.Statistics']['meta_info'].parent = _meta_table['ClearIsisStatRpc.Input']['meta_info']
_meta_table['ClearIsisStatRpc.Input']['meta_info'].parent = _meta_table['ClearIsisStatRpc']['meta_info']
_meta_table['ClearIsisDistRpc.Input.Instance']['meta_info'].parent = _meta_table['ClearIsisDistRpc.Input']['meta_info']
_meta_table['ClearIsisDistRpc.Input']['meta_info'].parent = _meta_table['ClearIsisDistRpc']['meta_info']
_meta_table['ClearIsisRpc.Input.Instance']['meta_info'].parent = _meta_table['ClearIsisRpc.Input']['meta_info']
_meta_table['ClearIsisRpc.Input']['meta_info'].parent = _meta_table['ClearIsisRpc']['meta_info']
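# Illustrative helper (not generated by ydkgen): walks the .parent links wired up
# above, from an inner node out to its enclosing RPC. The table keys and the
# .parent attribute come from this file; everything else is an assumption.
def _example_parent_chain(path='ClearIsisProcessRpc.Input.Instance'):
    chain = []
    meta = _meta_table[path]['meta_info']
    while meta is not None:
        chain.append(meta)
        meta = getattr(meta, 'parent', None)
    # For the default path this yields the Instance, Input and ClearIsisProcessRpc
    # meta_info objects, in that order.
    return chain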
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "pyannote-db-prism-"
cfg.versionfile_source = "Prism/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
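# Illustrative call (assumed arguments): run_command(["git"], ["describe", "--tags"],
# cwd=root, verbose=True) returns the command's stdout as a stripped string, or
# None when no candidate command can be found or the command exits non-zero.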
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
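# Illustrative example (assumed values): with tag_prefix "" and 'git describe'
# printing "1.2.0-3-gabc1234-dirty", the pieces returned above are
# {"closest-tag": "1.2.0", "distance": 3, "short": "abc1234", "dirty": True,
#  "long": <full sha>, "error": None}.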
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import asset
from google.ads.googleads.v9.services.types import asset_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import AssetServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AssetServiceGrpcTransport
class AssetServiceClientMeta(type):
"""Metaclass for the AssetService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AssetServiceTransport]]
_transport_registry["grpc"] = AssetServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AssetServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
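# Illustrative behaviour of the registry above (names taken from this module):
#   AssetServiceClient.get_transport_class("grpc") -> AssetServiceGrpcTransport
#   AssetServiceClient.get_transport_class()       -> AssetServiceGrpcTransport (the first entry)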
class AssetServiceClient(metaclass=AssetServiceClientMeta):
"""Service to manage assets. Asset types can be created with
AssetService are YoutubeVideoAsset, MediaBundleAsset and
ImageAsset. TextAsset should be created with Ad inline.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AssetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AssetServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AssetServiceTransport:
"""Return the transport used by the client instance.
Returns:
AssetServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def asset_path(customer_id: str, asset_id: str,) -> str:
"""Return a fully-qualified asset string."""
return "customers/{customer_id}/assets/{asset_id}".format(
customer_id=customer_id, asset_id=asset_id,
)
@staticmethod
def parse_asset_path(path: str) -> Dict[str, str]:
"""Parse a asset path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/assets/(?P<asset_id>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def conversion_action_path(
customer_id: str, conversion_action_id: str,
) -> str:
"""Return a fully-qualified conversion_action string."""
return "customers/{customer_id}/conversionActions/{conversion_action_id}".format(
customer_id=customer_id, conversion_action_id=conversion_action_id,
)
@staticmethod
def parse_conversion_action_path(path: str) -> Dict[str, str]:
"""Parse a conversion_action path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/conversionActions/(?P<conversion_action_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AssetServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the asset service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AssetServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AssetServiceTransport):
# transport is a AssetServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AssetServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_asset(
self,
request: Union[asset_service.GetAssetRequest, dict] = None,
*,
resource_name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> asset.Asset:
r"""Returns the requested asset in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.GetAssetRequest, dict]):
The request object. Request message for
[AssetService.GetAsset][google.ads.googleads.v9.services.AssetService.GetAsset]
resource_name (:class:`str`):
Required. The resource name of the
asset to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.resources.types.Asset:
Asset is a part of an ad which can be
shared across multiple ads. It can be an
image (ImageAsset), a video
(YoutubeVideoAsset), etc. Assets are
immutable and cannot be removed. To stop
an asset from serving, remove the asset
from the entity that is using it.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an asset_service.GetAssetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, asset_service.GetAssetRequest):
request = asset_service.GetAssetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_asset]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
def mutate_assets(
self,
request: Union[asset_service.MutateAssetsRequest, dict] = None,
*,
customer_id: str = None,
operations: Sequence[asset_service.AssetOperation] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> asset_service.MutateAssetsResponse:
r"""Creates assets. Operation statuses are returned.
List of thrown errors: `AssetError <>`__
`AuthenticationError <>`__ `AuthorizationError <>`__
`CollectionSizeError <>`__ `CurrencyCodeError <>`__
`DatabaseError <>`__ `DateError <>`__ `DistinctError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MediaUploadError <>`__ `MutateError <>`__
`NotAllowlistedError <>`__ `NotEmptyError <>`__
`OperatorError <>`__ `QuotaError <>`__ `RangeError <>`__
`RequestError <>`__ `SizeLimitError <>`__
`StringFormatError <>`__ `StringLengthError <>`__
`UrlFieldError <>`__ `YoutubeVideoRegistrationError <>`__
Args:
request (Union[google.ads.googleads.v9.services.types.MutateAssetsRequest, dict]):
The request object. Request message for
[AssetService.MutateAssets][google.ads.googleads.v9.services.AssetService.MutateAssets]
customer_id (:class:`str`):
Required. The ID of the customer
whose assets are being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v9.services.types.AssetOperation]`):
Required. The list of operations to
perform on individual assets.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v9.services.types.MutateAssetsResponse:
Response message for an asset mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in an asset_service.MutateAssetsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, asset_service.MutateAssetsRequest):
request = asset_service.MutateAssetsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_assets]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("AssetServiceClient",)
|
|
import re
from rpython.rlib.objectmodel import compute_hash, specialize, always_inline
from rpython.rlib import jit, rgc
import space, weakref
class Object:
_immutable_fields_ = ['interface', 'custom_interface', 'flag', 'number', 'value', 'contents', 'data', 'string', 'iterator', 'arity', 'methods', 'default', 'cells']
__slots__ = []
__attrs__ = []
# The metaclass here takes care that every object gets an interface,
# so the programmer doesn't need to do that.
class __metaclass__(type):
def __init__(cls, name, bases, dict):
if name not in ('Object', 'Interface', 'Null') and 'interface' not in dict:
cls.interface = Interface(
parent = cls.__bases__[0].interface,
name = re.sub("(.)([A-Z]+)", r"\1_\2", name).lower().decode('utf-8'),
methods = {})
if re.match("^L[A-Z]", name):
cls.interface.name = name[1:].decode('utf-8')
if name not in ('BoundMethod', 'Builtin'):
expose_internal_methods(cls.interface, dict)
def call(self, argv):
raise space.unwind(space.LTypeError(u"cannot call " + self.repr()))
def getitem(self, index):
raise space.unwind(space.LKeyError(self, index))
def setitem(self, index, value):
raise space.unwind(space.LKeyError(self, index))
def iter(self):
raise space.unwind(space.LTypeError(u"cannot iterate " + self.repr()))
def listattr(self):
listing = []
for name in self.__class__.interface.methods.keys():
listing.append(space.String(name))
return listing
def getattr(self, index):
method = self.__class__.interface.lookup_method(index)
if method is not None:
return BoundMethod(self, index, method)
else:
raise space.unwind(space.LAttributeError(self, index))
def setattr(self, index, value):
raise space.unwind(space.LAttributeError(self, index))
def callattr(self, name, argv):
return self.getattr(name).call(argv)
def getattr_or(self, index, default):
try:
return self.getattr(index)
except space.Unwinder as w:
if isinstance(w.exception, space.LAttributeError):
return default
raise
def contains(self, obj):
raise space.unwind(space.LTypeError(u"%s cannot contain" % self.repr()))
def repr(self):
return u"<%s>" % space.get_interface(self).name
def hash(self):
return compute_hash(self)
def eq(self, other):
return self is other
@classmethod
def instantiator(cls, fn):
def _instantiate_b_(interface, argv):
return fn(argv)
cls.interface.instantiate = _instantiate_b_
register_instantiator(cls.interface, fn)
return fn
@classmethod
def instantiator2(cls, decorator):
def _decorator_(fn):
fn = decorator(fn)
def _instantiate_wrapper_(interface, argv):
return fn(argv)
cls.interface.instantiate = _instantiate_wrapper_
register_instantiator(cls.interface, fn)
return fn
return _decorator_
@classmethod
def builtin_method(cls, fn):
from builtin import Builtin
builtin = Builtin(fn)
cls.interface.methods[builtin.name] = builtin
@classmethod
def method(cls, name, decorator):
def _decarotar_(fn):
from builtin import Builtin
builtin = Builtin(decorator(fn), name)
cls.interface.methods[builtin.name] = builtin
return fn
return _decarotar_
class Interface(Object):
_immutable_fields_ = ['instantiate?', 'methods', 'parent']
# Should add possibility to freeze the interface?
def __init__(self, parent, name, methods, instantiate=None):
assert isinstance(name, unicode)
self.parent = parent
self.name = name
self.instantiate = instantiate
self.methods = methods
self.doc = None
self.multimethods = {} # Describes which multimethods are defined for
# this interface. The record is described in the
# runtime/space/multimethod.py
self.multimethod_index = {}
self.weakref = WeakInterface(self)
def call(self, argv):
if self.instantiate is None:
if self.name == u'null':
raise space.unwind(space.LTypeError(u"cannot call null"))
raise space.unwind(space.LTypeError(u"cannot instantiate " + self.name))
return self.instantiate(self, argv)
def repr(self):
return self.name
def getattr(self, name):
if name == u"doc":
return null if self.doc is None else self.doc
method = self.lookup_method(name)
if method is not None:
return method
method = self.__class__.interface.lookup_method(name)
if method is not None:
return BoundMethod(self, name, method)
return Object.getattr(self, name)
@jit.elidable
def lookup_method(self, name):
this = self
method = this.methods.get(name, None)
while method is None and (this.parent not in (null, Interface.interface)):
this = this.parent
method = this.methods.get(name, None)
return method
def setattr(self, name, value):
if name == u"doc":
self.doc = value
return null
else:
return Object.setattr(self, name, value)
def listattr(self):
listing = []
listing.append(space.String(u"doc"))
for methodname in self.methods.keys():
listing.append(space.String(methodname))
return listing
class WeakInterface(object):
def __init__(self, interface):
self.weakref = weakref.ref(interface)
Interface.interface = Interface(None, u"interface", {})
Interface.interface.parent = Interface.interface
# TODO: explain myself, why parent of an interface is an interface?
# ... I forgot.. that happens.
# This approach ensures that we have Null.__class__.interface that points to Null.
# It allows the null to behave like an interface, except that null is its own interface.
class Null(Interface):
pass # The class was a late addition. Apparently the behavior relied on a bug
# that was fixed somewhere along the way in PyPy.
null = Null(None, u"null", {})
null.parent = null
Null.interface = null
Object.interface = Interface(null, u"object", {})
class BoundMethod(Object):
_immutable_fields_ = ['obj', 'name', 'methodfn']
def __init__(self, obj, name, methodfn):
self.obj = obj
self.name = name
self.methodfn = methodfn
def call(self, argv):
return self.methodfn.call([self.obj] + argv)
def getattr(self, name):
return self.methodfn.getattr(name)
def setattr(self, name, value):
return self.methodfn.setattr(name, value)
def listattr(self):
return self.methodfn.listattr()
def repr(self):
return u"%s.%s" % (self.obj.repr(), self.name)
# Notice that cast != instantiation.
# The distinction is very important.
cast_methods = {}
def cast_for(cls):
def _cast_decorator_(x):
cast_methods[cls] = x
return x
return _cast_decorator_
# Cast didn't appear to work well as a class method, so I made this
# convenient table construct that uses default handling when conversion
# is not available.
# User objects will not have access to implement this method of casting.
# Userspace casting will be treated as separate problem.
# TODO: frame entry association could be "cool" here. So you would know
# where a cast attempt failed.
@specialize.arg(1, 2)
def cast(x, cls, info=u"something"):
if isinstance(x, cls): # This here means that cast won't change object
return x # if it is already correct type.
try:
fn = cast_methods[cls]
except KeyError as _:
raise space.unwind(space.LTypeError(u"expected %s is %s, got %s" % (
info, cls.interface.name, x.repr())))
res = fn(x)
if isinstance(res, cls):
return res
# TODO: Consider alternative ways to say it. :)
raise space.unwind(space.LTypeError(u"implicit conversion of %s at %s into %s returned %s" % (
x.repr(), info, cls.interface.name, res.repr())))
# Variation of cast that accepts a null value and translates it to None.
@always_inline
@specialize.arg(1, 2)
def cast_n(x, cls, info=u"something"):
if x is null:
return None
return cast(x, cls, info)
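# Illustrative sketch (not from the runtime; "Foo" and the conversion body are
# assumptions): registering a conversion in the cast table above and using it.
#
#   @cast_for(Foo)
#   def _to_foo(obj):
#       return Foo(obj)
#
#   foo = cast(x, Foo, u"argument 0")   # returns x unchanged if it already is a Foo
#   maybe = cast_n(null, Foo)           # null maps to None instead of unwinding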
# Yes, this is a hacky hack.
import builtin
def expose_internal_methods(interface, methods):
for name in methods:
if name in internal_methods:
interface.methods[u"+" + name.decode('utf-8')] = builtin.Builtin(
hate_them,
spec=internal_methods[name],
source_location=builtin.get_source_location(methods[name]))
internal_methods = {
u"call": (0, 0, True, ['argv'], None),
u"getitem": (0, 0, False, ['index'], None),
u"setitem": (0, 0, False, ['index', 'value'], None),
u"iter": (0, 0, False, [], None),
#u"listattr": (0, 0, False, []), # TODO: figure out what to do with these.
#u"getattr": (0, 0, False, ['name'], None), # these all are usually
#u"setattr": (0, 0, False, ['name', 'value'], None), # overloaded to handle attributes.
u"contains": (0, 0, False, ['value'], None),
u"repr": (0, 0, False, [], None),
u"hash": (0, 0, False, [], None),
}
def register_instantiator(interface, fn):
# You should not be able to call the true instantiator of an object.
# But calling a fake shouldn't harm either.
interface.methods[u"+init"] = builtin.Builtin(
(lambda argv: None),
spec=builtin.get_spec(fn),
source_location=builtin.get_source_location(fn))
# Internal methods help with documenting the system.
# TODO: rethink lifting this eventually?
def hate_them(argv):
raise space.unwind(space.LError(u"hate them"))
#expose_internal_methods(Interface)
#expose_internal_methods(Object) # if I do this,
# every method will have internal_methods
# Besides, Object methods are placeholders.
# I doubt we miss these.
#expose_internal_methods(BoundMethod)
#expose_internal_methods(builtin.Builtin)
# When your good names are your best.
@Interface.instantiator2(builtin.signature(Object))
def Interface_init_is_cast(obj):
return space.get_interface(obj)
# Only active with the user-defined interfaces that may be 'lost'.
@Interface.method(u"+finalize", builtin.signature(Interface))
def Interface_finalize(self):
for record in self.multimethods:
record.multimethod.unregister_record(record)
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.preview.trusted_comms.branded_channel.channel import ChannelList
class BrandedChannelList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version):
"""
Initialize the BrandedChannelList
:param Version version: Version that contains the resource
:returns: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelList
:rtype: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelList
"""
super(BrandedChannelList, self).__init__(version)
# Path Solution
self._solution = {}
def get(self, sid):
"""
Constructs a BrandedChannelContext
:param sid: Branded Channel Sid.
:returns: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelContext
:rtype: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelContext
"""
return BrandedChannelContext(self._version, sid=sid, )
def __call__(self, sid):
"""
Constructs a BrandedChannelContext
:param sid: Branded Channel Sid.
:returns: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelContext
:rtype: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelContext
"""
return BrandedChannelContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.TrustedComms.BrandedChannelList>'
class BrandedChannelPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, response, solution):
"""
Initialize the BrandedChannelPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelPage
:rtype: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelPage
"""
super(BrandedChannelPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of BrandedChannelInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelInstance
:rtype: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelInstance
"""
return BrandedChannelInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.TrustedComms.BrandedChannelPage>'
class BrandedChannelContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, sid):
"""
Initialize the BrandedChannelContext
:param Version version: Version that contains the resource
:param sid: Branded Channel Sid.
:returns: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelContext
:rtype: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelContext
"""
super(BrandedChannelContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/BrandedChannels/{sid}'.format(**self._solution)
# Dependents
self._channels = None
def fetch(self):
"""
Fetch the BrandedChannelInstance
:returns: The fetched BrandedChannelInstance
:rtype: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return BrandedChannelInstance(self._version, payload, sid=self._solution['sid'], )
@property
def channels(self):
"""
Access the channels
:returns: twilio.rest.preview.trusted_comms.branded_channel.channel.ChannelList
:rtype: twilio.rest.preview.trusted_comms.branded_channel.channel.ChannelList
"""
if self._channels is None:
self._channels = ChannelList(self._version, branded_channel_sid=self._solution['sid'], )
return self._channels
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.TrustedComms.BrandedChannelContext {}>'.format(context)
class BrandedChannelInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, payload, sid=None):
"""
Initialize the BrandedChannelInstance
:returns: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelInstance
:rtype: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelInstance
"""
super(BrandedChannelInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'business_sid': payload.get('business_sid'),
'brand_sid': payload.get('brand_sid'),
'sid': payload.get('sid'),
'links': payload.get('links'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: BrandedChannelContext for this BrandedChannelInstance
:rtype: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelContext
"""
if self._context is None:
self._context = BrandedChannelContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def account_sid(self):
"""
:returns: Account Sid.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def business_sid(self):
"""
:returns: Business Sid.
:rtype: unicode
"""
return self._properties['business_sid']
@property
def brand_sid(self):
"""
:returns: Brand Sid.
:rtype: unicode
"""
return self._properties['brand_sid']
@property
def sid(self):
"""
:returns: Branded Channel Sid.
:rtype: unicode
"""
return self._properties['sid']
@property
def links(self):
"""
:returns: Nested resource URLs.
:rtype: unicode
"""
return self._properties['links']
@property
def url(self):
"""
:returns: The URL of this resource.
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch the BrandedChannelInstance
:returns: The fetched BrandedChannelInstance
:rtype: twilio.rest.preview.trusted_comms.branded_channel.BrandedChannelInstance
"""
return self._proxy.fetch()
@property
def channels(self):
"""
Access the channels
:returns: twilio.rest.preview.trusted_comms.branded_channel.channel.ChannelList
:rtype: twilio.rest.preview.trusted_comms.branded_channel.channel.ChannelList
"""
return self._proxy.channels
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.TrustedComms.BrandedChannelInstance {}>'.format(context)
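# Usage note (an assumption based on the usual Twilio helper-library layout,
# not confirmed by this file): a BrandedChannelContext is normally reached
# through a configured Client rather than constructed directly, roughly like
#   client.preview.trusted_comms.branded_channels('BCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').fetch()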
|
|
from io import BytesIO
from fleet.client import fleet_base
from fabric.api import run, settings, put, hide
import logging
import time
import random
__author__ = 'sukrit'
DEFAULT_FAB_SETTINGS = {
'timeout': 30,
'command_timeout': 180,
'connection_attempts': 3,
'disable_known_hosts': True,
'abort_on_prompts': True,
'abort_exception': None,
'forward_agent': True,
'colorize_errors': False,
'user': 'core'
}
FLEETCTL_VERSION_CMD = 'fleetctl version'
FLEET_UPLOAD_DIR = '/tmp/services'
logger = logging.getLogger(__name__)
def _apply_defaults(fab_settings, default_settings=DEFAULT_FAB_SETTINGS):
fab_settings = fab_settings or {}
for key, value in default_settings.iteritems():
fab_settings.setdefault(key, value)
return fab_settings
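# A minimal sketch (hypothetical, not part of the original module) showing the
# merge behaviour of _apply_defaults: keys supplied by the caller win, and any
# missing keys are filled in from DEFAULT_FAB_SETTINGS. The values are made up.
def _demo_apply_defaults():
    merged = _apply_defaults({'timeout': 60, 'user': 'deploy'})
    assert merged['timeout'] == 60            # caller override kept
    assert merged['user'] == 'deploy'         # caller override kept
    assert merged['command_timeout'] == 180   # filled from DEFAULT_FAB_SETTINGS
    return merged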
class Provider(fleet_base.Provider):
"""
Provider for the fabric-based implementation. Requires fabric 1.10+.
"""
def __init__(self, hosts='[email protected]', **kwargs):
fab_settings = kwargs.get('fab_settings', {})
self.log_metadata = kwargs.get('meta_data', {})
self.fab_settings = _apply_defaults(fab_settings)
self.host = random.choice(hosts.split(','))
def _settings(self, **additional_settings):
additional_settings = additional_settings or {}
additional_settings = _apply_defaults(
additional_settings, self.fab_settings)
return settings(hide('warnings'),
host_string=self.host,
**additional_settings)
def _fabric_wrapper(self):
return FabricWrapper(log_metadata=self.log_metadata)
def _fabric_error_wrapper(self):
pass
def client_version(self):
with self._settings():
with self._fabric_wrapper() as stream:
try:
version_string = run(FLEETCTL_VERSION_CMD, stderr=stream)
except SystemExit:
raise FleetExecutionException(
message='Failed to get fleet client version',
command_output=stream.getvalue())
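# 'fleetctl version' prints something like 'fleetctl version 0.11.5';
# strip the command prefix below so only the bare version is returned.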
version_string = version_string.decode(encoding='UTF-8') \
.strip()
if version_string.startswith('{} '.format(
FLEETCTL_VERSION_CMD)):
return version_string.replace(
'{} '.format(FLEETCTL_VERSION_CMD), '')
else:
return None
def deploy_units(self, template_name, service_data_stream, units=1,
start=True):
"""
:param template_name: Template name must contain '@' param for
deploying multiple instances
:param service_data_stream: Stream containing template data
:param units: No. of units to deploy. Defaults to 1. It is only needed
when units need to be started after install.
:return: None
"""
destination_service = '{upload_dir}/{template_name}'. \
format(template_name=template_name, upload_dir=FLEET_UPLOAD_DIR)
with self._settings():
with self._fabric_wrapper() as stream:
try:
run('mkdir -p {}'.format(FLEET_UPLOAD_DIR), stdout=stream,
stderr=stream)
put(service_data_stream, destination_service)
run('fleetctl submit {destination_service}'
.format(destination_service=destination_service),
stdout=stream, stderr=stream)
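# Expand the template into concrete unit names, e.g. with units=3 a
# template like 'myapp@.service' becomes 'myapp@{1..3}.service'; the
# brace range is expanded by the remote shell when fleetctl runs.
# (The template name here is made up for illustration.)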
service = template_name.replace('@', '@{1..%d}'
% units)
run('fleetctl load -no-block=true {service}'
.format(service=service), stdout=stream, stderr=stream)
# Sleep for some time since we are using no-block.
# In the future we need to write logic to ensure units have
# been loaded
time.sleep(2)
if start:
run('fleetctl start -no-block=true {service}'
.format(service=service),
stdout=stream, stderr=stream)
except SystemExit:
raise FleetExecutionException(
message='Failed to deploy unit: %s' % template_name,
command_output=stream.getvalue())
def start_units(self, template_name, units=1):
"""
Starts units with given count for a given template.
:param template_name: The template name for the unit.
Note: It assumes that template_name is already installed.
See: deploy_units for installing templates programmatically.
:param units: No. of units to deploy
:return: None
"""
with self._settings():
with self._fabric_wrapper() as stream:
try:
service = template_name.replace('@', '@{1..%d}'
% units)
run('fleetctl start -no-block=true {service}'
.format(service=service),
stdout=stream, stderr=stream)
except SystemExit:
raise FleetExecutionException(
message='Failed to start units: %s' % template_name,
command_output=stream.getvalue())
def deploy(self, service_name, service_data_stream, force_remove=False):
destination_service = '{upload_dir}/{service_name}'. \
format(service_name=service_name, upload_dir=FLEET_UPLOAD_DIR)
with self._settings():
with self._fabric_wrapper() as stream:
try:
run('mkdir -p {}'.format(FLEET_UPLOAD_DIR), stdout=stream,
stderr=stream)
put(service_data_stream, destination_service)
if force_remove:
run('fleetctl stop -no-block=true {}'.format(
destination_service), stdout=stream, stderr=stream,
warn_only=True)
run('fleetctl destroy {destination_service}'
.format(destination_service=destination_service),
stdout=stream, stderr=stream)
run('fleetctl start -no-block {destination_service}'
.format(destination_service=destination_service),
stdout=stream, stderr=stream)
except SystemExit:
raise FleetExecutionException(
message='Failed to deploy service: %s' % service_name,
command_output=stream.getvalue())
def destroy_units_matching(self, service_prefix, exclude_prefix=None):
with self._fabric_wrapper() as stream:
with self._settings():
try:
exclude_prefix = exclude_prefix or '^$'
run('fleetctl list-units | grep \'%s\' | grep -v \'%s\' |'
' awk \'{print $1}\' | '
'xargs fleetctl stop -no-block=true '
% (service_prefix, exclude_prefix), stdout=stream,
stderr=stream, warn_only=True)
# Sleep for a couple of seconds to prevent timeout issues
# with fabric.
time.sleep(2)
run('fleetctl list-units | grep \'%s\' | grep -v \'%s\' |'
' awk \'{print $1}\' | xargs fleetctl unload'
% (service_prefix, exclude_prefix), stdout=stream,
stderr=stream)
run('fleetctl list-unit-files | grep \'%s\' | '
'grep -v \'%s\' | awk \'{print $1}\' | '
'xargs fleetctl destroy'
% (service_prefix, exclude_prefix), stdout=stream,
stderr=stream)
except SystemExit:
raise FleetExecutionException(
message='Failed to destroy units with prefix: %s '
'exclude prefix: %s'
% (service_prefix, exclude_prefix),
command_output=stream.getvalue())
def fetch_units_matching(self, service_prefix, exclude_prefix=None):
exclude_prefix = exclude_prefix or '^$'
list_cmd = 'fleetctl list-units -no-legend ' \
'-fields unit,machine,active,sub -full | ' \
'grep \'{}\' | grep -v \'{}\''.format(
service_prefix, exclude_prefix)
with self._fabric_wrapper() as stream:
with self._settings():
try:
logger.info('fetch_units_matching: {}'.format(list_cmd))
units_raw = run(list_cmd, stdout=stream, stderr=stream,
warn_only=True)
for line in units_raw.splitlines():
cols = line.split()
if not cols or len(cols) < 4:
continue
else:
yield {
'unit': cols[0],
'machine': cols[1],
'active': cols[2],
'sub': cols[3]
}
except SystemExit:
raise FleetExecutionException(
message='Failed to fetch units with prefix: %s'
% service_prefix,
command_output=stream.getvalue())
def destroy(self, service):
with self._fabric_wrapper() as stream:
with self._settings():
try:
run('fleetctl stop -no-block=true {}'.format(service),
stdout=stream, stderr=stream, warn_only=False)
run('fleetctl destroy {}'.format(service), stdout=stream,
stderr=stream)
except SystemExit:
raise FleetExecutionException(
message='Failed to destroy unit: %s'
% service,
command_output=stream.getvalue())
def status(self, service_name):
with self._fabric_wrapper() as stream:
with self._settings():
try:
return run('fleetctl list-units | grep {} | '
'awk \'{{{{print $4}}}}\''.format(service_name),
stdout=stream, stderr=stream)
except SystemExit:
raise FleetExecutionException(
message='Failed to get status for unit: %s'
% service_name,
command_output=stream.getvalue())
def stop_units_matching(self, service_prefix, exclude_prefix=None):
exclude_prefix = exclude_prefix or '^$'
stop_cmd = 'fleetctl list-units | grep \'%s\' | grep -v \'%s\' | ' \
'awk \'{print $1}\' | ' \
'xargs fleetctl stop -no-block=true ' % (
service_prefix, exclude_prefix)
with self._fabric_wrapper() as stream:
with self._settings():
try:
logger.info('stop_units_matching: {}'.format(stop_cmd))
run(stop_cmd, stdout=stream,
stderr=stream, warn_only=False)
# Sleep for a couple of seconds to prevent timeout issues
# with fabric.
time.sleep(2)
except SystemExit:
raise FleetExecutionException(
message='Failed to stop units with prefix: %s '
'exclude prefix: %s'
% (service_prefix, exclude_prefix),
command_output=stream.getvalue())
class FleetExecutionException(Exception):
def __init__(self, message='One or more commands failed to get '
'executed on coreos cluster.',
command_output='', log_metadata=None):
self.message = message
self.command_output = command_output
Exception.__init__(self, message, command_output, log_metadata)
def __repr__(self):
return 'FleetExecutionException: %s \noutput: %s' % \
(self.message, self.command_output)
def __str__(self):
return self.__repr__()
class FabricWrapper:
def __init__(self, stream=None, log_metadata=None):
self.stream = stream or BytesIO()
self.log_metadata = log_metadata or {}
def __enter__(self):
return self.stream
def __exit__(self, exc_type, value, traceback):
output = self.stream.getvalue()
if isinstance(value, BaseException):
logger.exception(value)
else:
logger.info(output, extra=self.log_metadata)
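# A minimal usage sketch (hypothetical host names and unit prefix, not part of
# the original module) of the fabric-based Provider defined above.
def _demo_provider_usage():
    provider = Provider(hosts='core@coreos-1.example.com,core@coreos-2.example.com')
    print(provider.client_version())
    provider.stop_units_matching('myapp@')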
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
__all__ = [
'ShuffleNetV2', 'ShuffleNetV2_x0_5', 'ShuffleNetV2_x1_0',
'ShuffleNetV2_x1_5', 'ShuffleNetV2_x2_0'
]
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": 256,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class ShuffleNetV2():
def __init__(self, scale=1.0):
self.params = train_parameters
self.scale = scale
def net(self, input, class_dim=1000):
scale = self.scale
stage_repeats = [4, 8, 4]
if scale == 0.5:
stage_out_channels = [-1, 24, 48, 96, 192, 1024]
elif scale == 1.0:
stage_out_channels = [-1, 24, 116, 232, 464, 1024]
elif scale == 1.5:
stage_out_channels = [-1, 24, 176, 352, 704, 1024]
elif scale == 2.0:
stage_out_channels = [-1, 24, 224, 488, 976, 2048]
else:
raise ValueError(
    "scale {} is not supported; expected one of "
    "[0.5, 1.0, 1.5, 2.0]".format(scale))
#conv1
input_channel = stage_out_channels[1]
conv1 = self.conv_bn_layer(
input=input,
filter_size=3,
num_filters=input_channel,
padding=1,
stride=2)
pool1 = fluid.layers.pool2d(
input=conv1,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
conv = pool1
# bottleneck sequences
for idxstage in range(len(stage_repeats)):
numrepeat = stage_repeats[idxstage]
output_channel = stage_out_channels[idxstage + 2]
for i in range(numrepeat):
if i == 0:
conv = self.inverted_residual_unit(
input=conv,
num_filters=output_channel,
stride=2,
benchmodel=2)
else:
conv = self.inverted_residual_unit(
input=conv,
num_filters=output_channel,
stride=1,
benchmodel=1)
conv_last = self.conv_bn_layer(
input=conv,
filter_size=1,
num_filters=stage_out_channels[-1],
padding=0,
stride=1)
pool_last = fluid.layers.pool2d(
input=conv_last,
pool_size=7,
pool_stride=7,
pool_padding=0,
pool_type='avg')
output = fluid.layers.fc(input=pool_last,
size=class_dim,
param_attr=ParamAttr(initializer=MSRA()))
return output
def conv_bn_layer(self,
input,
filter_size,
num_filters,
stride,
padding,
num_groups=1,
use_cudnn=True,
if_act=True):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
act=None,
use_cudnn=use_cudnn,
param_attr=ParamAttr(initializer=MSRA()),
bias_attr=False)
if if_act:
return fluid.layers.batch_norm(input=conv, act='relu')
else:
return fluid.layers.batch_norm(input=conv)
def channel_shuffle(self, x, groups):
batchsize, num_channels, height, width = x.shape[0], x.shape[
1], x.shape[2], x.shape[3]
channels_per_group = num_channels // groups
# reshape
x = fluid.layers.reshape(
x=x, shape=[batchsize, groups, channels_per_group, height, width])
x = fluid.layers.transpose(x=x, perm=[0, 2, 1, 3, 4])
# flatten
x = fluid.layers.reshape(
x=x, shape=[batchsize, num_channels, height, width])
return x
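# Example of the shuffle for num_channels=4, groups=2: channels [0, 1, 2, 3]
# are viewed as a 2x2 grid [[0, 1], [2, 3]], transposed to [[0, 2], [1, 3]]
# and flattened back, giving the interleaved channel order [0, 2, 1, 3].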
def inverted_residual_unit(self, input, num_filters, stride, benchmodel):
assert stride in [1, 2], \
"supported stride are {} but your stride is {}".format([1,2], stride)
oup_inc = num_filters // 2
inp = input.shape[1]
if benchmodel == 1:
x1, x2 = fluid.layers.split(
input,
num_or_sections=[input.shape[1] // 2, input.shape[1] // 2],
dim=1)
conv_pw = self.conv_bn_layer(
input=x2,
num_filters=oup_inc,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=True)
conv_dw = self.conv_bn_layer(
input=conv_pw,
num_filters=oup_inc,
filter_size=3,
stride=stride,
padding=1,
num_groups=oup_inc,
if_act=False)
conv_linear = self.conv_bn_layer(
input=conv_dw,
num_filters=oup_inc,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=True)
out = fluid.layers.concat([x1, conv_linear], axis=1)
else:
#branch1
conv_dw = self.conv_bn_layer(
input=input,
num_filters=inp,
filter_size=3,
stride=stride,
padding=1,
num_groups=inp,
if_act=False)
conv_linear_1 = self.conv_bn_layer(
input=conv_dw,
num_filters=oup_inc,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=True)
#branch2
conv_pw = self.conv_bn_layer(
input=input,
num_filters=oup_inc,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=True)
conv_dw = self.conv_bn_layer(
input=conv_pw,
num_filters=oup_inc,
filter_size=3,
stride=stride,
padding=1,
num_groups=oup_inc,
if_act=False)
conv_linear_2 = self.conv_bn_layer(
input=conv_dw,
num_filters=oup_inc,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
if_act=True)
out = fluid.layers.concat([conv_linear_1, conv_linear_2], axis=1)
return self.channel_shuffle(out, 2)
def ShuffleNetV2_x0_5():
model = ShuffleNetV2(scale=0.5)
return model
def ShuffleNetV2_x1_0():
model = ShuffleNetV2(scale=1.0)
return model
def ShuffleNetV2_x1_5():
model = ShuffleNetV2(scale=1.5)
return model
def ShuffleNetV2_x2_0():
model = ShuffleNetV2(scale=2.0)
return model
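# A minimal usage sketch (not from the original file): build the 1.0x network
# inside a fluid program. The layer name 'image' is an arbitrary choice.
def _demo_build_shufflenet_v2(class_dim=1000):
    image = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')
    model = ShuffleNetV2_x1_0()
    logits = model.net(image, class_dim=class_dim)
    return logits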
|
|
"""
SoftLayer.tests.CLI.modules.ticket_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import json
import mock
from SoftLayer.CLI import exceptions
from SoftLayer import testing
class TicketTests(testing.TestCase):
def test_list(self):
result = self.run_command(['ticket', 'list'])
expected = [{
'assigned_user': 'John Smith',
'id': 102,
'last_edited': '2013-08-01T14:16:47-07:00',
'status': 'Open',
'title': 'Cloud Instance Cancellation - 08/01/13'}]
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output), expected)
def test_detail(self):
result = self.run_command(['ticket', 'detail', '1'])
expected = {
'created': '2013-08-01T14:14:04-07:00',
'edited': '2013-08-01T14:16:47-07:00',
'id': 100,
'status': 'Closed',
'title': 'Cloud Instance Cancellation - 08/01/13',
'update 1': 'a bot says something',
'update 2': 'By John Smith\nuser says something',
'update 3': 'By emp1 (Employee)\nemployee says something',
}
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output), expected)
def test_create(self):
result = self.run_command(['ticket', 'create', '--title=Test',
'--subject-id=1000',
'--body=ticket body'])
self.assert_no_fail(result)
args = ({'subjectId': 1000,
'contents': 'ticket body',
'assignedUserId': 12345,
'title': 'Test'}, 'ticket body')
self.assert_called_with('SoftLayer_Ticket', 'createStandardTicket',
args=args)
def test_create_and_attach(self):
result = self.run_command(['ticket', 'create', '--title=Test',
'--subject-id=1000',
'--body=ticket body',
'--hardware=234',
'--virtual=567'])
self.assert_no_fail(result)
args = ({'subjectId': 1000,
'contents': 'ticket body',
'assignedUserId': 12345,
'title': 'Test'}, 'ticket body')
self.assert_called_with('SoftLayer_Ticket', 'createStandardTicket',
args=args)
self.assert_called_with('SoftLayer_Ticket', 'addAttachedHardware',
args=(234,),
identifier=100)
self.assert_called_with('SoftLayer_Ticket', 'addAttachedVirtualGuest',
args=(567,),
identifier=100)
@mock.patch('click.edit')
def test_create_no_body(self, edit_mock):
edit_mock.return_value = 'ticket body'
result = self.run_command(['ticket', 'create', '--title=Test',
'--subject-id=1000'])
self.assert_no_fail(result)
args = ({'subjectId': 1000,
'contents': 'ticket body',
'assignedUserId': 12345,
'title': 'Test'}, 'ticket body')
self.assert_called_with('SoftLayer_Ticket', 'createStandardTicket',
args=args)
def test_subjects(self):
list_expected_ids = [1001, 1002, 1003, 1004, 1005]
result = self.run_command(['ticket', 'subjects'])
self.assert_no_fail(result)
results = json.loads(result.output)
for result in results:
self.assertIn(result['id'], list_expected_ids)
def test_attach_no_identifier(self):
result = self.run_command(['ticket', 'attach', '1'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.ArgumentError)
def test_attach_two_identifiers(self):
result = self.run_command(['ticket',
'attach',
'1',
'--hardware=100',
'--virtual=100'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.ArgumentError)
def test_ticket_attach_hardware(self):
result = self.run_command(['ticket', 'attach', '1', '--hardware=100'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Ticket', 'addAttachedHardware',
args=(100,),
identifier=1)
def test_ticket_attach_virtual_server(self):
result = self.run_command(['ticket', 'attach', '1', '--virtual=100'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Ticket', 'addAttachedVirtualGuest',
args=(100,),
identifier=1)
def test_detach_no_identifier(self):
result = self.run_command(['ticket', 'detach', '1'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.ArgumentError)
def test_detach_two_identifiers(self):
result = self.run_command(['ticket',
'detach',
'1',
'--hardware=100',
'--virtual=100'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.ArgumentError)
def test_ticket_detach_hardware(self):
result = self.run_command(['ticket', 'detach', '1', '--hardware=100'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Ticket',
'removeAttachedHardware',
args=(100,),
identifier=1)
def test_ticket_detach_virtual_server(self):
result = self.run_command(['ticket', 'detach', '1', '--virtual=100'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Ticket',
'removeAttachedVirtualGuest',
args=(100,),
identifier=1)
def test_ticket_upload_no_path(self):
result = self.run_command(['ticket', 'upload', '1'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.ArgumentError)
def test_ticket_upload_invalid_path(self):
result = self.run_command(['ticket', 'upload', '1',
'--path=tests/resources/nonexistent_file',
'--name=a_file_name'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.ArgumentError)
def test_ticket_upload_no_name(self):
result = self.run_command(['ticket', 'upload', '1',
'--path=tests/resources/attachment_upload'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Ticket',
'addAttachedFile',
args=({"filename": "attachment_upload",
"data": b"ticket attached data"},),
identifier=1)
def test_ticket_upload(self):
result = self.run_command(['ticket', 'upload', '1',
'--path=tests/resources/attachment_upload',
'--name=a_file_name'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Ticket',
'addAttachedFile',
args=({"filename": "a_file_name",
"data": b"ticket attached data"},),
identifier=1)
|
|
"""
2D inversion for the basement relief of sedimentary basins.
There are different parametrizations available.
The simplest ones are meant more as an exercise and an introduction to inverse problems:
* :func:`~fatiando.gravmag.basin2d.Triangular`: assumes a basin with a
triangular cross-section (think "foreland").
* :func:`~fatiando.gravmag.basin2d.Trapezoidal`: assumes a basin with a
trapezoidal cross-section (think "graben").
More complex parametrizations are:
* :func:`~fatiando.gravmag.basin2d.PolygonalBasinGravity`: approximate the
basin by a polygon.
----
"""
from __future__ import division
from future.builtins import super
import numpy as np
from ..inversion.misfit import Misfit
from . import talwani
from ..mesher import Polygon
from .. import utils
class PolygonalBasinGravity(Misfit):
"""
Estimate the relief of a sedimentary basin by approximating it with a polygon.
Currently only works with gravity data.
The top of the basin is straight and fixed at a given height. Polygon
vertices are distributed evenly in the x-direction. The inversion
estimates the depths of each vertex.
This is a non-linear inversion. Therefore you must configure it before
running to choose a solver method and set the initial estimate.
Use the ``config`` method for this.
Recommended configuration: the Levenberg-Marquardt algorithm (``'levmarq'``)
with the initial estimate set to the average expected depth of the basin.
Typical regularizations to use with this class are:
:class:`~fatiando.inversion.regularization.Damping`,
:class:`~fatiando.inversion.regularization.Smoothness1D`,
:class:`~fatiando.inversion.regularization.TotalVariation1D`.
The forward modeling is done using :mod:`~fatiando.gravmag.talwani`.
Derivatives are calculated using a 2-point finite difference approximation.
.. tip::
Use the ``estimate_`` attribute to get a
:class:`~fatiando.mesher.Polygon` version of the estimated parameters
(attribute ``p_``).
Parameters:
* x, z : 1d-arrays
The x and z coordinates of the observations. In meters.
* data : 1d-array
The observed data.
* npoints : int
Number of points (estimated vertex depths) to use in the polygonal
approximation of the basin relief.
* props : dict
The physical properties dictionary that will be assigned to the
basin :class:`~fatiando.mesher.Polygon`. Ex: to give the basin a
density contrast of 500 kg/m3 ``props={'density': 500}``.
* top : float
The value of the z-coordinate where the top of the basin will be fixed.
In meters. Default: 0.
* xlim : None or list = [xmin, xmax]
The horizontal limits of the model. If not given, will use the limits
of the data (i.e., ``[x.min(), x.max()]``).
Examples:
Let's run an inversion on synthetic data from a simple model of a trapezoidal
basin (a polygon with 4 vertices). We'll assume that the horizontal limits
of the basin are the same as the limits of the data:
>>> from fatiando.mesher import Polygon
>>> from fatiando.gravmag import talwani
>>> import numpy as np
>>> # Make some synthetic data from a simple basin
>>> props = {'density': -500}
>>> model = [Polygon([[3000, 0], [2000, 800], [1000, 500], [0, 0]],
... props)]
>>> x = np.linspace(0, 3000, 50)
>>> z = -np.ones_like(x) # Put data at 1m height
>>> data = talwani.gz(x, z, model)
>>> # Make the solver, configure, and invert.
>>> # Will use only 2 points because the two in the corners are
>>> # considered fixed in the inversion (at 'top').
>>> misfit = PolygonalBasinGravity(x, z, data, 2, props, top=0)
>>> _ = misfit.config('levmarq', initial=100*np.ones(misfit.nparams)).fit()
>>> misfit.p_
array([ 800., 500.])
>>> type(misfit.estimate_)
<class 'fatiando.mesher.Polygon'>
>>> misfit.estimate_.vertices
array([[ 3000., 0.],
[ 2000., 800.],
[ 1000., 500.],
[ 0., 0.]])
If the x range of the data points is larger than the basin, you can specify
a horizontal range for the basin model. When this is not specified, it is
deduced from the data:
>>> x = np.linspace(-500, 3500, 80)
>>> z = -np.ones_like(x)
>>> data = talwani.gz(x, z, model)
>>> # Specify that the model used for inversion should be within
>>> # x => [0, 3000]
>>> misfit = PolygonalBasinGravity(x, z, data, 2, props, top=0,
... xlim=[0, 3000])
>>> _ = misfit.config('levmarq', initial=100*np.ones(misfit.nparams)).fit()
>>> misfit.p_
array([ 800., 500.])
>>> misfit.estimate_.vertices
array([[ 3000., 0.],
[ 2000., 800.],
[ 1000., 500.],
[ 0., 0.]])
"""
def __init__(self, x, z, data, npoints, props, top=0, xlim=None):
super().__init__(data=data.ravel(), nparams=npoints, islinear=False)
self.npoints = npoints
self.x = x
self.z = z
self.props = props
self.top = top
if xlim is None:
xlim = [x.min(), x.max()]
self.xlim = xlim
self._modelx = np.linspace(xlim[0], xlim[1], npoints + 2)[::-1]
def p2vertices(self, p):
"""
Convert a parameter vector into the vertices of a Polygon.
Parameters:
* p : 1d-array
The parameter vector with the depth of the polygon vertices
Returns:
* vertices : 2d-array
Like a list of [x, z] coordinates of each vertex
Examples:
>>> import numpy as np
>>> # Make some arrays to create the estimator class
>>> x = np.linspace(-100, 300, 50)
>>> z = np.zeros_like(x)
>>> data = z
>>> misfit = PolygonalBasinGravity(x, z, data, 3, {}, top=-100)
>>> misfit.p2vertices([1, 2, 3])
array([[ 300., -100.],
[ 200., 1.],
[ 100., 2.],
[ 0., 3.],
[-100., -100.]])
"""
h = self.top
verts = np.empty((self.nparams + 2, 2))
verts[:, 0] = self._modelx
verts[:, 1] = np.concatenate([[h], p, [h]])
return verts
def predicted(self, p):
"""
Calculate the predicted data for a parameter vector.
"""
verts = self.p2vertices(p)
poly = Polygon(verts, self.props)
return talwani.gz(self.x, self.z, [poly])
def jacobian(self, p):
"""
Calculate the Jacobian (sensitivity) matrix for a parameter vector.
"""
verts = self.p2vertices(p)
delta = np.array([0, 1])
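# Each Jacobian column is a centered (2-point) finite difference:
# jac[:, i] ~= (gz(p + delta*e_i) - gz(p - delta*e_i)) / (2*delta).
# Instead of running two full forward models, the small 'diff' polygon
# below is the region swept by vertex i+1 when its depth is perturbed by
# +/- delta, so a single talwani.gz call gives the numerator directly.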
jac = np.empty((self.ndata, self.nparams))
for i in xrange(self.nparams):
diff = Polygon([verts[i + 2], verts[i + 1] - delta,
verts[i], verts[i + 1] + delta], self.props)
jac[:, i] = talwani.gz(self.x, self.z, [diff])/(2*delta[1])
return jac
def fmt_estimate(self, p):
"""
Convert the parameter vector to a :class:`fatiando.mesher.Polygon` so
that it can be used for plotting and forward modeling.
Examples:
>>> import numpy as np
>>> # Make some arrays to create the estimator class
>>> x = np.linspace(-100, 300, 50)
>>> z = np.zeros_like(x)
>>> data = z
>>> misfit = PolygonalBasinGravity(x, z, data, 3, {}, top=-100)
>>> poly = misfit.fmt_estimate([1, 2, 3])
>>> type(poly)
<class 'fatiando.mesher.Polygon'>
>>> poly.vertices
array([[ 300., -100.],
[ 200., 1.],
[ 100., 2.],
[ 0., 3.],
[-100., -100.]])
"""
poly = Polygon(self.p2vertices(p), self.props)
return poly
class Triangular(Misfit):
"""
Estimate the relief of a triangular basin.
Use when the basin can be approximated by a 2D body with **triangular**
vertical cross-section, like foreland basins.
The triangle is assumed to have 2 known vertices at the surface (the edges
of the basin) and one unknown vertex in the subsurface.
The inversion will estimate the (x, z) coordinates of the unknown vertex.
The forward modeling is done using :mod:`~fatiando.gravmag.talwani`.
Derivatives are calculated using a 2-point finite difference approximation.
.. tip::
Use the ``estimate_`` attribute to produce a polygon from the
estimated parameter vector (``p_``).
Parameters:
* x, z : array
Arrays with the x and z coordinates of the profile data points
* gz : array
The profile gravity anomaly data
* verts : list of lists
``[[x1, z1], [x2, z2]]`` List of the [x, z] coordinates of the left and
right known vertices, respectively.
.. warning::
Very important that the vertices in the list be ordered from left
to right! Otherwise the forward model will give results with an
inverted sign and terrible things may happen!
* density : float
Density contrast of the basin
* delta : float
Interval used to calculate the approximate derivatives
.. note::
The recommended solver for this inverse problem is the
Levenberg-Marquardt method. Since this is a non-linear problem, set the
desired method and initial solution using the ``config`` method of
this class. See the example below.
Example using synthetic data:
>>> import numpy as np
>>> from fatiando.mesher import Polygon
>>> from fatiando.gravmag import talwani
>>> # Make a triangular basin model (will estimate the last point)
>>> verts = [(10000, 1), (90000, 1), (50000, 5000)]
>>> left, middle, right = verts
>>> model = Polygon(verts, {'density':500})
>>> # Generate the synthetic gz profile
>>> x = np.linspace(0, 100000, 50)
>>> z = np.zeros_like(x)
>>> gz = talwani.gz(x, z, [model])
>>> # Make a solver and fit it to the data
>>> solver = Triangular(x, z, gz, [left, middle], 500).config(
... 'levmarq', initial=[10000, 1000]).fit()
>>> # p_ is the estimated parameter vector (x and z in this case)
>>> x, z = solver.p_
>>> print('{:.1f}, {:.1f}'.format(x, z))
50000.0, 5000.0
>>> # The parameter vector is not that useful so use estimate_ to get a
>>> # Polygon object
>>> poly = solver.estimate_
>>> poly.vertices
array([[ 1.00000000e+04, 1.00000000e+00],
[ 9.00000000e+04, 1.00000000e+00],
[ 5.00000000e+04, 5.00000000e+03]])
>>> poly.props
{'density': 500}
>>> # Check if the residuals are all small
>>> np.all(np.abs(solver.residuals()) < 10**-10)
True
"""
def __init__(self, x, z, gz, verts, density):
assert x.shape == z.shape == gz.shape, \
"x, z, and data must be of same length"
assert len(verts) == 2, \
"Need exactly 2 vertices. {} given".format(len(verts))
super().__init__(data=gz, nparams=2, islinear=False)
self.x = np.array(x, dtype=np.float)
self.z = np.array(z, dtype=np.float)
self.density = density
self.verts = list(verts)
def predicted(self, p):
"""
Calculate predicted data for a given parameter vector.
"""
polygon = Polygon(self.verts + [p], {'density': self.density})
return talwani.gz(self.x, self.z, [polygon])
def jacobian(self, p):
"""
Calculate the Jacobian (sensitivity) matrix for a given parameter
vector.
"""
delta = 1.
props = {'density': self.density}
xp, zp = self.x, self.z
verts = self.verts
x, z = p
jac = np.transpose([
(talwani.gz(xp, zp, [Polygon(verts + [[x + delta, z]], props)])
- talwani.gz(xp, zp, [Polygon(verts + [[x - delta, z]], props)])
) / (2. * delta),
(talwani.gz(xp, zp, [Polygon(verts + [[x, z + delta]], props)])
- talwani.gz(xp, zp, [Polygon(verts + [[x, z - delta]], props)])
) / (2. * delta)])
return jac
def fmt_estimate(self, p):
"""
Convert the parameter vector to a :class:`~fatiando.mesher.Polygon` so
that it can be used for plotting and forward modeling.
"""
left, right = self.verts
props = {'density': self.density}
poly = Polygon(np.array([left, right, p]), props=props)
return poly
class Trapezoidal(Misfit):
"""
Estimate the relief of a trapezoidal basin.
Use when the basin can be approximated by a 2D body with **trapezoidal**
vertical cross-section, like in rifts.
The trapezoid is assumed to have 2 known vertices at the surface
(the edges of the basin) and two unknown vertices in the subsurface.
We assume that the x coordinates of the unknown vertices are the same as
the x coordinates of the known vertices (i.e., the unknown vertices are
directly under the known vertices).
The inversion will then estimate the z coordinates of the unknown vertices.
The forward modeling is done using :mod:`~fatiando.gravmag.talwani`.
Derivatives are calculated using a 2-point finite difference approximation.
.. tip::
Use the ``estimate_`` attribute to produce a polygon from the
estimated parameter vector (``p_``).
Parameters:
* x, z : array
Arrays with the x and z coordinates of the profile data points
* gz : array
The profile gravity anomaly data
* verts : list of lists
``[[x1, z1], [x2, z2]]`` List of the [x, z] coordinates of the left and
right known vertices, respectively.
.. warning::
Very important that the vertices in the list be ordered from left
to right! Otherwise the forward model will give results with an
inverted sign and terrible things may happen!
* density : float
Density contrast of the basin
* delta : float
Interval used to calculate the approximate derivatives
.. note::
The recommended solver for this inverse problem is the
Levenberg-Marquardt method. Since this is a non-linear problem, set the
desired method and initial solution using the ``config`` method.
See the example below.
Example with synthetic data:
>>> import numpy as np
>>> from fatiando.mesher import Polygon
>>> from fatiando.gravmag import talwani
>>> # Make a trapezoidal basin model (will estimate the z coordinates
>>> # of the last two points)
>>> verts = [[10000, 1], [90000, 1], [90000, 5000], [10000, 3000]]
>>> model = Polygon(verts, {'density':500})
>>> # Generate the synthetic gz profile
>>> x = np.linspace(0, 100000, 50)
>>> z = np.zeros_like(x)
>>> gz = talwani.gz(x, z, [model])
>>> # Make a solver and fit it to the data
>>> solver = Trapezoidal(x, z, gz, verts[0:2], 500).config(
... 'levmarq', initial=[1000, 500]).fit()
>>> # p_ is the estimated parameter vector (z1 and z2 in this case)
>>> z1, z2 = solver.p_
>>> print('{:.1f}, {:.1f}'.format(z1, z2))
5000.0, 3000.0
>>> # The parameter vector is not that useful so use estimate_ to get a
>>> # Polygon object
>>> poly = solver.estimate_
>>> poly.vertices
array([[ 1.00000000e+04, 1.00000000e+00],
[ 9.00000000e+04, 1.00000000e+00],
[ 9.00000000e+04, 5.00000000e+03],
[ 1.00000000e+04, 3.00000000e+03]])
>>> poly.props
{'density': 500}
>>> # Check if the residuals are all small
>>> np.all(np.abs(solver.residuals()) < 10**-10)
True
"""
def __init__(self, x, z, gz, verts, density):
assert x.shape == z.shape == gz.shape, \
"x, z, and data must be of same length"
assert len(verts) == 2, \
"Need exactly 2 vertices. {} given".format(len(verts))
super().__init__(data=gz, nparams=2, islinear=False)
self.x = np.array(x, dtype=np.float)
self.z = np.array(z, dtype=np.float)
self.density = density
self.props = {'density': self.density}
self.verts = list(verts)
self.x1, self.x2 = verts[1][0], verts[0][0]
def predicted(self, p):
z1, z2 = p
model = [Polygon(self.verts + [[self.x1, z1], [self.x2, z2]],
self.props)]
pred = talwani.gz(self.x, self.z, model)
return pred
def jacobian(self, p):
z1, z2 = p
x1, x2 = self.x1, self.x2
x, z = self.x, self.z
props = self.props
verts = self.verts
delta = 1.
jac = np.empty((self.ndata, self.nparams), dtype=np.float)
z1p = [Polygon(verts + [[x1, z1 + delta], [x2, z2]], props)]
z1m = [Polygon(verts + [[x1, z1 - delta], [x2, z2]], props)]
jac[:, 0] = (talwani.gz(x, z, z1p) - talwani.gz(x, z, z1m))/(2*delta)
z2p = [Polygon(verts + [[x1, z1], [x2, z2 + delta]], props)]
z2m = [Polygon(verts + [[x1, z1], [x2, z2 - delta]], props)]
jac[:, 1] = (talwani.gz(x, z, z2p) - talwani.gz(x, z, z2m))/(2*delta)
return jac
def fmt_estimate(self, p):
"""
Convert the parameter vector to a :class:`fatiando.mesher.Polygon` so
that it can be used for plotting and forward modeling.
"""
z1, z2 = p
left, right = self.verts
poly = Polygon(np.array([left, right, [self.x1, z1], [self.x2, z2]]),
self.props)
return poly
|
|
from datetime import datetime, time, timedelta
from time import sleep
import unittest
from airflow import configuration
configuration.test_mode()
from airflow import jobs, models, DAG, executors, utils, operators
from airflow.www.app import app
from airflow import utils
NUM_EXAMPLE_DAGS = 5
DEV_NULL = '/dev/null'
LOCAL_EXECUTOR = executors.LocalExecutor()
DEFAULT_DATE = datetime(2015, 1, 1)
configuration.test_mode()
class TransferTests(unittest.TestCase):
def setUp(self):
configuration.test_mode()
utils.initdb()
args = {'owner': 'airflow', 'start_date': datetime(2015, 1, 1)}
dag = DAG('hive_test', default_args=args)
self.dag = dag
def test_mysql_to_hive(self):
sql = "SELECT * FROM task_instance LIMIT 1000;"
t = operators.MySqlToHiveTransfer(
task_id='test_m2h',
mysql_conn_id='airflow_db',
sql=sql,
hive_table='airflow.test_mysql_to_hive',
recreate=True,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_mysql_to_hive_partition(self):
sql = "SELECT * FROM task_instance LIMIT 1000;"
t = operators.MySqlToHiveTransfer(
task_id='test_m2h',
mysql_conn_id='airflow_db',
sql=sql,
hive_table='airflow.test_mysql_to_hive_part',
partition={'ds': '2015-01-02'},
recreate=False,
create=True,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
class HivePrestoTest(unittest.TestCase):
def setUp(self):
configuration.test_mode()
utils.initdb()
args = {'owner': 'airflow', 'start_date': datetime(2015, 1, 1)}
dag = DAG('hive_test', default_args=args)
self.dag = dag
self.hql = """
USE airflow;
DROP TABLE IF EXISTS static_babynames_partitioned;
CREATE TABLE IF NOT EXISTS static_babynames_partitioned (
state string,
year string,
name string,
gender string,
num int)
PARTITIONED BY (ds string);
INSERT OVERWRITE TABLE static_babynames_partitioned
PARTITION(ds='{{ ds }}')
SELECT state, year, name, gender, num FROM static_babynames;
"""
def test_hive(self):
t = operators.HiveOperator(
task_id='basic_hql', hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_beeline(self):
t = operators.HiveOperator(
task_id='beeline_hql', hive_cli_conn_id='beeline_default',
hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_presto(self):
sql = """
SELECT count(1) FROM airflow.static_babynames_partitioned;
"""
t = operators.PrestoCheckOperator(
task_id='presto_check', sql=sql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_hdfs_sensor(self):
t = operators.HdfsSensor(
task_id='hdfs_sensor_check',
filepath='/user/hive/warehouse/airflow.db/static_babynames',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_sql_sensor(self):
t = operators.SqlSensor(
task_id='hdfs_sensor_check',
conn_id='presto_default',
sql="SELECT 'x' FROM airflow.static_babynames LIMIT 1;",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_hive_stats(self):
t = operators.HiveStatsCollectionOperator(
task_id='hive_stats_check',
table="airflow.static_babynames_partitioned",
partition={'ds': '2015-01-01'},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_hive_partition_sensor(self):
t = operators.HivePartitionSensor(
task_id='hive_partition_check',
table='airflow.static_babynames_partitioned',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_hive2samba(self):
t = operators.Hive2SambaOperator(
task_id='hive2samba_check',
samba_conn_id='tableau_samba',
hql="SELECT * FROM airflow.static_babynames LIMIT 10000",
destination_filepath='test_airflow.csv',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_hive_to_mysql(self):
t = operators.HiveToMySqlTransfer(
task_id='hive_to_mysql_check',
sql="""
SELECT name, count(*) as ccount
FROM airflow.static_babynames
GROUP BY name
""",
mysql_table='test_static_babynames',
mysql_preoperator='TRUNCATE TABLE test_static_babynames;',
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
class CoreTest(unittest.TestCase):
def setUp(self):
configuration.test_mode()
utils.initdb()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
utils.initdb()
args = {'owner': 'airflow', 'start_date': datetime(2015, 1, 1)}
dag = DAG('core_test', default_args=args)
self.dag = dag
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def test_confirm_unittest_mod(self):
assert configuration.conf.get('core', 'unit_test_mode')
def test_time_sensor(self):
t = operators.TimeSensor(
task_id='time_sensor_check',
target_time=time(0),
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_timeout(self):
t = operators.PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=2),
python_callable=lambda: sleep(10),
dag=self.dag)
self.assertRaises(
utils.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, force=True)
job.run()
def test_scheduler_job(self):
job = jobs.SchedulerJob(dag_id='example_bash_operator', test_mode=True)
job.run()
def test_local_backfill_job(self):
self.dag_bash.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
job = jobs.BackfillJob(
dag=self.dag_bash,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(force=True)
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.test_mode()
utils.initdb()
app.config['TESTING'] = True
self.app = app.test_client()
def test_index(self):
response = self.app.get('/', follow_redirects=True)
assert "DAGs" in response.data
assert "example_bash_operator" in response.data
def test_query(self):
response = self.app.get('/admin/queryview/')
assert "Ad Hoc Query" in response.data
response = self.app.get(
"/admin/queryview/?"
"conn_id=presto_default&"
"sql=SELECT+COUNT%281%29+FROM+airflow.static_babynames")
assert "Ad Hoc Query" in response.data
def test_health(self):
response = self.app.get('/health')
assert 'The server is healthy!' in response.data
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
assert "runme_0" in response.data
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
assert "runme_0" in response.data
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
assert "example_bash_operator" in response.data
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_bash_operator')
assert "example_bash_operator" in response.data
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
assert "example_bash_operator" in response.data
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
assert "example_bash_operator" in response.data
response = self.app.get(
'/admin/configurationview/')
assert "Airflow Configuration" in response.data
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date=2015-01-07T00:00:00')
assert "example_bash_operator__runme_1__20150107" in response.data
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date=2015-01-01T00:00:00')
assert "run_this_last" in response.data
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date=2015-01-01')
assert "Attributes" in response.data
response = self.app.get(
'/admin/airflow/dag_stats')
assert "example_bash_operator" in response.data
response = self.app.get(
'/admin/airflow/action?action=clear&task_id=run_this_last&'
'dag_id=example_bash_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date=2015-01-01T00:00:00&'
'origin=http%3A%2F%2Fjn8.brain.musta.ch%3A8080%2Fadmin%2Fairflow'
'%2Ftree%3Fnum_runs%3D65%26dag_id%3Dexample_bash_operator')
assert "Wait a minute" in response.data
response = self.app.get(
'/admin/airflow/action?action=clear&task_id=run_this_last&'
'dag_id=example_bash_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date=2015-01-01T00:00:00&confirmed=true&'
'origin=http%3A%2F%2Fjn8.brain.musta.ch%3A8080%2Fadmin%2Fairflow'
'%2Ftree%3Fnum_runs%3D65%26dag_id%3Dexample_bash_operator')
def test_charts(self):
response = self.app.get(
'/admin/airflow/chart?chart_id=1&iteration_no=1')
assert "Most Popular" in response.data
response = self.app.get(
'/admin/airflow/chart_data?chart_id=1&iteration_no=1')
assert "Michael" in response.data
def tearDown(self):
pass
if 'MySqlOperator' in dir(operators):
# Only testing if the operator is installed
class MySqlTest(unittest.TestCase):
def setUp(self):
configuration.test_mode()
utils.initdb()
args = {'owner': 'airflow', 'start_date': datetime(2015, 1, 1)}
dag = DAG('hive_test', default_args=args)
self.dag = dag
def mysql_operator_test(self):
sql = """
CREATE TABLE IF NOT EXISTS test_airflow (
dummy VARCHAR(50)
);
"""
t = operators.MySqlOperator(
task_id='basic_mysql', sql=sql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
if 'PostgresOperator' in dir(operators):
# Only testing if the operator is installed
class PostgresTest(unittest.TestCase):
def setUp(self):
configuration.test_mode()
utils.initdb()
args = {'owner': 'airflow', 'start_date': datetime(2015, 1, 1)}
dag = DAG('hive_test', default_args=args)
self.dag = dag
def postgres_operator_test(self):
sql = """
CREATE TABLE IF NOT EXISTS test_airflow (
dummy VARCHAR(50)
);
"""
t = operators.PostgresOperator(
task_id='basic_postgres', sql=sql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
autocommitTask = operators.PostgresOperator(
task_id='basic_postgres_with_autocommit',
sql=sql,
dag=self.dag,
autocommit=True)
autocommitTask.run(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
force=True)
class HttpOpSensorTest(unittest.TestCase):
def setUp(self):
configuration.test_mode()
utils.initdb()
args = {'owner': 'airflow', 'start_date': datetime(2015, 1, 1)}
dag = DAG('http_test', default_args=args)
self.dag = dag
def test_get(self):
t = operators.SimpleHttpOperator(
task_id='get_op',
method='GET',
endpoint='/search',
data={"client": "ubuntu", "q": "airflow"},
headers={},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_get_response_check(self):
t = operators.SimpleHttpOperator(
task_id='get_op',
method='GET',
endpoint='/search',
data={"client": "ubuntu", "q": "airflow"},
response_check=lambda response: ("airbnb/airflow" in response.text),
headers={},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_sensor(self):
sensor = operators.HttpSensor(
task_id='http_sensor_check',
conn_id='http_default',
endpoint='/search',
params={"client": "ubuntu", "q": "airflow"},
headers={},
response_check=lambda response: ("airbnb/airflow" in response.text),
poke_interval=5,
timeout=15,
dag=self.dag)
sensor.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_sensor_timeout(self):
sensor = operators.HttpSensor(
task_id='http_sensor_check',
conn_id='http_default',
endpoint='/search',
params={"client": "ubuntu", "q": "airflow"},
headers={},
response_check=lambda response: ("dingdong" in response.text),
poke_interval=2,
timeout=5,
dag=self.dag)
with self.assertRaises(utils.AirflowSensorTimeout):
sensor.run(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
if __name__ == '__main__':
unittest.main()
|
|
import time
from decimal import Decimal
import hashlib
import logging
from django.utils import six
from six.moves.urllib.request import Request, urlopen
from six.moves.urllib.parse import urlencode
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from getpaid import signals
from getpaid.backends import PaymentProcessorBase
from getpaid.backends.payu.tasks import get_payment_status_task, accept_payment
logger = logging.getLogger('getpaid.backends.payu')
class PayUTransactionStatus:
NEW = 1
CANCELED = 2
REJECTED = 3
STARTED = 4
AWAITING = 5
REJECTED_AFTER_CANCEL = 7
FINISHED = 99
ERROR = 888
class PaymentProcessor(PaymentProcessorBase):
BACKEND = u'getpaid.backends.payu'
BACKEND_NAME = _(u'PayU')
BACKEND_ACCEPTED_CURRENCY = (u'PLN',)
BACKEND_LOGO_URL = u'getpaid/backends/payu/payu_logo.png'
_GATEWAY_URL = u'https://www.platnosci.pl/paygw/'
_ACCEPTED_LANGS = (u'pl', u'en')
_REQUEST_SIG_FIELDS = (
u'pos_id', u'pay_type', u'session_id',
u'pos_auth_key', u'amount', u'desc', u'desc2', u'trsDesc', u'order_id',
u'first_name', u'last_name', u'payback_login', u'street', u'street_hn',
u'street_an', u'city', u'post_code', u'country', u'email', u'phone',
u'language', u'client_ip', u'ts'
)
_ONLINE_SIG_FIELDS = (u'pos_id', u'session_id', u'ts',)
_GET_SIG_FIELDS = (u'pos_id', u'session_id', u'ts',)
_GET_RESPONSE_SIG_FIELDS = (
u'trans_pos_id', u'trans_session_id', u'trans_order_id',
u'trans_status', u'trans_amount', u'trans_desc', u'trans_ts',)
_GET_ACCEPT_SIG_FIELDS = (u'trans_pos_id', u'trans_session_id', u'trans_ts',)
@staticmethod
def compute_sig(params, fields, key):
text = u''
for field in fields:
param_value = params.get(field, '')
text += six.text_type(param_value)
text += key
text_encoded = text.encode('utf-8')
return six.text_type(hashlib.md5(text_encoded).hexdigest())
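# Example (all values made up): with fields (u'pos_id', u'session_id', u'ts'),
# params {u'pos_id': u'123', u'session_id': u'1:99', u'ts': u'100'} and key
# u'k', the signature is md5(u'1231:99100k').hexdigest(). Fields missing from
# params contribute an empty string to the concatenation.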
@staticmethod
def online(pos_id, session_id, ts, sig):
params = {
u'pos_id': pos_id,
u'session_id': session_id,
u'ts': ts,
u'sig': sig
}
key2 = six.text_type(PaymentProcessor.get_backend_setting('key2'))
if sig != PaymentProcessor.compute_sig(params, PaymentProcessor._ONLINE_SIG_FIELDS, key2):
logger.warning('Got message with wrong sig, %s' % str(params))
return u'SIG ERR'
try:
params['pos_id'] = int(params['pos_id'])
except ValueError:
return u'POS_ID ERR'
if params['pos_id'] != int(PaymentProcessor.get_backend_setting('pos_id')):
return u'POS_ID ERR'
try:
payment_id, session = session_id.split(':')
except ValueError:
logger.warning(
'Got message with wrong session_id, %s' % str(params))
return u'SESSION_ID ERR'
get_payment_status_task.delay(payment_id, session_id)
return u'OK'
def get_gateway_url(self, request):
"""
Routes a payment to the gateway; returns the redirect URL, the HTTP method
to use, and any POST parameters.
"""
params = {
u'pos_id': PaymentProcessor.get_backend_setting('pos_id'),
u'pos_auth_key': PaymentProcessor.get_backend_setting('pos_auth_key'),
u'desc': self.get_order_description(self.payment, self.payment.order),
}
user_data = {
u'email': None,
u'lang': None,
}
signals.user_data_query.send(
sender=None, order=self.payment.order, user_data=user_data)
if user_data['email']:
params['email'] = user_data['email']
if user_data['lang'] \
and user_data['lang'].lower() in PaymentProcessor._ACCEPTED_LANGS:
params['language'] = user_data['lang'].lower()
elif PaymentProcessor.get_backend_setting('lang', False) and \
PaymentProcessor.get_backend_setting('lang').lower() in PaymentProcessor._ACCEPTED_LANGS:
params['language'] = six.text_type(PaymentProcessor.get_backend_setting('lang').lower())
key1 = six.text_type(PaymentProcessor.get_backend_setting('key1'))
signing = PaymentProcessor.get_backend_setting('signing', True)
testing = PaymentProcessor.get_backend_setting('testing', False)
if testing:
# Switch to testing mode, where the payment method is set to "test payment" ("t").
# Warning: testing mode also needs to be enabled in the payu.pl system
# for this POS
params['pay_type'] = u't'
# Here we put payment.pk as we can get order through payment model
params['order_id'] = self.payment.pk
# amount is the number of grosz (1/100 PLN), not PLN
params['amount'] = int(self.payment.amount * 100)
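# session_id has the form '<payment_pk>:<timestamp>'; online() later splits
# on ':' to recover the payment pk from PayU's callback.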
params['session_id'] = u"%d:%s" % (self.payment.pk, time.time())
# Warning: please make sure that this header actually carries the client IP
# rather than the web server / proxy IP in your WSGI environment
params['client_ip'] = request.META['REMOTE_ADDR']
if signing:
params['ts'] = six.text_type(time.time())
params['sig'] = PaymentProcessor.compute_sig(
params, self._REQUEST_SIG_FIELDS, key1)
if PaymentProcessor.get_backend_setting('method', 'get').lower() == 'post':
logger.info(u'New payment using POST: %s' % params)
return self._GATEWAY_URL + 'UTF/NewPayment', 'POST', params
elif PaymentProcessor.get_backend_setting('method', 'get').lower() == 'get':
logger.info(u'New payment using GET: %s' % params)
for key in params.keys():
params[key] = six.text_type(params[key]).encode('utf-8')
return self._GATEWAY_URL + 'UTF/NewPayment?' + urlencode(params), 'GET', {}
else:
logger.error(u'New payment raises error - bad HTTP method')
raise ImproperlyConfigured(
'PayU payment backend accepts only GET or POST')
def get_payment_status(self, session_id):
params = {
u'pos_id': PaymentProcessor.get_backend_setting('pos_id'),
u'session_id': session_id,
u'ts': time.time()
}
key1 = PaymentProcessor.get_backend_setting('key1')
key2 = PaymentProcessor.get_backend_setting('key2')
params['sig'] = PaymentProcessor.compute_sig(
params, self._GET_SIG_FIELDS, key1)
for key in params.keys():
params[key] = six.text_type(params[key]).encode('utf-8')
data = six.text_type(urlencode(params)).encode('utf-8')
url = self._GATEWAY_URL + 'UTF/Payment/get/txt'
request = Request(url, data)
response = urlopen(request)
response_data = response.read().decode('utf-8')
response_params = PaymentProcessor._parse_text_response(response_data)
if not response_params['status'] == u'OK':
logger.warning(u'Payment status error: %s' % response_params)
return
if PaymentProcessor.compute_sig(response_params, self._GET_RESPONSE_SIG_FIELDS,
key2) == response_params['trans_sig']:
if not (int(response_params['trans_pos_id']) == int(params['pos_id']) or
int(response_params['trans_order_id']) == self.payment.pk):
logger.error(u'Payment status wrong pos_id and/or order id: %s' % response_params)
return
logger.info(u'Fetching payment status: %s' % response_params)
self.payment.external_id = response_params['trans_id']
status = int(response_params['trans_status'])
if status in (PayUTransactionStatus.AWAITING, PayUTransactionStatus.FINISHED):
if self.payment.on_success(Decimal(response_params['trans_amount']) / Decimal('100')):
# fully paid
if status == PayUTransactionStatus.AWAITING:
accept_payment.delay(self.payment.id, session_id)
elif status in (
PayUTransactionStatus.CANCELED,
PayUTransactionStatus.ERROR,
PayUTransactionStatus.REJECTED,
PayUTransactionStatus.REJECTED_AFTER_CANCEL):
self.payment.on_failure()
else:
logger.error(u'Payment status wrong response signature: %s' % response_params)
def accept_payment(self, session_id):
params = {
'pos_id': PaymentProcessor.get_backend_setting('pos_id'),
'session_id': session_id,
'ts': time.time()
}
key1 = PaymentProcessor.get_backend_setting('key1')
key2 = PaymentProcessor.get_backend_setting('key2')
params['sig'] = PaymentProcessor.compute_sig(
params, self._GET_SIG_FIELDS, key1)
for key in params.keys():
params[key] = six.text_type(params[key]).encode('utf-8')
data = six.text_type(urlencode(params)).encode('utf-8')
url = self._GATEWAY_URL + 'UTF/Payment/confirm/txt'
request = Request(url, data)
response = urlopen(request)
response_data = response.read().decode('utf-8')
response_params = PaymentProcessor._parse_text_response(response_data)
if response_params['status'] == 'OK':
if PaymentProcessor.compute_sig(
response_params,
self._GET_ACCEPT_SIG_FIELDS, key2) != response_params['trans_sig']:
logger.error(u'Wrong signature for Payment/confirm response: %s' % response_params)
return
if int(response_params['trans_pos_id']) != int(params['pos_id']):
logger.error(u'Wrong pos_id for Payment/confirm response: %s' % response_params)
return
logger.info(u'Payment accepted: %s' % response_params)
else:
logger.warning(u'Payment not accepted, error: %s' % response_params)
@staticmethod
def _parse_text_response(text):
"""
Parses inputs like:
variable : some value
variable2 : 123.44
into a dict
"""
return dict(
map(lambda kv: (kv[0].rstrip(), kv[1].lstrip()),
filter(
lambda l: len(l) == 2,
map(lambda l: l.split(':', 1),
text.splitlines()))
)
)
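# A minimal sketch (not part of the original backend) of what
# _parse_text_response produces for PayU's "key : value" text replies.
def _demo_parse_text_response():
    text = u"status : OK\ntrans_id : 234\ntrans_amount : 123.44"
    parsed = PaymentProcessor._parse_text_response(text)
    assert parsed == {u'status': u'OK',
                      u'trans_id': u'234',
                      u'trans_amount': u'123.44'}
    return parsed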
|
|
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from ironic.common import boot_devices
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common import image_service
from ironic.common import keystone
from ironic.common import paths
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import image_cache
from ironic.openstack.common import fileutils
agent_opts = [
cfg.StrOpt('agent_pxe_append_params',
default='nofb nomodeset vga=normal',
help='Additional append parameters for baremetal PXE boot.'),
cfg.StrOpt('agent_pxe_config_template',
default=paths.basedir_def(
'drivers/modules/agent_config.template'),
help='Template file for PXE configuration.'),
cfg.StrOpt('agent_pxe_bootfile_name',
default='pxelinux.0',
help='Neutron bootfile DHCP parameter.'),
cfg.IntOpt('agent_erase_devices_priority',
help='Priority to run in-band erase devices via the Ironic '
'Python Agent ramdisk. If unset, will use the priority '
'set in the ramdisk (defaults to 10 for the '
'GenericHardwareManager). If set to 0, will not run '
'during cleaning.'),
cfg.BoolOpt('manage_tftp',
default=True,
help='Whether Ironic will manage TFTP files for the deploy '
'ramdisks. If set to False, you will need to configure '
'your own TFTP server that allows booting the deploy '
'ramdisks.'
),
]
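# Illustrative ironic.conf snippet (assumed values, not part of the original
# module) showing how the [agent] options registered above are typically set:
#
#   [agent]
#   agent_pxe_append_params = nofb nomodeset vga=normal
#   agent_erase_devices_priority = 0
#   manage_tftp = True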
CONF = cfg.CONF
CONF.import_opt('my_ip', 'ironic.netconf')
CONF.register_opts(agent_opts, group='agent')
LOG = log.getLogger(__name__)
REQUIRED_PROPERTIES = {
'deploy_kernel': _('UUID (from Glance) of the deployment kernel. '
'Required.'),
'deploy_ramdisk': _('UUID (from Glance) of the ramdisk with agent that is '
'used at deploy time. Required.'),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES
def _time():
"""Broken out for testing."""
return time.time()
def _get_client():
client = agent_client.AgentClient()
return client
def build_agent_options(node):
"""Build the options to be passed to the agent ramdisk.
:param node: an ironic node object
:returns: a dictionary containing the parameters to be passed to
agent ramdisk.
"""
ironic_api = (CONF.conductor.api_url or
keystone.get_service_url()).rstrip('/')
agent_config_opts = {
'ipa-api-url': ironic_api,
'ipa-driver-name': node.driver,
# NOTE: The below entry is a temporary workaround for bug/1433812
'coreos.configdrive': 0,
}
root_device = deploy_utils.parse_root_device_hints(node)
if root_device:
agent_config_opts['root_device'] = root_device
return agent_config_opts
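# Illustrative sketch (hypothetical values, not from the original module): for
# a node using an agent driver and no root device hints, build_agent_options()
# returns a dict along the lines of
#   {'ipa-api-url': 'http://10.0.0.1:6385',
#    'ipa-driver-name': 'agent_ipmitool',
#    'coreos.configdrive': 0}
# The API URL comes from CONF.conductor.api_url or the Keystone catalog, and
# 'root_device' is added only when driver_info carries root device hints.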
def _build_pxe_config_options(node, pxe_info):
"""Builds the pxe config options for booting agent.
This method builds the config options to be replaced on
the agent pxe config template.
:param node: an ironic node object
:param pxe_info: A dict containing the 'deploy_kernel' and
'deploy_ramdisk' for the agent pxe config template.
:returns: a dict containing the options to be applied on
the agent pxe config template.
"""
agent_config_opts = {
'deployment_aki_path': pxe_info['deploy_kernel'][1],
'deployment_ari_path': pxe_info['deploy_ramdisk'][1],
'pxe_append_params': CONF.agent.agent_pxe_append_params,
}
agent_opts = build_agent_options(node)
agent_config_opts.update(agent_opts)
return agent_config_opts
def _get_tftp_image_info(node):
return pxe_utils.get_deploy_kr_info(node.uuid, node.driver_info)
@image_cache.cleanup(priority=25)
class AgentTFTPImageCache(image_cache.ImageCache):
def __init__(self, image_service=None):
super(AgentTFTPImageCache, self).__init__(
CONF.pxe.tftp_master_path,
# MiB -> B
CONF.pxe.image_cache_size * 1024 * 1024,
# min -> sec
CONF.pxe.image_cache_ttl * 60,
image_service=image_service)
def _cache_tftp_images(ctx, node, pxe_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
fileutils.ensure_tree(
os.path.join(CONF.pxe.tftp_root, node.uuid))
LOG.debug("Fetching kernel and ramdisk for node %s",
node.uuid)
deploy_utils.fetch_images(ctx, AgentTFTPImageCache(), pxe_info.values())
def build_instance_info_for_deploy(task):
"""Build instance_info necessary for deploying to a node.
:param task: a TaskManager object containing the node
:returns: a dictionary containing the properties to be updated
in instance_info
:raises: exception.ImageRefValidationFailed if image_source is not
Glance href and is not HTTP(S) URL.
"""
node = task.node
instance_info = node.instance_info
image_source = instance_info['image_source']
if service_utils.is_glance_image(image_source):
glance = image_service.GlanceImageService(version=2,
context=task.context)
image_info = glance.show(image_source)
swift_temp_url = glance.swift_temp_url(image_info)
LOG.debug('Got image info: %(info)s for node %(node)s.',
{'info': image_info, 'node': node.uuid})
instance_info['image_url'] = swift_temp_url
instance_info['image_checksum'] = image_info['checksum']
instance_info['image_disk_format'] = image_info['disk_format']
instance_info['image_container_format'] = (
image_info['container_format'])
else:
try:
image_service.HttpImageService().validate_href(image_source)
except exception.ImageRefValidationFailed:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Agent deploy supports only HTTP(S) URLs as "
"instance_info['image_source']. Either %s "
"is not a valid HTTP(S) URL or "
"is not reachable."), image_source)
instance_info['image_url'] = image_source
return instance_info
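# Illustrative sketch (values are assumptions, not from the original module):
# for a Glance image_source, build_instance_info_for_deploy() adds roughly
#   {'image_url': '<Swift temp URL>',
#    'image_checksum': '<checksum reported by Glance>',
#    'image_disk_format': 'qcow2',
#    'image_container_format': 'bare'}
# to the node's instance_info, while for a plain HTTP(S) image_source it only
# validates the URL and sets 'image_url' to the source itself.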
def _prepare_pxe_boot(task):
"""Prepare the files required for PXE booting the agent."""
if CONF.agent.manage_tftp:
pxe_info = _get_tftp_image_info(task.node)
pxe_options = _build_pxe_config_options(task.node, pxe_info)
pxe_utils.create_pxe_config(task,
pxe_options,
CONF.agent.agent_pxe_config_template)
_cache_tftp_images(task.context, task.node, pxe_info)
def _do_pxe_boot(task, ports=None):
"""Reboot the node into the PXE ramdisk.
:param task: a TaskManager instance
:param ports: a list of Neutron port dicts to update DHCP options on. If
None, will get the list of ports from the Ironic port objects.
"""
dhcp_opts = pxe_utils.dhcp_options_for_instance(task)
provider = dhcp_factory.DHCPFactory()
provider.update_dhcp(task, dhcp_opts, ports)
manager_utils.node_set_boot_device(task, boot_devices.PXE, persistent=True)
manager_utils.node_power_action(task, states.REBOOT)
def _clean_up_pxe(task):
"""Clean up left over PXE and DHCP files."""
if CONF.agent.manage_tftp:
pxe_info = _get_tftp_image_info(task.node)
for label in pxe_info:
path = pxe_info[label][1]
utils.unlink_without_raise(path)
AgentTFTPImageCache().clean_up()
pxe_utils.clean_up_pxe_config(task)
class AgentDeploy(base.DeployInterface):
"""Interface for deploy-related actions."""
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return COMMON_PROPERTIES
def validate(self, task):
"""Validate the driver-specific Node deployment info.
This method validates whether the properties of the supplied node
contain the required information for this driver to deploy images to
the node.
:param task: a TaskManager instance
:raises: MissingParameterValue
"""
node = task.node
params = {}
if CONF.agent.manage_tftp:
params['driver_info.deploy_kernel'] = node.driver_info.get(
'deploy_kernel')
params['driver_info.deploy_ramdisk'] = node.driver_info.get(
'deploy_ramdisk')
image_source = node.instance_info.get('image_source')
params['instance_info.image_source'] = image_source
error_msg = _('Node %s failed to validate deploy image info. Some '
'parameters were missing') % node.uuid
deploy_utils.check_for_missing_params(params, error_msg)
if not service_utils.is_glance_image(image_source):
if not node.instance_info.get('image_checksum'):
raise exception.MissingParameterValue(_(
"image_source's image_checksum must be provided in "
"instance_info for node %s") % node.uuid)
is_whole_disk_image = node.driver_internal_info.get(
'is_whole_disk_image')
# TODO(sirushtim): Remove once IPA has support for partition images.
if is_whole_disk_image is False:
raise exception.InvalidParameterValue(_(
"Node %(node)s is configured to use the %(driver)s driver "
"which currently does not support deploying partition "
"images.") % {'node': node.uuid, 'driver': node.driver})
# Validate the root device hints
deploy_utils.parse_root_device_hints(node)
@task_manager.require_exclusive_lock
def deploy(self, task):
"""Perform a deployment to a node.
Perform the necessary work to deploy an image onto the specified node.
This method will be called after prepare(), which may have already
performed any preparatory steps, such as pre-caching some data for the
node.
:param task: a TaskManager instance.
:returns: status of the deploy. One of ironic.common.states.
"""
_do_pxe_boot(task)
return states.DEPLOYWAIT
@task_manager.require_exclusive_lock
def tear_down(self, task):
"""Tear down a previous deployment on the task's node.
:param task: a TaskManager instance.
:returns: status of the deploy. One of ironic.common.states.
"""
manager_utils.node_power_action(task, states.POWER_OFF)
return states.DELETED
def prepare(self, task):
"""Prepare the deployment environment for this node.
:param task: a TaskManager instance.
"""
node = task.node
_prepare_pxe_boot(task)
node.instance_info = build_instance_info_for_deploy(task)
node.save()
def clean_up(self, task):
"""Clean up the deployment environment for this node.
If preparation of the deployment environment ahead of time is possible,
this method should be implemented by the driver. It should erase
anything cached by the `prepare` method.
If implemented, this method must be idempotent. It may be called
multiple times for the same node on the same conductor, and it may be
called by multiple conductors in parallel. Therefore, it must not
require an exclusive lock.
This method is called before `tear_down`.
:param task: a TaskManager instance.
"""
_clean_up_pxe(task)
def take_over(self, task):
"""Take over management of this node from a dead conductor.
If conductors' hosts maintain a static relationship to nodes, this
method should be implemented by the driver to allow conductors to
perform the necessary work during the remapping of nodes to conductors
when a conductor joins or leaves the cluster.
For example, the PXE driver has an external dependency:
Neutron must forward DHCP BOOT requests to a conductor which has
prepared the tftpboot environment for the given node. When a
conductor goes offline, another conductor must change this setting
in Neutron as part of remapping that node's control to itself.
This is performed within the `takeover` method.
:param task: a TaskManager instance.
"""
pass
def get_clean_steps(self, task):
"""Get the list of clean steps from the agent.
:param task: a TaskManager object containing the node
:returns: A list of clean step dictionaries
"""
steps = deploy_utils.agent_get_clean_steps(task)
if CONF.agent.agent_erase_devices_priority is not None:
for step in steps:
if (step.get('step') == 'erase_devices' and
step.get('interface') == 'deploy'):
# Override with operator set priority
step['priority'] = CONF.agent.agent_erase_devices_priority
return steps
def execute_clean_step(self, task, step):
"""Execute a clean step asynchronously on the agent.
:param task: a TaskManager object containing the node
:param step: a clean step dictionary to execute
:raises: NodeCleaningFailure if the agent does not return a command
status
:returns: states.CLEANING to signify the step will be completed async
"""
return deploy_utils.agent_execute_clean_step(task, step)
def prepare_cleaning(self, task):
"""Boot into the agent to prepare for cleaning.
:param task: a TaskManager object containing the node
:raises NodeCleaningFailure: if the previous cleaning ports cannot
be removed or if new cleaning ports cannot be created
:returns: states.CLEANING to signify an asynchronous prepare
"""
provider = dhcp_factory.DHCPFactory()
# If we have left over ports from a previous cleaning, remove them
if getattr(provider.provider, 'delete_cleaning_ports', None):
# Allow to raise if it fails, is caught and handled in conductor
provider.provider.delete_cleaning_ports(task)
# Create cleaning ports if necessary
ports = None
if getattr(provider.provider, 'create_cleaning_ports', None):
# Allow to raise if it fails, is caught and handled in conductor
ports = provider.provider.create_cleaning_ports(task)
_prepare_pxe_boot(task)
_do_pxe_boot(task, ports)
# Tell the conductor we are waiting for the agent to boot.
return states.CLEANING
def tear_down_cleaning(self, task):
"""Clean up the PXE and DHCP files after cleaning.
:param task: a TaskManager object containing the node
:raises NodeCleaningFailure: if the cleaning ports cannot be
removed
"""
manager_utils.node_power_action(task, states.POWER_OFF)
_clean_up_pxe(task)
# If we created cleaning ports, delete them
provider = dhcp_factory.DHCPFactory()
if getattr(provider.provider, 'delete_cleaning_ports', None):
# Allow to raise if it fails, is caught and handled in conductor
provider.provider.delete_cleaning_ports(task)
class AgentVendorInterface(agent_base_vendor.BaseAgentVendor):
def deploy_is_done(self, task):
commands = self._client.get_commands_status(task.node)
if not commands:
return False
last_command = commands[-1]
if last_command['command_name'] != 'prepare_image':
# catches race condition where prepare_image is still processing
# so deploy hasn't started yet
return False
if last_command['command_status'] != 'RUNNING':
return True
return False
@task_manager.require_exclusive_lock
def continue_deploy(self, task, **kwargs):
task.process_event('resume')
node = task.node
image_source = node.instance_info.get('image_source')
LOG.debug('Continuing deploy for %s', node.uuid)
image_info = {
'id': image_source.split('/')[-1],
'urls': [node.instance_info['image_url']],
'checksum': node.instance_info['image_checksum'],
# NOTE(comstud): Older versions of ironic do not set
# 'disk_format' nor 'container_format', so we use .get()
# to maintain backwards compatibility in case code was
# upgraded in the middle of a build request.
'disk_format': node.instance_info.get('image_disk_format'),
'container_format': node.instance_info.get(
'image_container_format')
}
# Tell the client to download and write the image with the given args
res = self._client.prepare_image(node, image_info)
LOG.debug('prepare_image got response %(res)s for node %(node)s',
{'res': res, 'node': node.uuid})
def check_deploy_success(self, node):
# should only ever be called after we've validated that
# the prepare_image command is complete
command = self._client.get_commands_status(node)[-1]
if command['command_status'] == 'FAILED':
return command['command_error']
def reboot_to_instance(self, task, **kwargs):
node = task.node
LOG.debug('Preparing to reboot to instance for node %s',
node.uuid)
error = self.check_deploy_success(node)
if error is not None:
# TODO(jimrollenhagen) power off if using neutron dhcp to
# align with pxe driver?
msg = _('node %(node)s command status errored: %(error)s') % (
{'node': node.uuid, 'error': error})
LOG.error(msg)
deploy_utils.set_failed_state(task, msg)
return
LOG.debug('Rebooting node %s to disk', node.uuid)
manager_utils.node_set_boot_device(task, 'disk', persistent=True)
self.reboot_and_finish_deploy(task)
|
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' statemanager.py '''
import abc
import socket
import subprocess
from heron.statemgrs.src.python.log import Log as LOG
HERON_EXECUTION_STATE_PREFIX = "{0}/executionstate/"
HERON_PACKING_PLANS_PREFIX = "{0}/packingplans/"
HERON_PPLANS_PREFIX = "{0}/pplans/"
HERON_SCHEDULER_LOCATION_PREFIX = "{0}/schedulers/"
HERON_TMANAGER_PREFIX = "{0}/tmanagers/"
HERON_TOPOLOGIES_KEY = "{0}/topologies"
# pylint: disable=too-many-public-methods, attribute-defined-outside-init
class StateManager(metaclass=abc.ABCMeta):
"""
This is the abstract base class for state manager. It provides methods to get/set/delete various
state from the state store. The getters accept an optional callback, which will watch for state
changes of the object and invoke the callback when one occurs.
"""
TIMEOUT_SECONDS = 5
@property
def name(self):
return self.__name
@name.setter
def name(self, newName):
self.__name = newName
@property
def hostportlist(self):
return self.__hostportlist
@hostportlist.setter
def hostportlist(self, newHostportList):
self.__hostportlist = newHostportList
@property
def rootpath(self):
""" Getter for the path where the heron states are stored. """
    return self.__rootpath
@rootpath.setter
def rootpath(self, newRootPath):
""" Setter for the path where the heron states are stored. """
    self.__rootpath = newRootPath
@property
def tunnelhost(self):
""" Getter for the tunnelhost to create the tunnel if host is not accessible """
return self.__tunnelhost
@tunnelhost.setter
def tunnelhost(self, newTunnelHost):
""" Setter for the tunnelhost to create the tunnel if host is not accessible """
self.__tunnelhost = newTunnelHost
def __init__(self):
self.tunnel = []
def is_host_port_reachable(self):
"""
    Returns true if the host is reachable. In some cases it may not be
    reachable and a tunnel must be used.
"""
for hostport in self.hostportlist:
try:
socket.create_connection(hostport, StateManager.TIMEOUT_SECONDS)
return True
except:
LOG.info("StateManager %s Unable to connect to host: %s port %i"
% (self.name, hostport[0], hostport[1]))
continue
return False
# pylint: disable=no-self-use
def pick_unused_port(self):
""" Pick an unused port. There is a slight chance that this wont work. """
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 0))
_, port = s.getsockname()
s.close()
return port
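  # Editorial note (not in the original source): binding to port 0 lets the OS
  # pick any free port; the "slight chance" of failure is the window between
  # close() here and the moment ssh actually binds that port below.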
def establish_ssh_tunnel(self):
"""
Establish an ssh tunnel for each local host and port
that can be used to communicate with the state host.
"""
localportlist = []
for (host, port) in self.hostportlist:
localport = self.pick_unused_port()
self.tunnel.append(subprocess.Popen(
('ssh', self.tunnelhost, '-NL127.0.0.1:%d:%s:%d' % (localport, host, port))))
localportlist.append(('127.0.0.1', localport))
return localportlist
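  # Illustrative example (assumed hosts, not part of the original class): with
  # tunnelhost 'gateway.example.com' and hostportlist [('zk1', 2181)],
  # establish_ssh_tunnel() spawns roughly
  #   ssh gateway.example.com -NL127.0.0.1:<free-port>:zk1:2181
  # and returns [('127.0.0.1', <free-port>)], so callers talk to the state
  # store through the local end of the tunnel.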
def terminate_ssh_tunnel(self):
for tunnel in self.tunnel:
tunnel.terminate()
@abc.abstractmethod
def start(self):
""" If the state manager needs to connect to a remote host. """
@abc.abstractmethod
def stop(self):
""" If the state manager had connected to a remote server, it would need to stop as well. """
def get_topologies_path(self):
return HERON_TOPOLOGIES_KEY.format(self.rootpath)
def get_topology_path(self, topologyName):
return HERON_TOPOLOGIES_KEY.format(self.rootpath) + "/" + topologyName
def get_packing_plan_path(self, topologyName):
return HERON_PACKING_PLANS_PREFIX.format(self.rootpath) + topologyName
def get_pplan_path(self, topologyName):
return HERON_PPLANS_PREFIX.format(self.rootpath) + topologyName
def get_execution_state_path(self, topologyName):
return HERON_EXECUTION_STATE_PREFIX.format(self.rootpath) + topologyName
def get_tmanager_path(self, topologyName):
return HERON_TMANAGER_PREFIX.format(self.rootpath) + topologyName
def get_scheduler_location_path(self, topologyName):
return HERON_SCHEDULER_LOCATION_PREFIX.format(self.rootpath) + topologyName
@abc.abstractmethod
def get_topologies(self, callback=None):
pass
@abc.abstractmethod
def get_topology(self, topologyName, callback=None):
pass
@abc.abstractmethod
def create_topology(self, topologyName, topology):
pass
@abc.abstractmethod
def delete_topology(self, topologyName):
pass
@abc.abstractmethod
def get_packing_plan(self, topologyName, callback=None):
"""
Gets the packing_plan for the topology.
If the callback is provided,
sets watch on the path and calls the callback
with the new packing_plan.
"""
@abc.abstractmethod
def get_pplan(self, topologyName, callback=None):
pass
@abc.abstractmethod
def create_pplan(self, topologyName, pplan):
pass
@abc.abstractmethod
def delete_pplan(self, topologyName):
pass
@abc.abstractmethod
def get_execution_state(self, topologyName, callback=None):
pass
@abc.abstractmethod
def create_execution_state(self, topologyName, executionState):
pass
@abc.abstractmethod
def delete_execution_state(self, topologyName):
pass
@abc.abstractmethod
def get_tmanager(self, topologyName, callback=None):
pass
@abc.abstractmethod
def get_scheduler_location(self, topologyName, callback=None):
pass
def delete_topology_from_zk(self, topologyName):
"""
Removes the topology entry from:
    1. pplan,
    2. execution_state, and
    3. topologies list
"""
self.delete_pplan(topologyName)
self.delete_execution_state(topologyName)
self.delete_topology(topologyName)
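# A minimal sketch (hypothetical, not part of this module) of what a concrete
# backend must provide: subclass StateManager and implement every abstract
# method, for example
#
#   class FileStateManager(StateManager):
#     def start(self): ...
#     def stop(self): ...
#     def get_topologies(self, callback=None): ...
#     def get_topology(self, topologyName, callback=None): ...
#     def create_topology(self, topologyName, topology): ...
#     def delete_topology(self, topologyName): ...
#     def get_packing_plan(self, topologyName, callback=None): ...
#     def get_pplan(self, topologyName, callback=None): ...
#     def create_pplan(self, topologyName, pplan): ...
#     def delete_pplan(self, topologyName): ...
#     def get_execution_state(self, topologyName, callback=None): ...
#     def create_execution_state(self, topologyName, executionState): ...
#     def delete_execution_state(self, topologyName): ...
#     def get_tmanager(self, topologyName, callback=None): ...
#     def get_scheduler_location(self, topologyName, callback=None): ...
#
# Leaving any of these out keeps the subclass abstract, so it cannot be
# instantiated.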
|
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Resolve interface dependencies, producing a merged IdlDefinitions object.
This library computes interface dependencies (partial interfaces and
implements), reads the dependency files, and merges them to the IdlDefinitions
for the main IDL file, producing an IdlDefinitions object representing the
entire interface.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler#TOC-Dependency-resolution
"""
import os.path
from utilities import idl_filename_to_component, is_valid_component_dependency, merge_dict_recursively
# The following extended attributes can be applied to a dependency interface,
# and are then applied to the individual members when merging.
# Note that this moves the extended attribute from the interface to the member,
# which changes the semantics and yields different code than the same extended
# attribute on the main interface.
DEPENDENCY_EXTENDED_ATTRIBUTES = frozenset([
'OriginTrialEnabled',
'RuntimeEnabled',
'SecureContext',
])
class InterfaceDependencyResolver(object):
def __init__(self, interfaces_info, reader):
"""Initialize dependency resolver.
Args:
interfaces_info:
dict of interfaces information, from compute_dependencies.py
reader:
IdlReader, used for reading dependency files
"""
self.interfaces_info = interfaces_info
self.reader = reader
def resolve_dependencies(self, definitions, component):
"""Resolve dependencies, merging them into IDL definitions of main file.
Dependencies consist of 'partial interface' for the same interface as
in the main file, and other interfaces that this interface 'implements'.
These are merged into the main IdlInterface, as the main IdlInterface
implements all these members.
Referenced interfaces are added to IdlDefinitions, but not merged into
the main IdlInterface, as these are only referenced (their members are
introspected, but not implemented in this interface).
Inherited extended attributes are also added to the main IdlInterface.
Modifies definitions in place by adding parsed dependencies.
Args:
definitions: IdlDefinitions object, modified in place
component:
string, describing where the above definitions are defined,
'core' or 'modules'. See KNOWN_COMPONENTS in utilities.py
Returns:
A dictionary whose key is component and value is IdlDefinitions
object whose dependency is resolved.
Raises:
Exception:
A given IdlDefinitions object doesn't have any interfaces,
or a given IdlDefinitions object has incorrect referenced
interfaces.
"""
# FIXME: we need to resolve dependency when we implement partial
# dictionary.
if not definitions.interfaces:
raise Exception('No need to resolve any dependencies of '
'this definition: %s, because this should '
'have a dictionary.' % definitions.idl_name)
target_interface = next(definitions.interfaces.itervalues())
interface_name = target_interface.name
interface_info = self.interfaces_info[interface_name]
if 'inherited_extended_attributes' in interface_info:
target_interface.extended_attributes.update(
interface_info['inherited_extended_attributes'])
resolved_definitions = merge_interface_dependencies(
definitions,
component,
target_interface,
interface_info['dependencies_full_paths'] +
interface_info['dependencies_other_component_full_paths'],
self.reader)
inherit_unforgeable_attributes(resolved_definitions, self.interfaces_info)
for referenced_interface_name in interface_info['referenced_interfaces']:
referenced_definitions = self.reader.read_idl_definitions(
self.interfaces_info[referenced_interface_name]['full_path'])
for referenced_component in referenced_definitions:
if not is_valid_component_dependency(component, referenced_component):
                    raise Exception('These definitions: %s are defined in %s '
                                    'but the referenced interface:%s is defined '
'in %s' % (definitions.idl_name,
component,
referenced_interface_name,
referenced_component))
resolved_definitions[component].update(referenced_definitions[component])
return resolved_definitions
def merge_interface_dependencies(definitions, component, target_interface, dependency_idl_filenames, reader):
"""Merge dependencies ('partial interface' and 'implements') in dependency_idl_filenames into target_interface.
Args:
definitions: IdlDefinitions object, modified in place
component:
string, describing where the above definitions are defined,
'core' or 'modules'. See KNOWN_COMPONENTS in utilities.py
target_interface: IdlInterface object, modified in place
dependency_idl_filenames:
Idl filenames which depend on the above definitions.
reader: IdlReader object.
Returns:
A dictionary whose key is component and value is IdlDefinitions
object whose dependency is resolved.
"""
resolved_definitions = {component: definitions}
# Sort so order consistent, so can compare output from run to run.
for dependency_idl_filename in sorted(dependency_idl_filenames):
dependency_definitions = reader.read_idl_file(dependency_idl_filename)
dependency_component = idl_filename_to_component(dependency_idl_filename)
dependency_interface = next(dependency_definitions.interfaces.itervalues())
dependency_interface_basename, _ = os.path.splitext(os.path.basename(dependency_idl_filename))
transfer_extended_attributes(dependency_interface,
dependency_interface_basename)
# We need to use different checkdeps here for partial interface and
# inheritance.
if dependency_interface.is_partial:
# Case: dependency_interface is a partial interface of
# target_interface.
# So,
# - A partial interface defined in modules can update
# the original interface defined in core.
# However,
# - A partial interface defined in core cannot update
# the original interface defined in modules.
if not is_valid_component_dependency(dependency_component, component):
raise Exception('The partial interface:%s in %s cannot update '
'the original interface:%s in %s' % (dependency_interface.name,
dependency_component,
target_interface.name,
component))
if dependency_component in resolved_definitions:
                # When merging a new partial interface, we should not overwrite
                # the ImplementedAs extended attribute of the already merged
                # partial interface.
                # See also the comment on the "if 'ImplementedAs' not in ..."
                # check below.
dependency_interface.extended_attributes.pop('ImplementedAs', None)
resolved_definitions[dependency_component].update(dependency_definitions)
continue
dependency_interface.extended_attributes.update(target_interface.extended_attributes)
assert target_interface == definitions.interfaces[dependency_interface.name]
# A partial interface should use its original interface's
# ImplementedAs. If the original interface doesn't have,
# remove ImplementedAs defined in the partial interface.
# Because partial interface needs the original interface's
# cpp class to obtain partial interface's cpp class.
            # e.g., V8WindowPartial.cpp:
# DOMWindow* impl = V8Window::toImpl(holder);
# DOMWindowQuota* cppValue(DOMWindowQuota::webkitStorageInfo(impl));
# TODO(tasak): remove ImplementedAs extended attributes
# from all partial interfaces. Instead, rename all cpp/header
# files correctly. ImplementedAs should not be allowed in
# partial interfaces.
if 'ImplementedAs' not in target_interface.extended_attributes:
dependency_interface.extended_attributes.pop('ImplementedAs', None)
dependency_interface.original_interface = target_interface
target_interface.partial_interfaces.append(dependency_interface)
resolved_definitions[dependency_component] = dependency_definitions
else:
# Case: target_interface implements dependency_interface.
# So,
# - An interface defined in modules can implement some interface
# defined in core.
# In this case, we need "NoInterfaceObject" extended attribute.
# However,
# - An interface defined in core cannot implement any interface
# defined in modules.
if not is_valid_component_dependency(component, dependency_component):
raise Exception('The interface:%s in %s cannot implement '
'the interface:%s in %s.' % (dependency_interface.name,
dependency_component,
target_interface.name,
component))
if component != dependency_component and 'NoInterfaceObject' not in dependency_interface.extended_attributes:
raise Exception('The interface:%s in %s cannot implement '
'the interface:%s in %s because of '
'missing NoInterfaceObject.' % (dependency_interface.name,
dependency_component,
target_interface.name,
component))
resolved_definitions[component].update(dependency_definitions) # merges partial interfaces
# Implemented interfaces (non-partial dependencies) are also merged
# into the target interface, so Code Generator can just iterate
# over one list (and not need to handle 'implements' itself).
target_interface.merge(dependency_interface)
return resolved_definitions
def transfer_extended_attributes(dependency_interface, dependency_interface_basename):
"""Transfer extended attributes from dependency interface onto members.
Merging consists of storing certain interface-level data in extended
attributes of the *members* (because there is no separate dependency
interface post-merging).
The data storing consists of:
* moving certain extended attributes from the dependency interface
to its members (deleting the extended attribute from the interface)
* storing the C++ class of the implementation in an internal
extended attribute of each member, [PartialInterfaceImplementedAs]
No return: modifies dependency_interface in place.
"""
merged_extended_attributes = {}
for key in DEPENDENCY_EXTENDED_ATTRIBUTES:
if key not in dependency_interface.extended_attributes:
continue
merged_extended_attributes[key] = dependency_interface.extended_attributes[key]
# Remove the merged attributes from the original dependency interface.
# This ensures that if other dependency interfaces are merged onto this
# one, its extended_attributes do not leak through
# (https://crbug.com/603782).
del dependency_interface.extended_attributes[key]
# A partial interface's members are implemented as static member functions
# in a separate C++ class. This class name is stored in
# [PartialInterfaceImplementedAs] which defaults to the basename of
# dependency IDL file.
# This class name can be overridden by [ImplementedAs] on the partial
# interface definition.
#
# Note that implemented interfaces do *not* need [ImplementedAs], since
# they are implemented on the C++ object |impl| itself, just like members of
# the main interface definition, so the bindings do not need to know in
# which class implemented interfaces are implemented.
#
# Currently [LegacyTreatAsPartialInterface] can be used to have partial
# interface behavior on implemented interfaces, but this is being removed
# as legacy cruft:
# FIXME: Remove [LegacyTreatAsPartialInterface]
# http://crbug.com/360435
#
# Note that [ImplementedAs] is used with different meanings on interfaces
# and members:
# for Blink class name and function name (or constant name), respectively.
# Thus we do not want to copy this from the interface to the member, but
# instead extract it and handle it separately.
if (dependency_interface.is_partial or
'LegacyTreatAsPartialInterface' in dependency_interface.extended_attributes):
merged_extended_attributes['PartialInterfaceImplementedAs'] = (
dependency_interface.extended_attributes.pop(
'ImplementedAs', dependency_interface_basename))
def update_attributes(attributes, extras):
for key, value in extras.items():
if key not in attributes:
attributes[key] = value
for attribute in dependency_interface.attributes:
update_attributes(attribute.extended_attributes, merged_extended_attributes)
for constant in dependency_interface.constants:
update_attributes(constant.extended_attributes, merged_extended_attributes)
for operation in dependency_interface.operations:
update_attributes(operation.extended_attributes, merged_extended_attributes)
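# Illustrative example (hypothetical IDL, not from the original sources): for a
# dependency interface such as
#   [RuntimeEnabled=FooFeature, ImplementedAs=WindowFoo]
#   partial interface Window { attribute long foo; };
# transfer_extended_attributes() leaves the 'foo' member carrying
# RuntimeEnabled=FooFeature and PartialInterfaceImplementedAs=WindowFoo, and
# removes RuntimeEnabled and ImplementedAs from the partial interface itself.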
def inherit_unforgeable_attributes(resolved_definitions, interfaces_info):
"""Inherits [Unforgeable] attributes and updates the arguments accordingly.
For each interface in |resolved_definitions|, collects all [Unforgeable]
attributes in ancestor interfaces in the same component and adds them to
the interface. 'referenced_interfaces' and 'cpp_includes' in
|interfaces_info| are updated accordingly.
"""
def collect_unforgeable_attributes_in_ancestors(interface_name, component):
if not interface_name:
# unforgeable_attributes, referenced_interfaces, cpp_includes
return [], [], set()
interface = interfaces_info[interface_name]
unforgeable_attributes, referenced_interfaces, cpp_includes = collect_unforgeable_attributes_in_ancestors(interface.get('parent'), component)
this_unforgeable = interface.get('unforgeable_attributes', {}).get(component, [])
unforgeable_attributes.extend(this_unforgeable)
this_referenced = [attr.idl_type.base_type for attr in this_unforgeable
if attr.idl_type.base_type in
interface.get('referenced_interfaces', [])]
referenced_interfaces.extend(this_referenced)
cpp_includes.update(interface.get('cpp_includes', {}).get(component, {}))
return unforgeable_attributes, referenced_interfaces, cpp_includes
for component, definitions in resolved_definitions.iteritems():
for interface_name, interface in definitions.interfaces.iteritems():
interface_info = interfaces_info[interface_name]
inherited_unforgeable_attributes, referenced_interfaces, cpp_includes = collect_unforgeable_attributes_in_ancestors(interface_info.get('parent'), component)
# This loop may process the same interface many times, so it's
# possible that we're adding the same attributes twice or more.
# So check if there is a duplicate.
for attr in inherited_unforgeable_attributes:
if attr not in interface.attributes:
interface.attributes.append(attr)
referenced_interfaces.extend(interface_info.get('referenced_interfaces', []))
interface_info['referenced_interfaces'] = sorted(set(referenced_interfaces))
merge_dict_recursively(interface_info,
{'cpp_includes': {component: cpp_includes}})
|
|
from relay.warnings.models import *
import re
import time
from django.db.models.query import Q
# Search for certain kinds of races
#------------------------------------------------------------
# UI :: Search forms, etc.
#------------------------------------------------------------
from django import newforms as forms
class SearchForm(forms.Form):
lval_1 = forms.CharField(max_length=75)
file_1 = forms.CharField(max_length=100)
line_1 = forms.IntegerField()
lval_2 = forms.CharField(max_length=75)
file_2 = forms.CharField(max_length=100)
line_2 = forms.IntegerField()
#-- UI for table of links to results for each label?
#------------------------------------------------------------
# Quantifiers, "true", "false", and, or, etc.
# for building more complex filtering predicates
#------------------------------------------------------------
def exists(condition, list):
return reduce(lambda x,y,c=condition: x or c(y), list, 0)
def forall(condition, list):
return reduce(lambda x,y,c=condition: x and c(y), list, 1)
def both(c1, c2):
return lambda x: c1(x) and c2(x)
def either(c1, c2):
return lambda x: c1(x) or c2(x)
def isnt(c):
return lambda x: not c(x)
def trueP(x):
return True
def falseP(x):
return False
def all_of_list(l):
uber_cond = trueP
for (m, p) in l:
if (p) :
uber_cond = both(uber_cond, m)
else :
uber_cond = both(uber_cond, isnt(m))
return uber_cond
def any_of_list(l):
uber_cond = falseP
for (m, p) in l:
if (p) :
uber_cond = either(uber_cond, m)
else :
uber_cond = either(uber_cond, isnt(m))
return uber_cond
def filters_of_list(l):
uber_filt = {}
for f in l:
uber_filt.update(f)
return uber_filt
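# Example (illustrative only): the predicates defined further below can be
# combined with these helpers before being handed to the filters in the next
# section, e.g.
#   check = all_of_list([(uses_global, True), (is_alloc1, False)])
#   clusters = race_matches(run, check, 2)
# keeps clusters containing at least one race on a global lval whose first
# access does not look like an allocation-site temporary.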
#------------------------------------------------------------
# Mechanism for filtering races, given a predicates
#------------------------------------------------------------
def race_matches(run, check, _depth):
""" Return all races which evaluate to true for a given check
(and from the given run) """
t = time.time()
rcs = []
this_run = Race_cluster.objects.filter(run = run)
for rc in this_run:
races = rc.races.select_related(depth=_depth)
if (exists(check, races)):
rcs.append(rc)
print "Filtered in %.3f" % (time.time() - t)
return rcs
def race_filter_match(run, include, exclude, check, _depth):
""" Return all races which are included in the search criteria (include),
and not excluded (from exclude) and are from the given run """
t = time.time()
rcs = []
this_run = Race_cluster.objects.filter(run = run)
for rc in this_run:
races = rc.races.select_related(depth=_depth).filter(**include).exclude(**exclude)
if (exists(check, races)):
rcs.append(rc)
print "Filtered in %.3f" % (time.time() - t)
return rcs
def filter_more(race_clusts, check, depth=5):
""" Given a list of races (presumably this is the result list of
a previously run filter), return the subset that pass the check """
t = time.time()
res = [r for r in race_clusts
if (exists(check, r.races.select_related(depth)))]
print "Filtered in %.3f" % (time.time() - t)
return res
#------------------------------------------------------------
# Predicates for race heuristic filters
#------------------------------------------------------------
def lval_match_pat1(pat):
return lambda race: pat.match (str(race.access1.lval.printed)) != None
def lval_match_pat2(pat):
return lambda race: pat.match (str(race.access2.lval.printed)) != None
lval_alloc_sql_re = r'.*_a[0-9]+_[0-9]+.*'
lval_alloc_re = re.compile('.*_a\d+_\d+.*')
is_alloc1 = lval_match_pat1(lval_alloc_re)
is_alloc2 = lval_match_pat2(lval_alloc_re)
def both_alloc():
filt = Q(races__access1__lval__printed__regex = lval_alloc_sql_re)
filt = filt & Q(races__access2__lval__printed__regex = lval_alloc_sql_re)
return filt
def some_alloc():
filt = Q(races__access1__lval__printed__regex = lval_alloc_sql_re)
filt = filt | Q(races__access2__lval__printed__regex = lval_alloc_sql_re)
return filt
def lvals_syntactic(race):
return str(race.access1.lval.printed) == str(race.access2.lval.printed)
#---
def fun_match(pf, pat):
if (pf):
return pat.match (str(pf.name)) != None
else:
return False
def fun_match_pat1(pat):
return lambda race: fun_match(race.access1.occurs_at.parent_function, pat)
def fun_match_pat2(pat):
return lambda race: fun_match(race.access2.occurs_at.parent_function, pat)
def fun_match_filter(pat):
filt = Q(races__access1__occurs_at__parent_function__name__regex = pat)
filt = filt & \
Q(races__access2__occurs_at__parent_function__name__regex = pat)
return filt
#---
def tr_match_pat1(pat):
return lambda race: fun_match(race.access1.accessed_through.root_function, pat)
def tr_match_pat2(pat):
return lambda race: fun_match(race.access2.accessed_through.root_function, pat)
def tr_match_filter(pat):
filt = Q(races__access1__accessed_through__root_function__name__regex = pat)
filt = filt & \
Q(races__access2__accessed_through__root_function__name__regex = pat)
return filt
#---
def uses_blob(n):
return lambda race : (race.access1.lval.rep_size > n or
race.access2.lval.rep_size > n)
# Faster, DB-level filter requiring both accesses of a race to use blobs
# of size < n
def uses_blob_lt_filter(n):
filt = Q(races__access1__lval__rep_size__lt = n)
filt = filt & Q(races__access2__lval__rep_size__lt = n)
return filt
#---
def uses_global(race):
return race.access1.lval.is_global or race.access2.lval.is_global
def uses_global_filter():
filt = Q(races__access1__lval__is_global=True)
filt = filt & Q(races__access2__lval__is_global=True)
return filt
#---
def with_lock(race):
return race.access1.locks.count() > 0 or race.access2.locks.count() > 0
def with_lock_filter():
filt = Q(races__access1__locks__isnull=False)
filt = filt | Q(races__access2__locks__isnull=False)
return filt
#---
def same_thread(race):
return (race.access1.accessed_through.spawn_site == race.access2.accessed_through.spawn_site) and (race.access1.accessed_through.root_function == race.access2.accessed_through.root_function)
#------------------------------------------------------------
# Search for races matching given search terms
def race_on_at(rcs, lv1=None, lv2=None, locs=[], d=5):
"""
Return race clusters on races that access the given lvals
(printed form) at some location in the given locations (filename, line)
"""
t = time.time()
filt = Q()
if (lv1 != None):
filt = filt & Q(races__access1__lval__printed__icontains = lv1)
if (lv2 != None):
filt = filt & Q(races__access2__lval__printed__icontains = lv2)
filts = Q()
for (f, l) in locs:
filts = filts | (filt & Q(races__access1__occurs_at__line_num = l,
races__access1__occurs_at__file_name__icontains = f))
filts = filts | (filt & Q(races__access2__occurs_at__line_num = l,
races__access2__occurs_at__file_name__icontains = f))
x = rcs.filter(filts).distinct()
print "Num results: %d -> %s" % (x.count(), [rc.id for rc in x])
print "Filtered in %.3f" % (time.time() - t)
return x
def race_on_exact(rcs, lv1, lv2, (f1,l1), (f2,l2), d=5):
"""
Return race clusters on races that access the given lvals
(printed form) at the exact given locations (filename, line)
"""
t = time.time()
filt = Q()
filt = filt & Q(races__access1__lval__printed__icontains = lv1)
filt = filt & Q(races__access2__lval__printed__icontains = lv2)
filts = ((filt & Q(races__access1__occurs_at__line_num = l1,
races__access1__occurs_at__file_name__icontains = f1) &
Q(races__access2__occurs_at__line_num = l2,
races__access2__occurs_at__file_name__icontains = f2)) |
(filt & Q(races__access1__occurs_at__line_num = l2,
races__access1__occurs_at__file_name__icontains = f2) &
Q(races__access2__occurs_at__line_num = l1,
races__access2__occurs_at__file_name__icontains = f1)))
x = rcs.filter(filts).distinct()
print "Num results: %d -> %s" % (x.count(), [rc.id for rc in x])
print "Filtered in %.3f" % (time.time() - t)
return x
def race_on_search(rcs, searchTerm):
lv1, lv2, locs = searchTerm
return race_on_at(rcs, lv1, lv2, locs)
def race_on_search_exact(rcs, searchTerm):
lv1, lv2, loc1, loc2 = searchTerm
return race_on_exact(rcs, lv1, lv2, loc1, loc2)
def race_on_at2(rcs, lv1=None, lv2=None, locs=[], d=5):
"""
OLD VERSION: Return race clusters on races that access the given lvals
(printed form) at some location in the given locations (filename, line)
"""
t = time.time()
filt = {}
if (lv1 != None):
filt = dict(filt, **{'access1__lval__printed__icontains' : lv1})
if (lv2 != None):
filt = dict(filt, **{'access2__lval__printed__icontains' : lv2})
filts = [dict({'access1__occurs_at__line_num' : l,
'access1__occurs_at__file_name__icontains' : f})
for (f, l) in locs]
filts = filts + [dict({'access2__occurs_at__line_num' : l,
'access2__occurs_at__file_name__icontains' : f})
for (f, l) in locs]
results = []
found = False # hack because continue restarts inner loop
for r in rcs:
found = False
rs = r.races.select_related().filter(**filt)
if (rs.count() <= 0):
continue
for f in filts:
if(found):
continue
rs = rs.filter(**f)
if (rs.count() > 0):
results.append(r)
found = True
continue
print "Filtered in %.3f" % (time.time() - t)
return results
#------------- TESTS -------------
"""
from relay.warnings.search import *
from relay.warnings.models import *
r = Run.objects.get(id=66)
rcs = Race_cluster.objects.filter(run=r)
results = race_on_at(rcs, "shift_state", "shift_state",
[("drivers/char/keyboard.c", 385), ("drivers/char/keyboard.c", 361)])
len(results)
>>> 3
results = race_on_at(rcs, "app_abort_code", "app_abort_code",
[("net/rxrpc/call.c", 102),
("net/rxrpc/call.c", 889)])
len(results)
>>> 2
"""
|
|
from django.contrib.auth.models import User
from djblets.webapi.errors import PERMISSION_DENIED
from djblets.webapi.testing.decorators import webapi_test_template
from reviewboard.reviews.models import GeneralComment
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (
general_comment_item_mimetype,
general_comment_list_mimetype)
from reviewboard.webapi.tests.mixins import (
BasicTestsMetaclass,
ReviewRequestChildItemMixin,
ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_comment import (
CommentItemMixin,
CommentListMixin)
from reviewboard.webapi.tests.urls import (
get_review_general_comment_item_url,
get_review_general_comment_list_url)
class BaseTestCase(BaseWebAPITestCase):
fixtures = ['test_users']
def _create_general_review_with_issue(self, publish=False,
comment_text=None):
"""Sets up a review for a general comment that includes an open issue.
If `publish` is True, the review is published. The review request is
always published.
Returns the response from posting the comment, the review object, and
the review request object.
"""
if not comment_text:
comment_text = 'Test general comment with an opened issue'
review_request = self.create_review_request(publish=True,
submitter=self.user)
review = self.create_review(review_request, user=self.user,
publish=publish)
comment = self.create_general_comment(review, comment_text,
issue_opened=True)
return comment, review, review_request
class ResourceListTests(CommentListMixin, ReviewRequestChildListMixin,
BaseTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewGeneralCommentResource list APIs."""
sample_api_url = 'review-requests/<id>/reviews/<id>/general-comments/'
resource = resources.review_general_comment
def setup_review_request_child_test(self, review_request):
review = self.create_review(review_request, user=self.user)
return (get_review_general_comment_list_url(review),
general_comment_list_mimetype)
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name,
populate_items):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user)
if populate_items:
items = [self.create_general_comment(review)]
else:
items = []
return (get_review_general_comment_list_url(review,
local_site_name),
general_comment_list_mimetype,
items)
#
# HTTP POST tests
#
def setup_basic_post_test(self, user, with_local_site, local_site_name,
post_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user)
return (get_review_general_comment_list_url(review,
local_site_name),
general_comment_item_mimetype,
{
'text': 'Test comment',
},
[review])
def check_post_result(self, user, rsp, review):
comment = \
GeneralComment.objects.get(pk=rsp['general_comment']['id'])
self.compare_item(rsp['general_comment'], comment)
def test_post_with_issue(self):
"""Testing the
POST review-requests/<id>/reviews/<id>/general-comments/ API
with an issue
"""
comment_text = "Test general comment with an opened issue"
comment, review, review_request = \
self._create_general_review_with_issue(
publish=False, comment_text=comment_text)
rsp = self.api_get(
get_review_general_comment_list_url(review),
expected_mimetype=general_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn('general_comments', rsp)
self.assertEqual(len(rsp['general_comments']), 1)
self.assertEqual(rsp['general_comments'][0]['text'], comment_text)
self.assertTrue(rsp['general_comments'][0]['issue_opened'])
@webapi_test_template
def test_post_with_non_review_owner(self):
"""Testing the POST <URL> API as non-owner of review"""
review_request = self.create_review_request(publish=True,
submitter=self.user)
review = self.create_review(review_request,
user=self.user)
self.assertNotEqual(self.user.username, 'doc')
self.client.login(username='doc', password='doc')
rsp = self.api_post(
get_review_general_comment_list_url(review),
{
'text': 'Test',
},
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
class ResourceItemTests(CommentItemMixin, ReviewRequestChildItemMixin,
BaseTestCase, metaclass=BasicTestsMetaclass):
"""Testing the ReviewGeneralCommentResource item APIs."""
fixtures = ['test_users']
sample_api_url = \
'review-requests/<id>/reviews/<id>/general-comments/<id>/'
resource = resources.review_general_comment
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
self.assertEqual(item_rsp['extra_data'], comment.extra_data)
if comment.rich_text:
self.assertEqual(item_rsp['text_type'], 'markdown')
else:
self.assertEqual(item_rsp['text_type'], 'plain')
def setup_review_request_child_test(self, review_request):
review = self.create_review(review_request, user=self.user)
comment = self.create_general_comment(review)
return (get_review_general_comment_item_url(review, comment.pk),
general_comment_item_mimetype)
#
# HTTP DELETE tests
#
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user)
comment = self.create_general_comment(review)
return (get_review_general_comment_item_url(review, comment.pk,
local_site_name),
[comment, review])
def check_delete_result(self, user, comment, review):
self.assertNotIn(comment, review.general_comments.all())
def test_delete_with_does_not_exist_error(self):
"""Testing the
DELETE review-requests/<id>/reviews/<id>/general-comments/<id>/ API
with Does Not Exist error
"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, user=self.user)
self.api_delete(get_review_general_comment_item_url(review, 123),
expected_status=404)
#
# HTTP GET tests
#
def setup_basic_get_test(self, user, with_local_site, local_site_name):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user)
comment = self.create_general_comment(review)
return (get_review_general_comment_item_url(review, comment.pk,
local_site_name),
general_comment_item_mimetype,
comment)
#
# HTTP PUT tests
#
def setup_basic_put_test(self, user, with_local_site, local_site_name,
put_valid_data):
review_request = self.create_review_request(
with_local_site=with_local_site,
submitter=user,
publish=True)
review = self.create_review(review_request, user=user)
comment = self.create_general_comment(review)
return (get_review_general_comment_item_url(review, comment.pk,
local_site_name),
general_comment_item_mimetype,
{
'text': 'Test comment',
},
comment,
[])
def check_put_result(self, user, item_rsp, comment, *args):
comment = GeneralComment.objects.get(pk=comment.pk)
self.assertEqual(item_rsp['text_type'], 'plain')
self.assertEqual(item_rsp['text'], 'Test comment')
self.compare_item(item_rsp, comment)
def test_put_with_issue(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/general-comments/<id>/ API
with an issue, removing issue_opened
"""
comment, review, review_request = \
self._create_general_review_with_issue()
rsp = self.api_put(
get_review_general_comment_item_url(review, comment.pk),
{'issue_opened': False},
expected_mimetype=general_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertFalse(rsp['general_comment']['issue_opened'])
def test_put_issue_status_before_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/general-comments/<id> API
with an issue, before review is published
"""
comment, review, review_request = \
self._create_general_review_with_issue()
# The issue_status should not be able to be changed while the review is
# unpublished.
rsp = self.api_put(
get_review_general_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=general_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
# The issue_status should still be "open"
self.assertEqual(rsp['general_comment']['issue_status'], 'open')
def test_put_issue_status_after_publish(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/general-comments/<id>/ API
with an issue, after review is published
"""
comment, review, review_request = \
self._create_general_review_with_issue(publish=True)
rsp = self.api_put(
get_review_general_comment_item_url(review, comment.pk),
{'issue_status': 'resolved'},
expected_mimetype=general_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['general_comment']['issue_status'], 'resolved')
def test_put_issue_status_by_issue_creator(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/general-comments/<id>/ API
permissions for issue creator
"""
comment, review, review_request = \
self._create_general_review_with_issue(publish=True)
# Change the owner of the review request so that it's not owned by
# self.user
review_request.submitter = User.objects.get(username='doc')
review_request.save()
# The review/comment (and therefore issue) is still owned by self.user,
# so we should be able to change the issue status.
rsp = self.api_put(
get_review_general_comment_item_url(review, comment.pk),
{'issue_status': 'dropped'},
expected_mimetype=general_comment_item_mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertEqual(rsp['general_comment']['issue_status'], 'dropped')
def test_put_issue_status_by_uninvolved_user(self):
"""Testing the
PUT review-requests/<id>/reviews/<id>/general-comments/<id>/ API
permissions for an uninvolved user
"""
comment, review, review_request = \
self._create_general_review_with_issue(publish=True)
# Change the owner of the review request and review so that they're not
# owned by self.user.
new_owner = User.objects.get(username='doc')
review_request.submitter = new_owner
review_request.save()
review.user = new_owner
review.save()
rsp = self.api_put(
get_review_general_comment_item_url(review, comment.pk),
{'issue_status': 'dropped'},
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
|
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_base_info
except ImportError:
bt_base_info = sys.modules["onshape_client.oas.models.bt_base_info"]
try:
from onshape_client.oas.models import bt_document_info
except ImportError:
bt_document_info = sys.modules["onshape_client.oas.models.bt_document_info"]
try:
from onshape_client.oas.models import bt_document_label_info
except ImportError:
bt_document_label_info = sys.modules[
"onshape_client.oas.models.bt_document_label_info"
]
try:
from onshape_client.oas.models import bt_document_processing_info_all_of
except ImportError:
bt_document_processing_info_all_of = sys.modules[
"onshape_client.oas.models.bt_document_processing_info_all_of"
]
try:
from onshape_client.oas.models import bt_owner_info
except ImportError:
bt_owner_info = sys.modules["onshape_client.oas.models.bt_owner_info"]
try:
from onshape_client.oas.models import bt_thumbnail_info
except ImportError:
bt_thumbnail_info = sys.modules["onshape_client.oas.models.bt_thumbnail_info"]
try:
from onshape_client.oas.models import bt_user_basic_summary_info
except ImportError:
bt_user_basic_summary_info = sys.modules[
"onshape_client.oas.models.bt_user_basic_summary_info"
]
try:
from onshape_client.oas.models import bt_workspace_info
except ImportError:
bt_workspace_info = sys.modules["onshape_client.oas.models.bt_workspace_info"]
class BTDocumentProcessingInfo(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is the attribute name
and the value is the JSON key in the definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("permission",): {
"NOACCESS": "NOACCESS",
"ANONYMOUS_ACCESS": "ANONYMOUS_ACCESS",
"READ": "READ",
"READ_COPY_EXPORT": "READ_COPY_EXPORT",
"COMMENT": "COMMENT",
"WRITE": "WRITE",
"RESHARE": "RESHARE",
"FULL": "FULL",
"OWNER": "OWNER",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self; this ensures that we don't create a cyclic import.
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"translation_event_key": (str,), # noqa: E501
"translation_id": (str,), # noqa: E501
"anonymous_access_allowed": (bool,), # noqa: E501
"anonymous_allows_export": (bool,), # noqa: E501
"beta_capability_ids": ([str],), # noqa: E501
"can_move": (bool,), # noqa: E501
"can_unshare": (bool,), # noqa: E501
"created_at": (datetime,), # noqa: E501
"created_by": (
bt_user_basic_summary_info.BTUserBasicSummaryInfo,
), # noqa: E501
"created_with_education_plan": (bool,), # noqa: E501
"default_element_id": (str,), # noqa: E501
"default_workspace": (bt_workspace_info.BTWorkspaceInfo,), # noqa: E501
"description": (str,), # noqa: E501
"document_labels": (
[bt_document_label_info.BTDocumentLabelInfo],
), # noqa: E501
"document_thumbnail_element_id": (str,), # noqa: E501
"duplicate_name_violation_error": (str,), # noqa: E501
"has_release_revisionable_objects": (bool,), # noqa: E501
"has_relevant_insertables": (bool,), # noqa: E501
"href": (str,), # noqa: E501
"id": (str,), # noqa: E501
"is_container": (bool,), # noqa: E501
"is_enterprise_owned": (bool,), # noqa: E501
"is_mutable": (bool,), # noqa: E501
"is_orphaned": (bool,), # noqa: E501
"is_upgraded_to_latest_version": (bool,), # noqa: E501
"is_using_managed_workflow": (bool,), # noqa: E501
"json_type": (str,), # noqa: E501
"liked_by_current_user": (bool,), # noqa: E501
"likes": (int,), # noqa: E501
"modified_at": (datetime,), # noqa: E501
"modified_by": (
bt_user_basic_summary_info.BTUserBasicSummaryInfo,
), # noqa: E501
"name": (str,), # noqa: E501
"not_revision_managed": (bool,), # noqa: E501
"number_of_times_copied": (int,), # noqa: E501
"number_of_times_referenced": (int,), # noqa: E501
"owner": (bt_owner_info.BTOwnerInfo,), # noqa: E501
"parent_id": (str,), # noqa: E501
"permission": (str,), # noqa: E501
"permission_set": ([str],), # noqa: E501
"project_id": (str,), # noqa: E501
"public": (bool,), # noqa: E501
"recent_version": (bt_base_info.BTBaseInfo,), # noqa: E501
"resource_type": (str,), # noqa: E501
"support_team_user_and_shared": (bool,), # noqa: E501
"tags": ([str],), # noqa: E501
"thumbnail": (bt_thumbnail_info.BTThumbnailInfo,), # noqa: E501
"total_workspaces_scheduled_for_update": (int,), # noqa: E501
"total_workspaces_updating": (int,), # noqa: E501
"trash": (bool,), # noqa: E501
"trashed_at": (datetime,), # noqa: E501
"tree_href": (str,), # noqa: E501
"user_account_limits_breached": (bool,), # noqa: E501
"view_ref": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"translation_event_key": "translationEventKey", # noqa: E501
"translation_id": "translationId", # noqa: E501
"anonymous_access_allowed": "anonymousAccessAllowed", # noqa: E501
"anonymous_allows_export": "anonymousAllowsExport", # noqa: E501
"beta_capability_ids": "betaCapabilityIds", # noqa: E501
"can_move": "canMove", # noqa: E501
"can_unshare": "canUnshare", # noqa: E501
"created_at": "createdAt", # noqa: E501
"created_by": "createdBy", # noqa: E501
"created_with_education_plan": "createdWithEducationPlan", # noqa: E501
"default_element_id": "defaultElementId", # noqa: E501
"default_workspace": "defaultWorkspace", # noqa: E501
"description": "description", # noqa: E501
"document_labels": "documentLabels", # noqa: E501
"document_thumbnail_element_id": "documentThumbnailElementId", # noqa: E501
"duplicate_name_violation_error": "duplicateNameViolationError", # noqa: E501
"has_release_revisionable_objects": "hasReleaseRevisionableObjects", # noqa: E501
"has_relevant_insertables": "hasRelevantInsertables", # noqa: E501
"href": "href", # noqa: E501
"id": "id", # noqa: E501
"is_container": "isContainer", # noqa: E501
"is_enterprise_owned": "isEnterpriseOwned", # noqa: E501
"is_mutable": "isMutable", # noqa: E501
"is_orphaned": "isOrphaned", # noqa: E501
"is_upgraded_to_latest_version": "isUpgradedToLatestVersion", # noqa: E501
"is_using_managed_workflow": "isUsingManagedWorkflow", # noqa: E501
"json_type": "jsonType", # noqa: E501
"liked_by_current_user": "likedByCurrentUser", # noqa: E501
"likes": "likes", # noqa: E501
"modified_at": "modifiedAt", # noqa: E501
"modified_by": "modifiedBy", # noqa: E501
"name": "name", # noqa: E501
"not_revision_managed": "notRevisionManaged", # noqa: E501
"number_of_times_copied": "numberOfTimesCopied", # noqa: E501
"number_of_times_referenced": "numberOfTimesReferenced", # noqa: E501
"owner": "owner", # noqa: E501
"parent_id": "parentId", # noqa: E501
"permission": "permission", # noqa: E501
"permission_set": "permissionSet", # noqa: E501
"project_id": "projectId", # noqa: E501
"public": "public", # noqa: E501
"recent_version": "recentVersion", # noqa: E501
"resource_type": "resourceType", # noqa: E501
"support_team_user_and_shared": "supportTeamUserAndShared", # noqa: E501
"tags": "tags", # noqa: E501
"thumbnail": "thumbnail", # noqa: E501
"total_workspaces_scheduled_for_update": "totalWorkspacesScheduledForUpdate", # noqa: E501
"total_workspaces_updating": "totalWorkspacesUpdating", # noqa: E501
"trash": "trash", # noqa: E501
"trashed_at": "trashedAt", # noqa: E501
"tree_href": "treeHref", # noqa: E501
"user_account_limits_breached": "userAccountLimitsBreached", # noqa: E501
"view_ref": "viewRef", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_document_processing_info.BTDocumentProcessingInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
translation_event_key (str): [optional] # noqa: E501
translation_id (str): [optional] # noqa: E501
anonymous_access_allowed (bool): [optional] # noqa: E501
anonymous_allows_export (bool): [optional] # noqa: E501
beta_capability_ids ([str]): [optional] # noqa: E501
can_move (bool): [optional] # noqa: E501
can_unshare (bool): [optional] # noqa: E501
created_at (datetime): [optional] # noqa: E501
created_by (bt_user_basic_summary_info.BTUserBasicSummaryInfo): [optional] # noqa: E501
created_with_education_plan (bool): [optional] # noqa: E501
default_element_id (str): [optional] # noqa: E501
default_workspace (bt_workspace_info.BTWorkspaceInfo): [optional] # noqa: E501
description (str): [optional] # noqa: E501
document_labels ([bt_document_label_info.BTDocumentLabelInfo]): [optional] # noqa: E501
document_thumbnail_element_id (str): [optional] # noqa: E501
duplicate_name_violation_error (str): [optional] # noqa: E501
has_release_revisionable_objects (bool): [optional] # noqa: E501
has_relevant_insertables (bool): [optional] # noqa: E501
href (str): [optional] # noqa: E501
id (str): [optional] # noqa: E501
is_container (bool): [optional] # noqa: E501
is_enterprise_owned (bool): [optional] # noqa: E501
is_mutable (bool): [optional] # noqa: E501
is_orphaned (bool): [optional] # noqa: E501
is_upgraded_to_latest_version (bool): [optional] # noqa: E501
is_using_managed_workflow (bool): [optional] # noqa: E501
json_type (str): [optional] # noqa: E501
liked_by_current_user (bool): [optional] # noqa: E501
likes (int): [optional] # noqa: E501
modified_at (datetime): [optional] # noqa: E501
modified_by (bt_user_basic_summary_info.BTUserBasicSummaryInfo): [optional] # noqa: E501
name (str): [optional] # noqa: E501
not_revision_managed (bool): [optional] # noqa: E501
number_of_times_copied (int): [optional] # noqa: E501
number_of_times_referenced (int): [optional] # noqa: E501
owner (bt_owner_info.BTOwnerInfo): [optional] # noqa: E501
parent_id (str): [optional] # noqa: E501
permission (str): [optional] # noqa: E501
permission_set ([str]): [optional] # noqa: E501
project_id (str): [optional] # noqa: E501
public (bool): [optional] # noqa: E501
recent_version (bt_base_info.BTBaseInfo): [optional] # noqa: E501
resource_type (str): [optional] # noqa: E501
support_team_user_and_shared (bool): [optional] # noqa: E501
tags ([str]): [optional] # noqa: E501
thumbnail (bt_thumbnail_info.BTThumbnailInfo): [optional] # noqa: E501
total_workspaces_scheduled_for_update (int): [optional] # noqa: E501
total_workspaces_updating (int): [optional] # noqa: E501
trash (bool): [optional] # noqa: E501
trashed_at (datetime): [optional] # noqa: E501
tree_href (str): [optional] # noqa: E501
user_account_limits_breached (bool): [optional] # noqa: E501
view_ref (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# We need this here to make our import statements work.
# We must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class-level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading.
return {
"anyOf": [],
"allOf": [
bt_document_info.BTDocumentInfo,
bt_document_processing_info_all_of.BTDocumentProcessingInfoAllOf,
],
"oneOf": [],
}
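# --- Illustrative sketch (not part of the generated client) ------------------
# attribute_map above is how the generated model translates between the
# snake_case attribute names and the camelCase keys in Onshape's JSON. A
# minimal, hypothetical round-trip helper built on that map (the sample
# payload below is made up):
if __name__ == "__main__":
    def _to_python_keys(json_payload, attribute_map):
        """Map camelCase JSON keys back to snake_case attribute names."""
        reverse_map = {json_key: attr for attr, json_key in attribute_map.items()}
        return {reverse_map.get(key, key): value for key, value in json_payload.items()}

    sample = {"name": "My document", "isMutable": True, "permission": "OWNER"}
    assert _to_python_keys(sample, BTDocumentProcessingInfo.attribute_map) == {
        "name": "My document",
        "is_mutable": True,
        "permission": "OWNER",
    }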
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import iso8601
from lxml import etree
from oslo.config import cfg
import webob
from nova.api.openstack import compute
from nova.api.openstack.compute import extensions as compute_extensions
from nova.api.openstack import extensions as base_extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
from nova.openstack.common import jsonutils
import nova.policy
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import matchers
CONF = cfg.CONF
NS = "{http://docs.openstack.org/common/api/v1.0}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
response_body = "Try to say this Mr. Knox, sir..."
extension_body = "I am not a fox!"
class StubController(object):
def __init__(self, body):
self.body = body
def index(self, req):
return self.body
def create(self, req):
msg = 'All aboard the fail train!'
raise webob.exc.HTTPBadRequest(explanation=msg)
def show(self, req, id):
raise webob.exc.HTTPNotFound()
class StubActionController(wsgi.Controller):
def __init__(self, body):
self.body = body
@wsgi.action('fooAction')
def _action_foo(self, req, id, body):
return self.body
class StubControllerExtension(base_extensions.ExtensionDescriptor):
name = 'twaadle'
def __init__(self):
pass
class StubEarlyExtensionController(wsgi.Controller):
def __init__(self, body):
self.body = body
@wsgi.extends
def index(self, req):
yield self.body
@wsgi.extends(action='fooAction')
def _action_foo(self, req, id, body):
yield self.body
class StubLateExtensionController(wsgi.Controller):
def __init__(self, body):
self.body = body
@wsgi.extends
def index(self, req, resp_obj):
return self.body
@wsgi.extends(action='fooAction')
def _action_foo(self, req, resp_obj, id, body):
return self.body
class StubExtensionManager(object):
"""Provides access to Tweedle Beetles."""
name = "Tweedle Beetle Extension"
alias = "TWDLBETL"
def __init__(self, resource_ext=None, action_ext=None, request_ext=None,
controller_ext=None):
self.resource_ext = resource_ext
self.action_ext = action_ext
self.request_ext = request_ext
self.controller_ext = controller_ext
self.extra_resource_ext = None
def get_resources(self):
resource_exts = []
if self.resource_ext:
resource_exts.append(self.resource_ext)
if self.extra_resource_ext:
resource_exts.append(self.extra_resource_ext)
return resource_exts
def get_actions(self):
action_exts = []
if self.action_ext:
action_exts.append(self.action_ext)
return action_exts
def get_request_extensions(self):
request_extensions = []
if self.request_ext:
request_extensions.append(self.request_ext)
return request_extensions
def get_controller_extensions(self):
controller_extensions = []
if self.controller_ext:
controller_extensions.append(self.controller_ext)
return controller_extensions
class ExtensionTestCase(test.TestCase):
def setUp(self):
super(ExtensionTestCase, self).setUp()
ext_list = CONF.osapi_compute_extension[:]
fox = ('nova.tests.api.openstack.compute.extensions.'
'foxinsocks.Foxinsocks')
if fox not in ext_list:
ext_list.append(fox)
self.flags(osapi_compute_extension=ext_list)
self.fake_context = nova.context.RequestContext('fake', 'fake')
def test_extension_authorizer_throws_exception_if_policy_fails(self):
target = {'project_id': '1234',
'user_id': '5678'}
self.mox.StubOutWithMock(nova.policy, 'enforce')
nova.policy.enforce(self.fake_context,
"compute_extension:used_limits_for_admin",
target).AndRaise(
exception.PolicyNotAuthorized(
action="compute_extension:used_limits_for_admin"))
self.mox.ReplayAll()
authorize = base_extensions.extension_authorizer('compute',
'used_limits_for_admin'
)
self.assertRaises(exception.PolicyNotAuthorized, authorize,
self.fake_context, target=target)
class ExtensionControllerTest(ExtensionTestCase):
def setUp(self):
super(ExtensionControllerTest, self).setUp()
self.ext_list = [
"AdminActions",
"Aggregates",
"AvailabilityZone",
"Agents",
"Certificates",
"Cloudpipe",
"CloudpipeUpdate",
"ConsoleOutput",
"Consoles",
"Createserverext",
"DeferredDelete",
"DiskConfig",
"ExtendedAvailabilityZone",
"ExtendedFloatingIps",
"ExtendedIps",
"ExtendedIpsMac",
"ExtendedVIFNet",
"Evacuate",
"ExtendedStatus",
"ExtendedVolumes",
"ExtendedServerAttributes",
"FixedIPs",
"FlavorAccess",
"FlavorDisabled",
"FlavorExtraSpecs",
"FlavorExtraData",
"FlavorManage",
"FlavorRxtx",
"FlavorSwap",
"FloatingIps",
"FloatingIpDns",
"FloatingIpPools",
"FloatingIpsBulk",
"Fox In Socks",
"Hosts",
"ImageSize",
"InstanceActions",
"Keypairs",
"Multinic",
"MultipleCreate",
"QuotaClasses",
"Quotas",
"ExtendedQuotas",
"Rescue",
"SchedulerHints",
"SecurityGroupDefaultRules",
"SecurityGroups",
"ServerDiagnostics",
"ServerPassword",
"ServerStartStop",
"Services",
"SimpleTenantUsage",
"UsedLimits",
"UserData",
"VirtualInterfaces",
"VolumeAttachmentUpdate",
"Volumes",
]
self.ext_list.sort()
def test_list_extensions_json(self):
app = compute.APIRouter(init_only=('extensions',))
request = webob.Request.blank("/fake/extensions")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
# Make sure we have all the extensions, extra extensions being OK.
data = jsonutils.loads(response.body)
names = [str(x['name']) for x in data['extensions']
if str(x['name']) in self.ext_list]
names.sort()
self.assertEqual(names, self.ext_list)
# Ensure all the timestamps are valid according to iso8601
for ext in data['extensions']:
iso8601.parse_date(ext['updated'])
# Make sure that at least Fox in Sox is correct.
(fox_ext, ) = [
x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
self.assertEqual(fox_ext, {
'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
'name': 'Fox In Socks',
'updated': '2011-01-22T13:25:27-06:00',
'description': 'The Fox In Socks Extension.',
'alias': 'FOXNSOX',
'links': []
},
)
for ext in data['extensions']:
url = '/fake/extensions/%s' % ext['alias']
request = webob.Request.blank(url)
response = request.get_response(app)
output = jsonutils.loads(response.body)
self.assertEqual(output['extension']['alias'], ext['alias'])
def test_get_extension_json(self):
app = compute.APIRouter(init_only=('extensions',))
request = webob.Request.blank("/fake/extensions/FOXNSOX")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
data = jsonutils.loads(response.body)
self.assertEqual(data['extension'], {
"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
"name": "Fox In Socks",
"updated": "2011-01-22T13:25:27-06:00",
"description": "The Fox In Socks Extension.",
"alias": "FOXNSOX",
"links": []})
def test_get_non_existing_extension_json(self):
app = compute.APIRouter(init_only=('extensions',))
request = webob.Request.blank("/fake/extensions/4")
response = request.get_response(app)
self.assertEqual(404, response.status_int)
def test_list_extensions_xml(self):
app = compute.APIRouter(init_only=('servers', 'flavors', 'extensions'))
request = webob.Request.blank("/fake/extensions")
request.accept = "application/xml"
response = request.get_response(app)
self.assertEqual(200, response.status_int)
root = etree.XML(response.body)
self.assertEqual(root.tag.split('extensions')[0], NS)
# Make sure we have all the extensions, extra extensions being OK.
exts = root.findall('{0}extension'.format(NS))
self.assert_(len(exts) >= len(self.ext_list))
# Make sure that at least Fox in Sox is correct.
(fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
self.assertEqual(fox_ext.get('namespace'),
'http://www.fox.in.socks/api/ext/pie/v1.0')
self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00')
self.assertEqual(fox_ext.findtext('{0}description'.format(NS)),
'The Fox In Socks Extension.')
xmlutil.validate_schema(root, 'extensions')
def test_get_extension_xml(self):
app = compute.APIRouter(init_only=('servers', 'flavors', 'extensions'))
request = webob.Request.blank("/fake/extensions/FOXNSOX")
request.accept = "application/xml"
response = request.get_response(app)
self.assertEqual(200, response.status_int)
xml = response.body
root = etree.XML(xml)
self.assertEqual(root.tag.split('extension')[0], NS)
self.assertEqual(root.get('alias'), 'FOXNSOX')
self.assertEqual(root.get('name'), 'Fox In Socks')
self.assertEqual(root.get('namespace'),
'http://www.fox.in.socks/api/ext/pie/v1.0')
self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00')
self.assertEqual(root.findtext('{0}description'.format(NS)),
'The Fox In Socks Extension.')
xmlutil.validate_schema(root, 'extension')
class ResourceExtensionTest(ExtensionTestCase):
def test_no_extension_present(self):
manager = StubExtensionManager(None)
app = compute.APIRouter(manager)
request = webob.Request.blank("/blah")
response = request.get_response(app)
self.assertEqual(404, response.status_int)
def test_get_resources(self):
res_ext = base_extensions.ResourceExtension('tweedles',
StubController(response_body))
manager = StubExtensionManager(res_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(response_body, response.body)
def test_get_resources_with_controller(self):
res_ext = base_extensions.ResourceExtension('tweedles',
StubController(response_body))
manager = StubExtensionManager(res_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(response_body, response.body)
def test_bad_request(self):
res_ext = base_extensions.ResourceExtension('tweedles',
StubController(response_body))
manager = StubExtensionManager(res_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles")
request.method = "POST"
response = request.get_response(app)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
body = jsonutils.loads(response.body)
expected = {
"badRequest": {
"message": "All aboard the fail train!",
"code": 400
}
}
self.assertThat(expected, matchers.DictMatches(body))
def test_non_exist_resource(self):
res_ext = base_extensions.ResourceExtension('tweedles',
StubController(response_body))
manager = StubExtensionManager(res_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles/1")
response = request.get_response(app)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
body = jsonutils.loads(response.body)
expected = {
"itemNotFound": {
"message": "The resource could not be found.",
"code": 404
}
}
self.assertThat(expected, matchers.DictMatches(body))
class InvalidExtension(object):
alias = "THIRD"
class ExtensionManagerTest(ExtensionTestCase):
response_body = "Try to say this Mr. Knox, sir..."
def test_get_resources(self):
app = compute.APIRouter()
request = webob.Request.blank("/fake/foxnsocks")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(response_body, response.body)
def test_invalid_extensions(self):
# Don't need the serialization middleware here because we're
# not testing any serialization
app = compute.APIRouter()
ext_mgr = compute_extensions.ExtensionManager()
ext_mgr.register(InvalidExtension())
self.assertTrue(ext_mgr.is_loaded('FOXNSOX'))
self.assertFalse(ext_mgr.is_loaded('THIRD'))
class ActionExtensionTest(ExtensionTestCase):
def _send_server_action_request(self, url, body):
app = compute.APIRouter(init_only=('servers',))
request = webob.Request.blank(url)
request.method = 'POST'
request.content_type = 'application/json'
request.body = jsonutils.dumps(body)
response = request.get_response(app)
return response
def test_extended_action(self):
body = dict(add_tweedle=dict(name="test"))
url = "/fake/servers/abcd/action"
response = self._send_server_action_request(url, body)
self.assertEqual(200, response.status_int)
self.assertEqual("Tweedle Beetle Added.", response.body)
body = dict(delete_tweedle=dict(name="test"))
response = self._send_server_action_request(url, body)
self.assertEqual(200, response.status_int)
self.assertEqual("Tweedle Beetle Deleted.", response.body)
def test_invalid_action(self):
body = dict(blah=dict(name="test")) # Doesn't exist
url = "/fake/servers/abcd/action"
response = self._send_server_action_request(url, body)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
body = jsonutils.loads(response.body)
expected = {
"badRequest": {
"message": "There is no such action: blah",
"code": 400
}
}
self.assertThat(expected, matchers.DictMatches(body))
def test_non_exist_action(self):
body = dict(blah=dict(name="test"))
url = "/fake/fdsa/1/action"
response = self._send_server_action_request(url, body)
self.assertEqual(404, response.status_int)
def test_failed_action(self):
body = dict(fail=dict(name="test"))
url = "/fake/servers/abcd/action"
response = self._send_server_action_request(url, body)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
body = jsonutils.loads(response.body)
expected = {
"badRequest": {
"message": "Tweedle fail",
"code": 400
}
}
self.assertThat(expected, matchers.DictMatches(body))
class RequestExtensionTest(ExtensionTestCase):
def test_get_resources_with_stub_mgr(self):
class GooGoose(wsgi.Controller):
@wsgi.extends
def show(self, req, resp_obj, id):
# only handle JSON responses
resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
req_ext = base_extensions.ControllerExtension(
StubControllerExtension(), 'flavors', GooGoose())
manager = StubExtensionManager(None, None, None, req_ext)
app = fakes.wsgi_app(ext_mgr=manager)
request = webob.Request.blank("/v2/fake/flavors/1?chewing=bluegoo")
request.environ['api.version'] = '2'
response = request.get_response(app)
self.assertEqual(200, response.status_int)
response_data = jsonutils.loads(response.body)
self.assertEqual('bluegoo', response_data['flavor']['googoose'])
def test_get_resources_with_mgr(self):
app = fakes.wsgi_app(init_only=('flavors',))
request = webob.Request.blank("/v2/fake/flavors/1?chewing=newblue")
request.environ['api.version'] = '2'
response = request.get_response(app)
self.assertEqual(200, response.status_int)
response_data = jsonutils.loads(response.body)
self.assertEqual('newblue', response_data['flavor']['googoose'])
self.assertEqual("Pig Bands!", response_data['big_bands'])
class ControllerExtensionTest(ExtensionTestCase):
def test_controller_extension_early(self):
controller = StubController(response_body)
res_ext = base_extensions.ResourceExtension('tweedles', controller)
ext_controller = StubEarlyExtensionController(extension_body)
extension = StubControllerExtension()
cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
ext_controller)
manager = StubExtensionManager(resource_ext=res_ext,
controller_ext=cont_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(extension_body, response.body)
def test_controller_extension_late(self):
# Need a dict for the body to convert to a ResponseObject
controller = StubController(dict(foo=response_body))
res_ext = base_extensions.ResourceExtension('tweedles', controller)
ext_controller = StubLateExtensionController(extension_body)
extension = StubControllerExtension()
cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
ext_controller)
manager = StubExtensionManager(resource_ext=res_ext,
controller_ext=cont_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(extension_body, response.body)
def test_controller_extension_late_inherited_resource(self):
# Need a dict for the body to convert to a ResponseObject
controller = StubController(dict(foo=response_body))
parent_ext = base_extensions.ResourceExtension('tweedles', controller)
ext_controller = StubLateExtensionController(extension_body)
extension = StubControllerExtension()
cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
ext_controller)
manager = StubExtensionManager(resource_ext=parent_ext,
controller_ext=cont_ext)
child_ext = base_extensions.ResourceExtension('beetles', controller,
inherits='tweedles')
manager.extra_resource_ext = child_ext
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/beetles")
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(extension_body, response.body)
def test_controller_action_extension_early(self):
controller = StubActionController(response_body)
actions = dict(action='POST')
res_ext = base_extensions.ResourceExtension('tweedles', controller,
member_actions=actions)
ext_controller = StubEarlyExtensionController(extension_body)
extension = StubControllerExtension()
cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
ext_controller)
manager = StubExtensionManager(resource_ext=res_ext,
controller_ext=cont_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles/foo/action")
request.method = 'POST'
request.headers['Content-Type'] = 'application/json'
request.body = jsonutils.dumps(dict(fooAction=True))
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(extension_body, response.body)
def test_controller_action_extension_late(self):
# Need a dict for the body to convert to a ResponseObject
controller = StubActionController(dict(foo=response_body))
actions = dict(action='POST')
res_ext = base_extensions.ResourceExtension('tweedles', controller,
member_actions=actions)
ext_controller = StubLateExtensionController(extension_body)
extension = StubControllerExtension()
cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
ext_controller)
manager = StubExtensionManager(resource_ext=res_ext,
controller_ext=cont_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/tweedles/foo/action")
request.method = 'POST'
request.headers['Content-Type'] = 'application/json'
request.body = jsonutils.dumps(dict(fooAction=True))
response = request.get_response(app)
self.assertEqual(200, response.status_int)
self.assertEqual(extension_body, response.body)
class ExtensionsXMLSerializerTest(test.TestCase):
def test_serialize_extension(self):
serializer = base_extensions.ExtensionTemplate()
data = {'extension': {
'name': 'ext1',
'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0',
'alias': 'RS-PIE',
'updated': '2011-01-22T13:25:27-06:00',
'description': 'Adds the capability to share an image.',
'links': [{'rel': 'describedby',
'type': 'application/pdf',
'href': 'http://docs.rack.com/servers/api/ext/cs.pdf'},
{'rel': 'describedby',
'type': 'application/vnd.sun.wadl+xml',
'href': 'http://docs.rack.com/servers/api/ext/cs.wadl'}]}}
xml = serializer.serialize(data)
root = etree.XML(xml)
ext_dict = data['extension']
self.assertEqual(root.findtext('{0}description'.format(NS)),
ext_dict['description'])
for key in ['name', 'namespace', 'alias', 'updated']:
self.assertEqual(root.get(key), ext_dict[key])
link_nodes = root.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(ext_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
xmlutil.validate_schema(root, 'extension')
def test_serialize_extensions(self):
serializer = base_extensions.ExtensionsTemplate()
data = {"extensions": [{
"name": "Public Image Extension",
"namespace": "http://foo.com/api/ext/pie/v1.0",
"alias": "RS-PIE",
"updated": "2011-01-22T13:25:27-06:00",
"description": "Adds the capability to share an image.",
"links": [{"rel": "describedby",
"type": "application/pdf",
"type": "application/vnd.sun.wadl+xml",
"href": "http://foo.com/api/ext/cs-pie.pdf"},
{"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
"href": "http://foo.com/api/ext/cs-pie.wadl"}]},
{"name": "Cloud Block Storage",
"namespace": "http://foo.com/api/ext/cbs/v1.0",
"alias": "RS-CBS",
"updated": "2011-01-12T11:22:33-06:00",
"description": "Allows mounting cloud block storage.",
"links": [{"rel": "describedby",
"type": "application/pdf",
"href": "http://foo.com/api/ext/cs-cbs.pdf"},
{"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
"href": "http://foo.com/api/ext/cs-cbs.wadl"}]}]}
xml = serializer.serialize(data)
root = etree.XML(xml)
ext_elems = root.findall('{0}extension'.format(NS))
self.assertEqual(len(ext_elems), 2)
for i, ext_elem in enumerate(ext_elems):
ext_dict = data['extensions'][i]
self.assertEqual(ext_elem.findtext('{0}description'.format(NS)),
ext_dict['description'])
for key in ['name', 'namespace', 'alias', 'updated']:
self.assertEqual(ext_elem.get(key), ext_dict[key])
link_nodes = ext_elem.findall('{0}link'.format(ATOMNS))
self.assertEqual(len(link_nodes), 2)
for i, link in enumerate(ext_dict['links']):
for key, value in link.items():
self.assertEqual(link_nodes[i].get(key), value)
xmlutil.validate_schema(root, 'extensions')
class ExtensionControllerIdFormatTest(test.TestCase):
def _bounce_id(self, test_id):
class BounceController(object):
def show(self, req, id):
return id
res_ext = base_extensions.ResourceExtension('bounce',
BounceController())
manager = StubExtensionManager(res_ext)
app = compute.APIRouter(manager)
request = webob.Request.blank("/fake/bounce/%s" % test_id)
response = request.get_response(app)
return response.body
def test_id_with_xml_format(self):
result = self._bounce_id('foo.xml')
self.assertEqual(result, 'foo')
def test_id_with_json_format(self):
result = self._bounce_id('foo.json')
self.assertEqual(result, 'foo')
def test_id_with_bad_format(self):
result = self._bounce_id('foo.bad')
self.assertEqual(result, 'foo.bad')
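# --- Illustrative sketch (not part of the original test module) --------------
# ExtensionControllerIdFormatTest above relies on the router splitting a known
# format suffix off the resource id: '.json' and '.xml' are stripped, anything
# else stays part of the id. A hypothetical standalone restatement of that
# behaviour, matching the assertions above, e.g.
#     _split_format_suffix('foo.xml') == ('foo', 'xml')
#     _split_format_suffix('foo.bad') == ('foo.bad', None)
def _split_format_suffix(resource_id, known_formats=('json', 'xml')):
    """Return (id, fmt) the way the tests above expect the id to be parsed."""
    if '.' in resource_id:
        base, suffix = resource_id.rsplit('.', 1)
        if suffix in known_formats:
            return base, suffix
    return resource_id, None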
|
|
# -*- coding: utf-8 -*-
"""
tests.testing
~~~~~~~~~~~~~
Test client and more.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
import werkzeug
from flask._compat import text_type
def test_environ_defaults_from_config(app, client):
app.config['SERVER_NAME'] = 'example.com:1234'
app.config['APPLICATION_ROOT'] = '/foo'
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
assert ctx.request.url == 'http://example.com:1234/foo/'
rv = client.get('/')
assert rv.data == b'http://example.com:1234/foo/'
def test_environ_defaults(app, client, app_ctx, req_ctx):
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
assert ctx.request.url == 'http://localhost/'
with app.test_client() as c:
rv = c.get('/')
assert rv.data == b'http://localhost/'
def test_environ_base_default(app, client, app_ctx):
app.testing = True
@app.route('/')
def index():
flask.g.user_agent = flask.request.headers["User-Agent"]
return flask.request.remote_addr
rv = client.get('/')
assert rv.data == b'127.0.0.1'
assert flask.g.user_agent == 'werkzeug/' + werkzeug.__version__
def test_environ_base_modified(app, client, app_ctx):
@app.route('/')
def index():
flask.g.user_agent = flask.request.headers["User-Agent"]
return flask.request.remote_addr
client.environ_base['REMOTE_ADDR'] = '0.0.0.0'
client.environ_base['HTTP_USER_AGENT'] = 'Foo'
rv = client.get('/')
assert rv.data == b'0.0.0.0'
assert flask.g.user_agent == 'Foo'
client.environ_base['REMOTE_ADDR'] = '0.0.0.1'
client.environ_base['HTTP_USER_AGENT'] = 'Bar'
rv = client.get('/')
assert rv.data == b'0.0.0.1'
assert flask.g.user_agent == 'Bar'
def test_redirect_keep_session(app, client, app_ctx):
app.secret_key = 'testing'
@app.route('/', methods=['GET', 'POST'])
def index():
if flask.request.method == 'POST':
return flask.redirect('/getsession')
flask.session['data'] = 'foo'
return 'index'
@app.route('/getsession')
def get_session():
return flask.session.get('data', '<missing>')
with client as c:
rv = c.get('/getsession')
assert rv.data == b'<missing>'
rv = c.get('/')
assert rv.data == b'index'
assert flask.session.get('data') == 'foo'
rv = c.post('/', data={}, follow_redirects=True)
assert rv.data == b'foo'
# This support requires a new Werkzeug version
if not hasattr(c, 'redirect_client'):
assert flask.session.get('data') == 'foo'
rv = c.get('/getsession')
assert rv.data == b'foo'
def test_session_transactions(app):
app.secret_key = 'testing'
@app.route('/')
def index():
return text_type(flask.session['foo'])
with app.test_client() as c:
with c.session_transaction() as sess:
assert len(sess) == 0
sess['foo'] = [42]
assert len(sess) == 1
rv = c.get('/')
assert rv.data == b'[42]'
with c.session_transaction() as sess:
assert len(sess) == 1
assert sess['foo'] == [42]
def test_session_transactions_no_null_sessions():
app = flask.Flask(__name__)
app.testing = True
with app.test_client() as c:
with pytest.raises(RuntimeError) as e:
with c.session_transaction() as sess:
pass
assert 'Session backend did not open a session' in str(e.value)
def test_session_transactions_keep_context(app, client, req_ctx):
app.secret_key = 'testing'
rv = client.get('/')
req = flask.request._get_current_object()
assert req is not None
with client.session_transaction():
assert req is flask.request._get_current_object()
def test_session_transaction_needs_cookies(app):
c = app.test_client(use_cookies=False)
with pytest.raises(RuntimeError) as e:
with c.session_transaction() as s:
pass
assert 'cookies' in str(e.value)
def test_test_client_context_binding():
app = flask.Flask(__name__)
app.config['LOGGER_HANDLER_POLICY'] = 'never'
@app.route('/')
def index():
flask.g.value = 42
return 'Hello World!'
@app.route('/other')
def other():
1 // 0
with app.test_client() as c:
resp = c.get('/')
assert flask.g.value == 42
assert resp.data == b'Hello World!'
assert resp.status_code == 200
resp = c.get('/other')
assert not hasattr(flask.g, 'value')
assert b'Internal Server Error' in resp.data
assert resp.status_code == 500
flask.g.value = 23
try:
flask.g.value
except (AttributeError, RuntimeError):
pass
else:
raise AssertionError('some kind of exception expected')
def test_reuse_client():
app = flask.Flask(__name__)
c = app.test_client()
with c:
assert c.get('/').status_code == 404
with c:
assert c.get('/').status_code == 404
def test_test_client_calls_teardown_handlers():
app = flask.Flask(__name__)
called = []
@app.teardown_request
def remember(error):
called.append(error)
with app.test_client() as c:
assert called == []
c.get('/')
assert called == []
assert called == [None]
del called[:]
with app.test_client() as c:
assert called == []
c.get('/')
assert called == []
c.get('/')
assert called == [None]
assert called == [None, None]
def test_full_url_request():
app = flask.Flask(__name__)
app.testing = True
@app.route('/action', methods=['POST'])
def action():
return 'x'
with app.test_client() as c:
rv = c.post('http://domain.com/action?vodka=42', data={'gin': 43})
assert rv.status_code == 200
assert 'gin' in flask.request.form
assert 'vodka' in flask.request.args
def test_subdomain():
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'example.com'
@app.route('/', subdomain='<company_id>')
def view(company_id):
return company_id
with app.test_request_context():
url = flask.url_for('view', company_id='xxx')
with app.test_client() as c:
response = c.get(url)
assert 200 == response.status_code
assert b'xxx' == response.data
def test_nosubdomain():
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'example.com'
@app.route('/<company_id>')
def view(company_id):
return company_id
with app.test_request_context():
url = flask.url_for('view', company_id='xxx')
with app.test_client() as c:
response = c.get(url)
assert 200 == response.status_code
assert b'xxx' == response.data
|
|
"""Support for Automation Device Specification (ADS)."""
import threading
import struct
import logging
import ctypes
from collections import namedtuple
import asyncio
import async_timeout
import voluptuous as vol
from homeassistant.const import (
CONF_DEVICE,
CONF_IP_ADDRESS,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DATA_ADS = "data_ads"
# Supported Types
ADSTYPE_BOOL = "bool"
ADSTYPE_BYTE = "byte"
ADSTYPE_DINT = "dint"
ADSTYPE_INT = "int"
ADSTYPE_UDINT = "udint"
ADSTYPE_UINT = "uint"
CONF_ADS_FACTOR = "factor"
CONF_ADS_TYPE = "adstype"
CONF_ADS_VALUE = "value"
CONF_ADS_VAR = "adsvar"
CONF_ADS_VAR_BRIGHTNESS = "adsvar_brightness"
CONF_ADS_VAR_POSITION = "adsvar_position"
STATE_KEY_STATE = "state"
STATE_KEY_BRIGHTNESS = "brightness"
STATE_KEY_POSITION = "position"
DOMAIN = "ads"
SERVICE_WRITE_DATA_BY_NAME = "write_data_by_name"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_DEVICE): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_IP_ADDRESS): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
SCHEMA_SERVICE_WRITE_DATA_BY_NAME = vol.Schema(
{
vol.Required(CONF_ADS_TYPE): vol.In(
[
ADSTYPE_INT,
ADSTYPE_UINT,
ADSTYPE_BYTE,
ADSTYPE_BOOL,
ADSTYPE_DINT,
ADSTYPE_UDINT,
]
),
vol.Required(CONF_ADS_VALUE): vol.Coerce(int),
vol.Required(CONF_ADS_VAR): cv.string,
}
)
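# Illustrative example of service data accepted by the schema above (the PLC
# variable name is hypothetical):
#   {"adsvar": "GVL.bEnablePump", "adstype": "bool", "value": 1}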
def setup(hass, config):
"""Set up the ADS component."""
import pyads
conf = config[DOMAIN]
net_id = conf.get(CONF_DEVICE)
ip_address = conf.get(CONF_IP_ADDRESS)
port = conf.get(CONF_PORT)
client = pyads.Connection(net_id, port, ip_address)
AdsHub.ADS_TYPEMAP = {
ADSTYPE_BOOL: pyads.PLCTYPE_BOOL,
ADSTYPE_BYTE: pyads.PLCTYPE_BYTE,
ADSTYPE_DINT: pyads.PLCTYPE_DINT,
ADSTYPE_INT: pyads.PLCTYPE_INT,
ADSTYPE_UDINT: pyads.PLCTYPE_UDINT,
ADSTYPE_UINT: pyads.PLCTYPE_UINT,
}
AdsHub.ADSError = pyads.ADSError
AdsHub.PLCTYPE_BOOL = pyads.PLCTYPE_BOOL
AdsHub.PLCTYPE_BYTE = pyads.PLCTYPE_BYTE
AdsHub.PLCTYPE_DINT = pyads.PLCTYPE_DINT
AdsHub.PLCTYPE_INT = pyads.PLCTYPE_INT
AdsHub.PLCTYPE_UDINT = pyads.PLCTYPE_UDINT
AdsHub.PLCTYPE_UINT = pyads.PLCTYPE_UINT
try:
ads = AdsHub(client)
except pyads.ADSError:
_LOGGER.error(
"Could not connect to ADS host (netid=%s, ip=%s, port=%s)",
net_id,
ip_address,
port,
)
return False
hass.data[DATA_ADS] = ads
hass.bus.listen(EVENT_HOMEASSISTANT_STOP, ads.shutdown)
def handle_write_data_by_name(call):
"""Write a value to the connected ADS device."""
ads_var = call.data.get(CONF_ADS_VAR)
ads_type = call.data.get(CONF_ADS_TYPE)
value = call.data.get(CONF_ADS_VALUE)
try:
ads.write_by_name(ads_var, value, ads.ADS_TYPEMAP[ads_type])
except pyads.ADSError as err:
_LOGGER.error(err)
hass.services.register(
DOMAIN,
SERVICE_WRITE_DATA_BY_NAME,
handle_write_data_by_name,
schema=SCHEMA_SERVICE_WRITE_DATA_BY_NAME,
)
return True
# Tuple to hold data needed for notification
NotificationItem = namedtuple(
"NotificationItem", "hnotify huser name plc_datatype callback"
)
class AdsHub:
"""Representation of an ADS connection."""
def __init__(self, ads_client):
"""Initialize the ADS hub."""
self._client = ads_client
self._client.open()
# All ADS devices are registered here
self._devices = []
self._notification_items = {}
self._lock = threading.Lock()
def shutdown(self, *args, **kwargs):
"""Shutdown ADS connection."""
import pyads
_LOGGER.debug("Shutting down ADS")
for notification_item in self._notification_items.values():
_LOGGER.debug(
"Deleting device notification %d, %d",
notification_item.hnotify,
notification_item.huser,
)
try:
self._client.del_device_notification(
notification_item.hnotify, notification_item.huser
)
except pyads.ADSError as err:
_LOGGER.error(err)
try:
self._client.close()
except pyads.ADSError as err:
_LOGGER.error(err)
def register_device(self, device):
"""Register a new device."""
self._devices.append(device)
def write_by_name(self, name, value, plc_datatype):
"""Write a value to the device."""
import pyads
with self._lock:
try:
return self._client.write_by_name(name, value, plc_datatype)
except pyads.ADSError as err:
_LOGGER.error("Error writing %s: %s", name, err)
def read_by_name(self, name, plc_datatype):
"""Read a value from the device."""
import pyads
with self._lock:
try:
return self._client.read_by_name(name, plc_datatype)
except pyads.ADSError as err:
_LOGGER.error("Error reading %s: %s", name, err)
def add_device_notification(self, name, plc_datatype, callback):
"""Add a notification to the ADS devices."""
import pyads
attr = pyads.NotificationAttrib(ctypes.sizeof(plc_datatype))
with self._lock:
try:
hnotify, huser = self._client.add_device_notification(
name, attr, self._device_notification_callback
)
except pyads.ADSError as err:
_LOGGER.error("Error subscribing to %s: %s", name, err)
else:
hnotify = int(hnotify)
self._notification_items[hnotify] = NotificationItem(
hnotify, huser, name, plc_datatype, callback
)
_LOGGER.debug(
"Added device notification %d for variable %s", hnotify, name
)
def _device_notification_callback(self, notification, name):
"""Handle device notifications."""
contents = notification.contents
hnotify = int(contents.hNotification)
_LOGGER.debug("Received notification %d", hnotify)
data = contents.data
try:
with self._lock:
notification_item = self._notification_items[hnotify]
except KeyError:
_LOGGER.error("Unknown device notification handle: %d", hnotify)
return
# Parse data to desired datatype
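# For example, a PLCTYPE_INT notification carrying the value 42 arrives as
# the little-endian bytes b"\x2a\x00", which struct.unpack("<h", ...) below
# decodes back to 42 (hypothetical value, shown for illustration).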
if notification_item.plc_datatype == self.PLCTYPE_BOOL:
value = bool(struct.unpack("<?", bytearray(data)[:1])[0])
elif notification_item.plc_datatype == self.PLCTYPE_INT:
value = struct.unpack("<h", bytearray(data)[:2])[0]
elif notification_item.plc_datatype == self.PLCTYPE_BYTE:
value = struct.unpack("<B", bytearray(data)[:1])[0]
elif notification_item.plc_datatype == self.PLCTYPE_UINT:
value = struct.unpack("<H", bytearray(data)[:2])[0]
elif notification_item.plc_datatype == self.PLCTYPE_DINT:
value = struct.unpack("<i", bytearray(data)[:4])[0]
elif notification_item.plc_datatype == self.PLCTYPE_UDINT:
value = struct.unpack("<I", bytearray(data)[:4])[0]
else:
value = bytearray(data)
_LOGGER.warning("No callback available for this datatype")
notification_item.callback(notification_item.name, value)
class AdsEntity(Entity):
"""Representation of ADS entity."""
def __init__(self, ads_hub, name, ads_var):
"""Initialize ADS binary sensor."""
self._name = name
self._unique_id = ads_var
self._state_dict = {}
self._state_dict[STATE_KEY_STATE] = None
self._ads_hub = ads_hub
self._ads_var = ads_var
self._event = None
async def async_initialize_device(
self, ads_var, plctype, state_key=STATE_KEY_STATE, factor=None
):
"""Register device notification."""
def update(name, value):
"""Handle device notifications."""
_LOGGER.debug("Variable %s changed its value to %d", name, value)
if factor is None:
self._state_dict[state_key] = value
else:
self._state_dict[state_key] = value / factor
asyncio.run_coroutine_threadsafe(async_event_set(), self.hass.loop)
self.schedule_update_ha_state()
async def async_event_set():
"""Set event in async context."""
self._event.set()
self._event = asyncio.Event()
await self.hass.async_add_executor_job(
self._ads_hub.add_device_notification, ads_var, plctype, update
)
try:
with async_timeout.timeout(10):
await self._event.wait()
except asyncio.TimeoutError:
_LOGGER.debug("Variable %s: Timeout during first update", ads_var)
@property
def name(self):
"""Return the default name of the binary sensor."""
return self._name
@property
def unique_id(self):
"""Return an unique identifier for this entity."""
return self._unique_id
@property
def should_poll(self):
"""Return False because entity pushes its state to HA."""
return False
@property
def available(self):
"""Return False if state has not been updated yet."""
return self._state_dict[STATE_KEY_STATE] is not None
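# --- Illustrative usage (hypothetical platform code, not part of this file) --
# A platform entity subclassing AdsEntity would typically register its PLC
# variable once it has been added to Home Assistant, e.g.:
#
#     async def async_added_to_hass(self):
#         await self.async_initialize_device(self._ads_var, self._ads_hub.PLCTYPE_INT)
#
# after which updates arrive via the device notification callback above.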
|
|
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import calendar
import email.utils
import httparchive
import unittest
def create_request(headers):
return httparchive.ArchivedHttpRequest(
'GET', 'www.test.com', '/', None, headers)
def create_response(headers):
return httparchive.ArchivedHttpResponse(
11, 200, 'OK', headers, '')
class HttpArchiveTest(unittest.TestCase):
REQUEST_HEADERS = {}
REQUEST = create_request(REQUEST_HEADERS)
# Used for if-(un)modified-since checks
DATE_PAST = 'Wed, 13 Jul 2011 03:58:08 GMT'
DATE_PRESENT = 'Wed, 20 Jul 2011 04:58:08 GMT'
DATE_FUTURE = 'Wed, 27 Jul 2011 05:58:08 GMT'
DATE_INVALID = 'This is an invalid date!!'
# etag values
ETAG_VALID = 'etag'
ETAG_INVALID = 'This is an invalid etag value!!'
RESPONSE_HEADERS = [('last-modified', DATE_PRESENT), ('etag', ETAG_VALID)]
RESPONSE = create_response(RESPONSE_HEADERS)
def setUp(self):
self.archive = httparchive.HttpArchive()
self.archive[self.REQUEST] = self.RESPONSE
# Also add an identical POST request for testing
request = httparchive.ArchivedHttpRequest(
'POST', 'www.test.com', '/', None, self.REQUEST_HEADERS)
self.archive[request] = self.RESPONSE
def tearDown(self):
pass
def test_init(self):
archive = httparchive.HttpArchive()
self.assertEqual(len(archive), 0)
def test_request__TrimHeaders(self):
request = httparchive.ArchivedHttpRequest
header1 = {'accept-encoding': 'gzip,deflate'}
self.assertEqual(request._TrimHeaders(header1),
[(k, v) for k, v in header1.items()])
header2 = {'referer': 'www.google.com'}
self.assertEqual(request._TrimHeaders(header2), [])
header3 = {'referer': 'www.google.com', 'cookie': 'cookie_monster!',
'hello': 'world'}
self.assertEqual(request._TrimHeaders(header3), [('hello', 'world')])
# Tests that spaces and trailing comma get stripped.
header4 = {'accept-encoding': 'gzip, deflate,, '}
self.assertEqual(request._TrimHeaders(header4),
[('accept-encoding', 'gzip,deflate')])
# Tests that 'lzma' gets stripped.
header5 = {'accept-encoding': 'gzip, deflate, lzma'}
self.assertEqual(request._TrimHeaders(header5),
[('accept-encoding', 'gzip,deflate')])
# Tests that x-client-data gets stripped.
header6 = {'x-client-data': 'testdata'}
self.assertEqual(request._TrimHeaders(header6), [])
def test_matches(self):
headers = {}
request1 = httparchive.ArchivedHttpRequest(
'GET', 'www.test.com', '/index.html?hello=world', None, headers)
request2 = httparchive.ArchivedHttpRequest(
'GET', 'www.test.com', '/index.html?foo=bar', None, headers)
self.assert_(not request1.matches(
request2.command, request2.host, request2.full_path, use_query=True))
self.assert_(request1.matches(
request2.command, request2.host, request2.full_path, use_query=False))
self.assert_(request1.matches(
request2.command, request2.host, None, use_query=True))
self.assert_(request1.matches(
request2.command, None, request2.full_path, use_query=False))
empty_request = httparchive.ArchivedHttpRequest(
None, None, None, None, headers)
self.assert_(not empty_request.matches(
request2.command, request2.host, None, use_query=True))
self.assert_(not empty_request.matches(
request2.command, None, request2.full_path, use_query=False))
def setup_find_closest_request(self):
headers = {}
request1 = httparchive.ArchivedHttpRequest(
'GET', 'www.test.com', '/a?hello=world', None, headers)
request2 = httparchive.ArchivedHttpRequest(
'GET', 'www.test.com', '/a?foo=bar', None, headers)
request3 = httparchive.ArchivedHttpRequest(
'GET', 'www.test.com', '/b?hello=world', None, headers)
request4 = httparchive.ArchivedHttpRequest(
'GET', 'www.test.com', '/c?hello=world', None, headers)
archive = httparchive.HttpArchive()
# Add requests 2 and 3 and find closest match with request1
archive[request2] = self.RESPONSE
archive[request3] = self.RESPONSE
return archive, request1, request2, request3, request4
def test_find_closest_request(self):
archive, request1, request2, request3, request4 = (
self.setup_find_closest_request())
# Always favor requests with same paths, even if use_path=False.
self.assertEqual(
request2, archive.find_closest_request(request1, use_path=False))
# If we match strictly on path, request2 is the only match
self.assertEqual(
request2, archive.find_closest_request(request1, use_path=True))
# request4 can be matched with request3, if use_path=False
self.assertEqual(
request3, archive.find_closest_request(request4, use_path=False))
# ...but None, if use_path=True
self.assertEqual(
None, archive.find_closest_request(request4, use_path=True))
def test_find_closest_request_delete_simple(self):
archive, request1, request2, request3, request4 = (
self.setup_find_closest_request())
del archive[request3]
self.assertEqual(
request2, archive.find_closest_request(request1, use_path=False))
self.assertEqual(
request2, archive.find_closest_request(request1, use_path=True))
def test_find_closest_request_delete_complex(self):
archive, request1, request2, request3, request4 = (
self.setup_find_closest_request())
del archive[request2]
self.assertEqual(
request3, archive.find_closest_request(request1, use_path=False))
self.assertEqual(
None, archive.find_closest_request(request1, use_path=True))
def test_find_closest_request_timestamp(self):
headers = {}
request1 = httparchive.ArchivedHttpRequest(
'GET', 'www.test.com', '/index.html?time=100000000&important=true',
None, headers)
request2 = httparchive.ArchivedHttpRequest(
'GET', 'www.test.com', '/index.html?time=99999999&important=true',
None, headers)
request3 = httparchive.ArchivedHttpRequest(
'GET', 'www.test.com', '/index.html?time=10000000&important=false',
None, headers)
archive = httparchive.HttpArchive()
# Add requests 2 and 3 and find closest match with request1
archive[request2] = self.RESPONSE
archive[request3] = self.RESPONSE
# Although request3 is lexicographically closer, request2 is semantically
# more similar.
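# (request3 shares the longer common prefix "time=10000000" with request1,
# but request2 matches the important=true parameter and its timestamp
# differs by only one.)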
self.assertEqual(
request2, archive.find_closest_request(request1, use_path=True))
def test_get_cmp_seq(self):
# The order of key-value pairs in query and header respectively should not
# matter.
headers = {'k2': 'v2', 'k1': 'v1'}
request = httparchive.ArchivedHttpRequest(
'GET', 'www.test.com', '/a?c=d&a=b;e=f', None, headers)
self.assertEqual([('a', 'b'), ('c', 'd'), ('e', 'f'),
('k1', 'v1'), ('k2', 'v2')],
request._GetCmpSeq('c=d&a=b;e=f'))
def test_get_simple(self):
request = self.REQUEST
response = self.RESPONSE
archive = self.archive
self.assertEqual(archive.get(request), response)
false_request_headers = {'foo': 'bar'}
false_request = create_request(false_request_headers)
self.assertEqual(archive.get(false_request, default=None), None)
def test_get_modified_headers(self):
request = self.REQUEST
response = self.RESPONSE
archive = self.archive
not_modified_response = httparchive.create_response(304)
# Fail check and return response again
request_headers = {'if-modified-since': self.DATE_PAST}
request = create_request(request_headers)
self.assertEqual(archive.get(request), response)
# Succeed check and return 304 Not Modified
request_headers = {'if-modified-since': self.DATE_FUTURE}
request = create_request(request_headers)
self.assertEqual(archive.get(request), not_modified_response)
# Succeed check and return 304 Not Modified
request_headers = {'if-modified-since': self.DATE_PRESENT}
request = create_request(request_headers)
self.assertEqual(archive.get(request), not_modified_response)
# Invalid date, fail check and return response again
request_headers = {'if-modified-since': self.DATE_INVALID}
request = create_request(request_headers)
self.assertEqual(archive.get(request), response)
# fail check since the request is not a GET or HEAD request (as per RFC)
request_headers = {'if-modified-since': self.DATE_FUTURE}
request = httparchive.ArchivedHttpRequest(
'POST', 'www.test.com', '/', None, request_headers)
self.assertEqual(archive.get(request), response)
def test_get_unmodified_headers(self):
request = self.REQUEST
response = self.RESPONSE
archive = self.archive
not_modified_response = httparchive.create_response(304)
# Succeed check
request_headers = {'if-unmodified-since': self.DATE_PAST}
request = create_request(request_headers)
self.assertEqual(archive.get(request), not_modified_response)
# Fail check
request_headers = {'if-unmodified-since': self.DATE_FUTURE}
request = create_request(request_headers)
self.assertEqual(archive.get(request), response)
# Succeed check
request_headers = {'if-unmodified-since': self.DATE_PRESENT}
request = create_request(request_headers)
self.assertEqual(archive.get(request), not_modified_response)
# Fail check
request_headers = {'if-unmodified-since': self.DATE_INVALID}
request = create_request(request_headers)
self.assertEqual(archive.get(request), response)
# Fail check since the request is not a GET or HEAD request (as per RFC)
request_headers = {'if-modified-since': self.DATE_PAST}
request = httparchive.ArchivedHttpRequest(
'POST', 'www.test.com', '/', None, request_headers)
self.assertEqual(archive.get(request), response)
def test_get_etags(self):
request = self.REQUEST
response = self.RESPONSE
archive = self.archive
not_modified_response = httparchive.create_response(304)
precondition_failed_response = httparchive.create_response(412)
# if-match headers
request_headers = {'if-match': self.ETAG_VALID}
request = create_request(request_headers)
self.assertEqual(archive.get(request), response)
request_headers = {'if-match': self.ETAG_INVALID}
request = create_request(request_headers)
self.assertEqual(archive.get(request), precondition_failed_response)
# if-none-match headers
request_headers = {'if-none-match': self.ETAG_VALID}
request = create_request(request_headers)
self.assertEqual(archive.get(request), not_modified_response)
request_headers = {'if-none-match': self.ETAG_INVALID}
request = create_request(request_headers)
self.assertEqual(archive.get(request), response)
def test_get_multiple_match_headers(self):
request = self.REQUEST
response = self.RESPONSE
archive = self.archive
not_modified_response = httparchive.create_response(304)
precondition_failed_response = httparchive.create_response(412)
# if-match headers
# If the request would, without the If-Match header field,
# result in anything other than a 2xx or 412 status,
# then the If-Match header MUST be ignored.
request_headers = {
'if-match': self.ETAG_VALID,
'if-modified-since': self.DATE_PAST,
}
request = create_request(request_headers)
self.assertEqual(archive.get(request), response)
# Invalid etag, precondition failed
request_headers = {
'if-match': self.ETAG_INVALID,
'if-modified-since': self.DATE_PAST,
}
request = create_request(request_headers)
self.assertEqual(archive.get(request), precondition_failed_response)
# 304 response; ignore if-match header
request_headers = {
'if-match': self.ETAG_VALID,
'if-modified-since': self.DATE_FUTURE,
}
request = create_request(request_headers)
self.assertEqual(archive.get(request), not_modified_response)
# 304 response; ignore if-match header
request_headers = {
'if-match': self.ETAG_INVALID,
'if-modified-since': self.DATE_PRESENT,
}
request = create_request(request_headers)
self.assertEqual(archive.get(request), not_modified_response)
# Invalid etag, precondition failed
request_headers = {
'if-match': self.ETAG_INVALID,
'if-modified-since': self.DATE_INVALID,
}
request = create_request(request_headers)
self.assertEqual(archive.get(request), precondition_failed_response)
def test_get_multiple_none_match_headers(self):
request = self.REQUEST
response = self.RESPONSE
archive = self.archive
not_modified_response = httparchive.create_response(304)
precondition_failed_response = httparchive.create_response(412)
# if-none-match headers
# If the request would, without the If-None-Match header field,
# result in anything other than a 2xx or 304 status,
# then the If-None-Match header MUST be ignored.
request_headers = {
'if-none-match': self.ETAG_VALID,
'if-modified-since': self.DATE_PAST,
}
request = create_request(request_headers)
self.assertEqual(archive.get(request), response)
request_headers = {
'if-none-match': self.ETAG_INVALID,
'if-modified-since': self.DATE_PAST,
}
request = create_request(request_headers)
self.assertEqual(archive.get(request), response)
# etag match; the GET returns 304 Not Modified
request_headers = {
'if-none-match': self.ETAG_VALID,
'if-modified-since': self.DATE_FUTURE,
}
request = create_request(request_headers)
self.assertEqual(archive.get(request), not_modified_response)
request_headers = {
'if-none-match': self.ETAG_INVALID,
'if-modified-since': self.DATE_PRESENT,
}
request = create_request(request_headers)
self.assertEqual(archive.get(request), not_modified_response)
request_headers = {
'if-none-match': self.ETAG_INVALID,
'if-modified-since': self.DATE_INVALID,
}
request = create_request(request_headers)
self.assertEqual(archive.get(request), response)
def test_response__TrimHeaders(self):
response = httparchive.ArchivedHttpResponse
header1 = [('access-control-allow-origin', '*'),
('content-type', 'image/jpeg'),
('content-length', 2878)]
self.assertEqual(response._TrimHeaders(header1), header1)
header2 = [('content-type', 'text/javascript; charset=utf-8'),
('connection', 'keep-alive'),
('cache-control', 'private, must-revalidate, max-age=0'),
('content-encoding', 'gzip')]
self.assertEqual(response._TrimHeaders(header2), header2)
header3 = [('content-security-policy', """\
default-src 'self' http://*.cnn.com:* https://*.cnn.com:* \
*.cnn.net:* *.turner.com:* *.ugdturner.com:* *.vgtf.net:*; \
script-src 'unsafe-inline' 'unsafe-eval' 'self' *; \
style-src 'unsafe-inline' 'self' *; frame-src 'self' *; \
object-src 'self' *; img-src 'self' * data:; media-src 'self' *; \
font-src 'self' *; connect-src 'self' *"""),
('access-control-allow-origin', '*'),
('content-type', 'text/html; charset=utf-8'),
('content-encoding', 'gzip')]
self.assertEqual(response._TrimHeaders(header3), [
('access-control-allow-origin', '*'),
('content-type', 'text/html; charset=utf-8'),
('content-encoding', 'gzip')
])
header4 = [('content-security-policy', """\
default-src * data: blob:;script-src *.facebook.com *.fbcdn.net \
*.facebook.net *.google-analytics.com *.virtualearth.net *.google.com \
127.0.0.1:* *.spotilocal.com:* 'unsafe-inline' 'unsafe-eval' \
fbstatic-a.akamaihd.net fbcdn-static-b-a.akamaihd.net *.atlassolutions.com \
blob: chrome-extension://lifbcibllhkdhoafpjfnlhfpfgnpldfl \
*.liverail.com;style-src * 'unsafe-inline' data:;connect-src *.facebook.com \
*.fbcdn.net *.facebook.net *.spotilocal.com:* *.akamaihd.net \
wss://*.facebook.com:* https://fb.scanandcleanlocal.com:* \
*.atlassolutions.com attachment.fbsbx.com ws://localhost:* \
blob: 127.0.0.1:* *.liverail.com""")]
self.assertEqual(response._TrimHeaders(header4), [])
class ArchivedHttpResponse(unittest.TestCase):
PAST_DATE_A = 'Tue, 13 Jul 2010 03:47:07 GMT'
PAST_DATE_B = 'Tue, 13 Jul 2010 02:47:07 GMT' # PAST_DATE_A -1 hour
PAST_DATE_C = 'Tue, 13 Jul 2010 04:47:07 GMT' # PAST_DATE_A +1 hour
NOW_DATE_A = 'Wed, 20 Jul 2011 04:58:08 GMT'
NOW_DATE_B = 'Wed, 20 Jul 2011 03:58:08 GMT' # NOW_DATE_A -1 hour
NOW_DATE_C = 'Wed, 20 Jul 2011 05:58:08 GMT' # NOW_DATE_A +1 hour
NOW_SECONDS = calendar.timegm(email.utils.parsedate(NOW_DATE_A))
def setUp(self):
self.response = create_response([('date', self.PAST_DATE_A)])
def test_update_date_same_date(self):
self.assertEqual(
self.response.update_date(self.PAST_DATE_A, now=self.NOW_SECONDS),
self.NOW_DATE_A)
def test_update_date_before_date(self):
self.assertEqual(
self.response.update_date(self.PAST_DATE_B, now=self.NOW_SECONDS),
self.NOW_DATE_B)
def test_update_date_after_date(self):
self.assertEqual(
self.response.update_date(self.PAST_DATE_C, now=self.NOW_SECONDS),
self.NOW_DATE_C)
def test_update_date_bad_date_param(self):
self.assertEqual(
self.response.update_date('garbage date', now=self.NOW_SECONDS),
'garbage date')
def test_update_date_bad_date_header(self):
self.response.set_header('date', 'garbage date')
self.assertEqual(
self.response.update_date(self.PAST_DATE_B, now=self.NOW_SECONDS),
self.PAST_DATE_B)
if __name__ == '__main__':
unittest.main()
|
|
import os
import tempfile
from django.core.management import call_command
from django.test import TestCase
from django.test.utils import override_settings
from mock import call, patch
from kolibri.content.models import LocalFile
CONTENT_STORAGE_DIR_TEMP = tempfile.mkdtemp()
CONTENT_STORAGE_SOURCE_DIR = tempfile.mkdtemp()
@patch('kolibri.content.management.commands.importchannel.channel_import.import_channel_from_local_db')
@patch('kolibri.content.management.commands.importchannel.AsyncCommand.start_progress')
@override_settings(
CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP,
)
class ImportChannelTestCase(TestCase):
"""
Test case for the importchannel management command.
"""
the_channel_id = '6199dde695db4ee4ab392222d5af1e5c'
@patch('kolibri.content.management.commands.importchannel.paths.get_content_database_file_url')
@patch('kolibri.content.management.commands.importchannel.paths.get_content_database_file_path')
@patch('kolibri.content.management.commands.importchannel.transfer.FileDownload')
@patch('kolibri.content.management.commands.importchannel.AsyncCommand.cancel', return_value=True)
@patch('kolibri.content.management.commands.importchannel.AsyncCommand.is_cancelled', return_value=True)
def test_remote_cancel_during_transfer(self, is_cancelled_mock, cancel_mock, FileDownloadMock, local_path_mock, remote_path_mock, start_progress_mock,
import_channel_mock):
local_path = tempfile.mkstemp()[1]
local_path_mock.return_value = local_path
remote_path_mock.return_value = 'notest'
FileDownloadMock.return_value.__iter__.return_value = ['one', 'two', 'three']
call_command("importchannel", "network", self.the_channel_id)
# Check that is_cancelled was called
is_cancelled_mock.assert_called_with()
# Check that the FileDownload was initiated
FileDownloadMock.assert_called_with('notest', local_path)
# Check that cancel was called
cancel_mock.assert_called_with()
# Test that import channel cleans up database file if cancelled
self.assertFalse(os.path.exists(local_path))
import_channel_mock.assert_not_called()
@patch('kolibri.content.management.commands.importchannel.paths.get_content_database_file_path')
@patch('kolibri.content.management.commands.importchannel.transfer.FileCopy')
@patch('kolibri.content.management.commands.importchannel.AsyncCommand.cancel', return_value=True)
@patch('kolibri.content.management.commands.importchannel.AsyncCommand.is_cancelled', return_value=True)
def test_local_cancel_during_transfer(self, is_cancelled_mock, cancel_mock, FileCopyMock, local_path_mock, start_progress_mock, import_channel_mock):
local_dest_path = tempfile.mkstemp()[1]
local_src_path = tempfile.mkstemp()[1]
local_path_mock.side_effect = [local_dest_path, local_src_path]
FileCopyMock.return_value.__iter__.return_value = ['one', 'two', 'three']
call_command("importchannel", "disk", self.the_channel_id, CONTENT_STORAGE_SOURCE_DIR)
# Check that is_cancelled was called
is_cancelled_mock.assert_called_with()
# Check that the FileCopy was initiated
FileCopyMock.assert_called_with(local_src_path, local_dest_path)
# Check that cancel was called
cancel_mock.assert_called_with()
# Test that import channel cleans up database file if cancelled
self.assertFalse(os.path.exists(local_dest_path))
import_channel_mock.assert_not_called()
@patch('kolibri.content.management.commands.importcontent.annotation')
@override_settings(
CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP,
)
class ImportContentTestCase(TestCase):
"""
Test case for the importcontent management command.
"""
fixtures = ['content_test.json']
the_channel_id = '6199dde695db4ee4ab392222d5af1e5c'
def setUp(self):
LocalFile.objects.update(available=False)
@patch('kolibri.content.management.commands.importcontent.transfer.FileDownload')
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.cancel')
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.is_cancelled', return_value=True)
def test_remote_cancel_immediately(self, is_cancelled_mock, cancel_mock, FileDownloadMock, annotation_mock):
# Check behaviour if cancellation is called before any file download starts
FileDownloadMock.return_value.__iter__.return_value = ['one', 'two', 'three']
call_command("importcontent", "network", self.the_channel_id)
is_cancelled_mock.assert_has_calls([call(), call()])
FileDownloadMock.assert_not_called()
cancel_mock.assert_called_with()
annotation_mock.mark_local_files_as_available.assert_not_called()
annotation_mock.set_leaf_node_availability_from_local_file_availability.assert_not_called()
annotation_mock.recurse_availability_up_tree.assert_not_called()
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.start_progress')
@patch('kolibri.content.management.commands.importcontent.paths.get_content_storage_remote_url')
@patch('kolibri.content.management.commands.importcontent.paths.get_content_storage_file_path')
@patch('kolibri.content.management.commands.importcontent.transfer.FileDownload')
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.cancel')
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.is_cancelled', side_effect=[False, True, True, True])
def test_remote_cancel_during_transfer(self, is_cancelled_mock, cancel_mock, FileDownloadMock, local_path_mock, remote_path_mock, start_progress_mock,
annotation_mock):
# If transfer is cancelled during transfer of first file
local_path = tempfile.mkstemp()[1]
local_path_mock.return_value = local_path
remote_path_mock.return_value = 'notest'
# Mock this __iter__ so that the filetransfer can be looped over
FileDownloadMock.return_value.__iter__.return_value = ['one', 'two', 'three']
call_command("importcontent", "network", self.the_channel_id)
# is_cancelled should be called thrice.
is_cancelled_mock.assert_has_calls([call(), call(), call()])
# Should be set to the local path we mocked
FileDownloadMock.assert_called_with('notest', local_path)
# Check that it was cancelled when the command was cancelled, this ensures cleanup
FileDownloadMock.assert_has_calls([call().cancel()])
# Check that the command itself was also cancelled.
cancel_mock.assert_called_with()
annotation_mock.mark_local_files_as_available.assert_not_called()
annotation_mock.set_leaf_node_availability_from_local_file_availability.assert_not_called()
annotation_mock.recurse_availability_up_tree.assert_not_called()
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.start_progress')
@patch('kolibri.content.management.commands.importcontent.paths.get_content_storage_remote_url')
@patch('kolibri.content.management.commands.importcontent.paths.get_content_storage_file_path')
@patch('kolibri.content.management.commands.importcontent.transfer.FileDownload')
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.cancel')
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.is_cancelled', side_effect=[False, False, False, False, False, True, True, True])
def test_remote_cancel_after_file_copy_file_not_deleted(self, is_cancelled_mock, cancel_mock, FileDownloadMock, local_path_mock, remote_path_mock,
start_progress_mock, annotation_mock):
# If transfer is cancelled after transfer of first file
local_path_1 = tempfile.mkstemp()[1]
local_path_2 = tempfile.mkstemp()[1]
local_path_mock.side_effect = [local_path_1, local_path_2]
remote_path_mock.return_value = 'notest'
# Mock this __iter__ so that the filetransfer can be looped over
FileDownloadMock.return_value.__iter__.return_value = ['one', 'two', 'three']
call_command("importcontent", "network", self.the_channel_id)
# Check that the command itself was also cancelled.
cancel_mock.assert_called_with()
# Check that the temp file that the first file was being downloaded to has not been deleted
self.assertTrue(os.path.exists(local_path_1))
annotation_mock.set_availability.assert_called()
@patch('kolibri.content.management.commands.importcontent.transfer.FileCopy')
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.cancel')
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.is_cancelled', return_value=True)
def test_local_cancel_immediately(self, is_cancelled_mock, cancel_mock, FileCopyMock, annotation_mock):
# Local version of test above
FileCopyMock.return_value.__iter__.return_value = ['one', 'two', 'three']
call_command("importcontent", "disk", self.the_channel_id, CONTENT_STORAGE_SOURCE_DIR)
is_cancelled_mock.assert_has_calls([call(), call()])
FileCopyMock.assert_not_called()
cancel_mock.assert_called_with()
annotation_mock.mark_local_files_as_available.assert_not_called()
annotation_mock.set_leaf_node_availability_from_local_file_availability.assert_not_called()
annotation_mock.recurse_availability_up_tree.assert_not_called()
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.start_progress')
@patch('kolibri.content.management.commands.importcontent.paths.get_content_storage_file_path')
@patch('kolibri.content.management.commands.importcontent.transfer.FileCopy')
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.cancel')
@patch('kolibri.content.management.commands.importcontent.AsyncCommand.is_cancelled', side_effect=[False, True, True, True])
def test_local_cancel_during_transfer(self, is_cancelled_mock, cancel_mock, FileCopyMock, local_path_mock, start_progress_mock, annotation_mock):
# Local version of test above
local_dest_path = tempfile.mkstemp()[1]
local_src_path = tempfile.mkstemp()[1]
local_path_mock.side_effect = [local_dest_path, local_src_path]
FileCopyMock.return_value.__iter__.return_value = ['one', 'two', 'three']
call_command("importcontent", "disk", self.the_channel_id, CONTENT_STORAGE_SOURCE_DIR)
is_cancelled_mock.assert_has_calls([call(), call(), call()])
FileCopyMock.assert_called_with(local_src_path, local_dest_path)
FileCopyMock.assert_has_calls([call().cancel()])
cancel_mock.assert_called_with()
annotation_mock.set_availability.assert_called()
@override_settings(
CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP,
)
class ExportChannelTestCase(TestCase):
"""
Test case for the exportchannel management command.
"""
the_channel_id = '6199dde695db4ee4ab392222d5af1e5c'
@patch('kolibri.content.management.commands.exportchannel.AsyncCommand.start_progress')
@patch('kolibri.content.management.commands.exportchannel.paths.get_content_database_file_path')
@patch('kolibri.content.management.commands.exportchannel.transfer.FileCopy')
@patch('kolibri.content.management.commands.exportchannel.AsyncCommand.cancel')
@patch('kolibri.content.management.commands.exportchannel.AsyncCommand.is_cancelled', return_value=True)
def test_cancel_during_transfer(self, is_cancelled_mock, cancel_mock, FileCopyMock, local_path_mock, start_progress_mock):
# Make sure we clean up a database file that is canceled during export
local_dest_path = tempfile.mkstemp()[1]
local_src_path = tempfile.mkstemp()[1]
local_path_mock.side_effect = [local_src_path, local_dest_path]
FileCopyMock.return_value.__iter__.return_value = ['one', 'two', 'three']
call_command("exportchannel", self.the_channel_id, local_dest_path)
is_cancelled_mock.assert_called_with()
FileCopyMock.assert_called_with(local_src_path, local_dest_path)
cancel_mock.assert_called_with()
self.assertFalse(os.path.exists(local_dest_path))
@override_settings(
CONTENT_STORAGE_DIR=CONTENT_STORAGE_DIR_TEMP,
)
class ExportContentTestCase(TestCase):
"""
Test case for the exportcontent management command.
"""
fixtures = ['content_test.json']
the_channel_id = '6199dde695db4ee4ab392222d5af1e5c'
@patch('kolibri.content.management.commands.exportcontent.transfer.FileCopy')
@patch('kolibri.content.management.commands.exportcontent.AsyncCommand.cancel')
@patch('kolibri.content.management.commands.exportcontent.AsyncCommand.is_cancelled', return_value=True)
def test_local_cancel_immediately(self, is_cancelled_mock, cancel_mock, FileCopyMock):
# If cancel comes in before we do anything, make sure nothing happens!
FileCopyMock.return_value.__iter__.return_value = ['one', 'two', 'three']
call_command("exportcontent", self.the_channel_id, CONTENT_STORAGE_SOURCE_DIR)
is_cancelled_mock.assert_has_calls([call(), call()])
FileCopyMock.assert_not_called()
cancel_mock.assert_called_with()
@patch('kolibri.content.management.commands.exportcontent.AsyncCommand.start_progress')
@patch('kolibri.content.management.commands.exportcontent.paths.get_content_storage_file_path')
@patch('kolibri.content.management.commands.exportcontent.transfer.FileCopy')
@patch('kolibri.content.management.commands.exportcontent.AsyncCommand.cancel')
@patch('kolibri.content.management.commands.exportcontent.AsyncCommand.is_cancelled', side_effect=[False, True, True, True])
def test_local_cancel_during_transfer(self, is_cancelled_mock, cancel_mock, FileCopyMock, local_path_mock, start_progress_mock):
# Make sure we cancel during transfer
local_dest_path = tempfile.mkstemp()[1]
local_src_path = tempfile.mkstemp()[1]
local_path_mock.side_effect = [local_src_path, local_dest_path]
FileCopyMock.return_value.__iter__.return_value = ['one', 'two', 'three']
call_command("exportcontent", self.the_channel_id, CONTENT_STORAGE_SOURCE_DIR)
is_cancelled_mock.assert_has_calls([call(), call(), call()])
FileCopyMock.assert_called_with(local_src_path, local_dest_path)
FileCopyMock.assert_has_calls([call().cancel()])
cancel_mock.assert_called_with()
|
|
from __future__ import unicode_literals
from functools import reduce, wraps
import operator
import threading
from django.db import models
from django.db.models.base import ModelBase
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute
from django.utils import six
from django.utils.translation import ugettext as _
from mptt.fields import TreeForeignKey, TreeOneToOneField, TreeManyToManyField
from mptt.managers import TreeManager
from mptt.signals import node_moved
from mptt.utils import _get_tree_model
__all__ = (
'TreeForeignKey', 'TreeOneToOneField', 'TreeManyToManyField',
'TreeManager', 'MPTTOptions', 'MPTTModelBase', 'MPTTModel',
)
class _classproperty(object):
def __init__(self, getter, setter=None):
self.fget = getter
self.fset = setter
def __get__(self, cls, owner):
return self.fget(owner)
def __set__(self, cls, owner, value):
if not self.fset:
raise AttributeError("This classproperty is read only")
self.fset(owner, value)
class classpropertytype(property):
def __init__(self, name, bases=(), members={}):
return super(classpropertytype, self).__init__(
members.get('__get__'),
members.get('__set__'),
members.get('__delete__'),
members.get('__doc__')
)
classproperty = classpropertytype('classproperty')
class MPTTOptions(object):
"""
Options class for MPTT models. Use this as an inner class called ``MPTTMeta``::
class MyModel(MPTTModel):
class MPTTMeta:
order_insertion_by = ['name']
parent_attr = 'myparent'
"""
order_insertion_by = []
left_attr = 'lft'
right_attr = 'rght'
tree_id_attr = 'tree_id'
level_attr = 'level'
parent_attr = 'parent'
def __init__(self, opts=None, **kwargs):
# Override defaults with options provided
if opts:
opts = list(opts.__dict__.items())
else:
opts = []
opts.extend(list(kwargs.items()))
if 'tree_manager_attr' in [opt[0] for opt in opts]:
raise ValueError(
"`tree_manager_attr` has been removed; you should instantiate"
" a TreeManager as a normal manager on your model instead.")
for key, value in opts:
if key[:2] == '__':
continue
setattr(self, key, value)
# Normalize order_insertion_by to a list
if isinstance(self.order_insertion_by, six.string_types):
self.order_insertion_by = [self.order_insertion_by]
elif isinstance(self.order_insertion_by, tuple):
self.order_insertion_by = list(self.order_insertion_by)
elif self.order_insertion_by is None:
self.order_insertion_by = []
def __iter__(self):
return ((k, v) for k, v in self.__dict__.items() if k[0] != '_')
# Helper methods for accessing tree attributes on models.
def get_raw_field_value(self, instance, field_name):
"""
Gets the value of the given fieldname for the instance.
This is not the same as getattr().
This function will return IDs for foreignkeys etc, rather than doing
a database query.
"""
field = instance._meta.get_field(field_name)
return field.value_from_object(instance)
def set_raw_field_value(self, instance, field_name, value):
"""
Sets the value of the given fieldname for the instance.
This is not the same as setattr().
This function requires an ID for a foreignkey (etc) rather than an instance.
"""
field = instance._meta.get_field(field_name)
setattr(instance, field.attname, value)
def update_mptt_cached_fields(self, instance):
"""
Caches (in an instance._mptt_cached_fields dict) the original values of:
- parent pk
- fields specified in order_insertion_by
These are used in save() to determine if the relevant fields have changed,
so we know whether the MPTT fields need to be updated.
"""
instance._mptt_cached_fields = {}
field_names = set((self.parent_attr,))
if self.order_insertion_by:
for f in self.order_insertion_by:
if f[0] == '-':
f = f[1:]
field_names.add(f)
deferred_fields = instance.get_deferred_fields()
for field_name in field_names:
if deferred_fields:
field = instance._meta.get_field(field_name)
if field.attname in deferred_fields \
and field.attname not in instance.__dict__:
# deferred attribute (i.e. via .only() or .defer())
# It'd be silly to cache this (that'd do a database query)
# Instead, we mark it as a deferred attribute here, then
# assume it hasn't changed during save(), unless it's no
# longer deferred.
instance._mptt_cached_fields[field_name] = DeferredAttribute
continue
instance._mptt_cached_fields[field_name] = self.get_raw_field_value(
instance, field_name)
def insertion_target_filters(self, instance, order_insertion_by):
"""
Creates a filter which matches suitable right siblings for ``node``,
where insertion should maintain ordering according to the list of
fields in ``order_insertion_by``.
For example, given an ``order_insertion_by`` of
``['field1', 'field2', 'field3']``, the resulting filter should
correspond to the following SQL::
field1 > %s
OR (field1 = %s AND field2 > %s)
OR (field1 = %s AND field2 = %s AND field3 > %s)
"""
fields = []
filters = []
fields__append = fields.append
filters__append = filters.append
and_ = operator.and_
or_ = operator.or_
for field_name in order_insertion_by:
if field_name[0] == '-':
field_name = field_name[1:]
filter_suffix = '__lt'
else:
filter_suffix = '__gt'
value = getattr(instance, field_name)
if value is None:
# node isn't saved yet. get the insertion value from pre_save.
field = instance._meta.get_field(field_name)
value = field.pre_save(instance, True)
q = Q(**{field_name + filter_suffix: value})
filters__append(reduce(and_, [Q(**{f: v}) for f, v in fields] + [q]))
fields__append((field_name, value))
return reduce(or_, filters)
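# Illustrative sketch (editorial note, not part of upstream django-mptt): for a
# hypothetical model whose MPTTMeta declares order_insertion_by = ['name', 'size'],
# the filter returned above is equivalent to
#
#     Q(name__gt=node.name) | (Q(name=node.name) & Q(size__gt=node.size))
#
# i.e. it selects the first right sibling whose ordering fields sort after the
# node's own values, mirroring the SQL shown in the docstring.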
def get_ordered_insertion_target(self, node, parent):
"""
Attempts to retrieve a suitable right sibling for ``node``
underneath ``parent`` (which may be ``None`` in the case of root
nodes) so that ordering by the fields specified by the node's class'
``order_insertion_by`` option is maintained.
Returns ``None`` if no suitable sibling can be found.
"""
right_sibling = None
# Optimisation - if the parent doesn't have descendants,
# the node will always be its last child.
if parent is None or parent.get_descendant_count() > 0:
opts = node._mptt_meta
order_by = opts.order_insertion_by[:]
filters = self.insertion_target_filters(node, order_by)
if parent:
filters = filters & Q(**{opts.parent_attr: parent})
# Fall back on tree ordering if multiple child nodes have
# the same values.
order_by.append(opts.left_attr)
else:
filters = filters & Q(**{opts.parent_attr: None})
# Fall back on tree id ordering if multiple root nodes have
# the same values.
order_by.append(opts.tree_id_attr)
queryset = node.__class__._tree_manager.db_manager(node._state.db).filter(filters).order_by(*order_by)
if node.pk:
queryset = queryset.exclude(pk=node.pk)
try:
right_sibling = queryset[:1][0]
except IndexError:
# No suitable right sibling could be found
pass
return right_sibling
class MPTTModelBase(ModelBase):
"""
Metaclass for MPTT models
"""
def __new__(meta, class_name, bases, class_dict):
"""
Create subclasses of MPTTModel. This:
- adds the MPTT fields to the class
- adds a TreeManager to the model
"""
if class_name == 'NewBase' and class_dict == {}:
return super(MPTTModelBase, meta).__new__(meta, class_name, bases, class_dict)
is_MPTTModel = False
try:
MPTTModel
except NameError:
is_MPTTModel = True
MPTTMeta = class_dict.pop('MPTTMeta', None)
if not MPTTMeta:
class MPTTMeta:
pass
initial_options = frozenset(dir(MPTTMeta))
# extend MPTTMeta from base classes
for base in bases:
if hasattr(base, '_mptt_meta'):
for name, value in base._mptt_meta:
if name == 'tree_manager_attr':
continue
if name not in initial_options:
setattr(MPTTMeta, name, value)
class_dict['_mptt_meta'] = MPTTOptions(MPTTMeta)
super_new = super(MPTTModelBase, meta).__new__
cls = super_new(meta, class_name, bases, class_dict)
cls = meta.register(cls)
# see error cases in TreeManager.disable_mptt_updates for the reasoning here.
cls._mptt_tracking_base = None
if is_MPTTModel:
bases = [cls]
else:
bases = [base for base in cls.mro() if issubclass(base, MPTTModel)]
for base in bases:
if (not (base._meta.abstract or base._meta.proxy) and
base._tree_manager.tree_model is base):
cls._mptt_tracking_base = base
break
if cls is cls._mptt_tracking_base:
cls._threadlocal = threading.local()
# set on first access (to make threading errors more obvious):
# cls._threadlocal.mptt_delayed_tree_changes = None
return cls
@classmethod
def register(meta, cls, **kwargs):
"""
For the weird cases when you need to add tree-ness to an *existing*
class. For other cases you should subclass MPTTModel instead of calling this.
"""
if not issubclass(cls, models.Model):
raise ValueError(_("register() expects a Django model class argument"))
if not hasattr(cls, '_mptt_meta'):
cls._mptt_meta = MPTTOptions(**kwargs)
abstract = getattr(cls._meta, 'abstract', False)
try:
MPTTModel
except NameError:
# We're defining the base class right now, so don't do anything
# We only want to add this stuff to the subclasses.
# (Otherwise if field names are customized, we'll end up adding two
# copies)
pass
else:
if not issubclass(cls, MPTTModel):
bases = list(cls.__bases__)
# strip out bases that are strict superclasses of MPTTModel.
# (i.e. Model, object)
# this helps linearize the type hierarchy if possible
for i in range(len(bases) - 1, -1, -1):
if issubclass(MPTTModel, bases[i]):
del bases[i]
bases.insert(0, MPTTModel)
cls.__bases__ = tuple(bases)
if _get_tree_model(cls) is cls:
# HACK: _meta.get_field() doesn't work before AppCache.ready in Django>=1.8
# ( see https://code.djangoproject.com/ticket/24231 )
# So the only way to get existing fields is using local_fields on all superclasses.
existing_field_names = set()
for base in cls.mro():
if hasattr(base, '_meta'):
existing_field_names.update([f.name for f in base._meta.local_fields])
for key in ('left_attr', 'right_attr', 'tree_id_attr', 'level_attr'):
field_name = getattr(cls._mptt_meta, key)
if field_name not in existing_field_names:
field = models.PositiveIntegerField(db_index=True, editable=False)
field.contribute_to_class(cls, field_name)
# Add a tree manager, if there isn't one already
if not abstract:
# make sure we have a tree manager somewhere
tree_manager = None
if hasattr(cls._meta, 'concrete_managers'): # Django < 1.10
cls_managers = cls._meta.concrete_managers + cls._meta.abstract_managers
cls_managers = [r[2] for r in cls_managers]
else:
cls_managers = cls._meta.managers
for cls_manager in cls_managers:
if isinstance(cls_manager, TreeManager):
# prefer any locally defined manager (i.e. keep going if not local)
if cls_manager.model is cls:
tree_manager = cls_manager
break
if tree_manager and tree_manager.model is not cls:
tree_manager = tree_manager._copy_to_model(cls)
elif tree_manager is None:
tree_manager = TreeManager()
tree_manager.contribute_to_class(cls, '_tree_manager')
# avoid using ManagerDescriptor, so instances can refer to self._tree_manager
setattr(cls, '_tree_manager', tree_manager)
return cls
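# Usage sketch (editorial note; ``Category`` and its fields are hypothetical):
#
#     MPTTModelBase.register(Category, order_insertion_by=['name'])
#
# gives an existing, non-abstract model the MPTT fields and a ``_tree_manager``,
# much as if it had subclassed MPTTModel directly.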
def raise_if_unsaved(func):
@wraps(func)
def _fn(self, *args, **kwargs):
if not self.pk:
raise ValueError(
'Cannot call %(function)s on unsaved %(class)s instances'
% {'function': func.__name__, 'class': self.__class__.__name__}
)
return func(self, *args, **kwargs)
return _fn
class MPTTModel(six.with_metaclass(MPTTModelBase, models.Model)):
"""
Base class for tree models.
"""
class Meta:
abstract = True
objects = TreeManager()
def __init__(self, *args, **kwargs):
super(MPTTModel, self).__init__(*args, **kwargs)
self._mptt_meta.update_mptt_cached_fields(self)
def _mpttfield(self, fieldname):
translated_fieldname = getattr(self._mptt_meta, fieldname + '_attr')
return getattr(self, translated_fieldname)
@_classproperty
def _mptt_updates_enabled(cls):
if not cls._mptt_tracking_base:
return True
return getattr(cls._mptt_tracking_base._threadlocal, 'mptt_updates_enabled', True)
# ideally this'd be part of the _mptt_updates_enabled classproperty, but it seems
# that settable classproperties are very, very hard to do! suggestions please :)
@classmethod
def _set_mptt_updates_enabled(cls, value):
assert cls is cls._mptt_tracking_base,\
"Can't enable or disable mptt updates on a non-tracking class."
cls._threadlocal.mptt_updates_enabled = value
@_classproperty
def _mptt_is_tracking(cls):
if not cls._mptt_tracking_base:
return False
if not hasattr(cls._threadlocal, 'mptt_delayed_tree_changes'):
# happens the first time this is called from each thread
cls._threadlocal.mptt_delayed_tree_changes = None
return cls._threadlocal.mptt_delayed_tree_changes is not None
@classmethod
def _mptt_start_tracking(cls):
assert cls is cls._mptt_tracking_base,\
"Can't start or stop mptt tracking on a non-tracking class."
assert not cls._mptt_is_tracking, "mptt tracking is already started."
cls._threadlocal.mptt_delayed_tree_changes = set()
@classmethod
def _mptt_stop_tracking(cls):
assert cls is cls._mptt_tracking_base,\
"Can't start or stop mptt tracking on a non-tracking class."
assert cls._mptt_is_tracking, "mptt tracking isn't started."
results = cls._threadlocal.mptt_delayed_tree_changes
cls._threadlocal.mptt_delayed_tree_changes = None
return results
@classmethod
def _mptt_track_tree_modified(cls, tree_id):
if not cls._mptt_is_tracking:
return
cls._threadlocal.mptt_delayed_tree_changes.add(tree_id)
@classmethod
def _mptt_track_tree_insertions(cls, tree_id, num_inserted):
if not cls._mptt_is_tracking:
return
changes = cls._threadlocal.mptt_delayed_tree_changes
if not num_inserted or not changes:
return
if num_inserted < 0:
deleted = range(tree_id + num_inserted, -num_inserted)
changes.difference_update(deleted)
new_changes = set(
(t + num_inserted if t >= tree_id else t) for t in changes)
cls._threadlocal.mptt_delayed_tree_changes = new_changes
@raise_if_unsaved
def get_ancestors(self, ascending=False, include_self=False):
"""
Creates a ``QuerySet`` containing the ancestors of this model
instance.
This defaults to being in descending order (root ancestor first,
immediate parent last); passing ``True`` for the ``ascending``
argument will reverse the ordering (immediate parent first, root
ancestor last).
If ``include_self`` is ``True``, the ``QuerySet`` will also
include this model instance.
"""
if self.is_root_node():
if not include_self:
return self._tree_manager.none()
else:
# Filter on pk for efficiency.
qs = self._tree_manager.filter(pk=self.pk)
else:
opts = self._mptt_meta
order_by = opts.left_attr
if ascending:
order_by = '-' + order_by
left = getattr(self, opts.left_attr)
right = getattr(self, opts.right_attr)
if not include_self:
left -= 1
right += 1
qs = self._tree_manager._mptt_filter(
left__lte=left,
right__gte=right,
tree_id=self._mpttfield('tree_id'),
)
qs = qs.order_by(order_by)
if hasattr(self, '_mptt_use_cached_ancestors'):
# Called during or after a `recursetree` tag.
# There should be cached parents up to level 0.
# So we can use them to avoid doing a query at all.
ancestors = []
p = self
if not include_self:
p = getattr(p, opts.parent_attr)
while p is not None:
ancestors.append(p)
p = getattr(p, opts.parent_attr)
ancestors.reverse()
qs._result_cache = ancestors
return qs
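# Usage sketch (editorial note; ``node`` stands for any saved MPTTModel instance):
#
#     node.get_ancestors()                   # root first ... immediate parent last
#     node.get_ancestors(ascending=True)     # immediate parent first ... root last
#     node.get_ancestors(include_self=True)  # same ordering, plus ``node`` itself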
@raise_if_unsaved
def get_family(self):
"""
Returns a ``QuerySet`` containing the ancestors, the model itself
and the descendants, in tree order.
"""
opts = self._mptt_meta
left = getattr(self, opts.left_attr)
right = getattr(self, opts.right_attr)
ancestors = Q(**{
"%s__lte" % opts.left_attr: left,
"%s__gte" % opts.right_attr: right,
opts.tree_id_attr: self._mpttfield('tree_id'),
})
descendants = Q(**{
"%s__gte" % opts.left_attr: left,
"%s__lte" % opts.left_attr: right,
opts.tree_id_attr: self._mpttfield('tree_id'),
})
return self._tree_manager.filter(ancestors | descendants)
@raise_if_unsaved
def get_children(self):
"""
Returns a ``QuerySet`` containing the immediate children of this
model instance, in tree order.
The benefit of using this method over the reverse relation
provided by the ORM to the instance's children is that a
database query can be avoided in the case where the instance is
a leaf node (it has no children).
If called from a template where the tree has been walked by the
``cache_tree_children`` filter, no database query is required.
"""
if hasattr(self, '_cached_children'):
qs = self._tree_manager.filter(pk__in=[n.pk for n in self._cached_children])
qs._result_cache = self._cached_children
return qs
else:
if self.is_leaf_node():
return self._tree_manager.none()
return self._tree_manager._mptt_filter(parent=self)
@raise_if_unsaved
def get_descendants(self, include_self=False):
"""
Creates a ``QuerySet`` containing descendants of this model
instance, in tree order.
If ``include_self`` is ``True``, the ``QuerySet`` will also
include this model instance.
"""
if self.is_leaf_node():
if not include_self:
return self._tree_manager.none()
else:
return self._tree_manager.filter(pk=self.pk)
opts = self._mptt_meta
left = getattr(self, opts.left_attr)
right = getattr(self, opts.right_attr)
if not include_self:
left += 1
right -= 1
return self._tree_manager._mptt_filter(
tree_id=self._mpttfield('tree_id'),
left__gte=left,
left__lte=right
)
def get_descendant_count(self):
"""
Returns the number of descendants this model instance has.
"""
if self._mpttfield('right') is None:
# node not saved yet
return 0
else:
return (self._mpttfield('right') - self._mpttfield('left') - 1) // 2
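# Worked example of the nested-set arithmetic above (values are illustrative):
# a node stored with left=2 and right=7 encloses the edge indicators 3..6, and
# since every descendant consumes exactly two of them it has
# (7 - 2 - 1) // 2 == 2 descendants.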
@raise_if_unsaved
def get_leafnodes(self, include_self=False):
"""
Creates a ``QuerySet`` containing leafnodes of this model
instance, in tree order.
If ``include_self`` is ``True``, the ``QuerySet`` will also
include this model instance (if it is a leaf node)
"""
descendants = self.get_descendants(include_self=include_self)
return self._tree_manager._mptt_filter(
descendants,
left=(models.F(self._mptt_meta.right_attr) - 1)
)
@raise_if_unsaved
def get_next_sibling(self, *filter_args, **filter_kwargs):
"""
Returns this model instance's next sibling in the tree, or
``None`` if it doesn't have a next sibling.
"""
qs = self._tree_manager.filter(*filter_args, **filter_kwargs)
if self.is_root_node():
qs = self._tree_manager._mptt_filter(
qs,
parent=None,
tree_id__gt=self._mpttfield('tree_id'),
)
else:
qs = self._tree_manager._mptt_filter(
qs,
parent__pk=getattr(self, self._mptt_meta.parent_attr + '_id'),
left__gt=self._mpttfield('right'),
)
siblings = qs[:1]
return siblings and siblings[0] or None
@raise_if_unsaved
def get_previous_sibling(self, *filter_args, **filter_kwargs):
"""
Returns this model instance's previous sibling in the tree, or
``None`` if it doesn't have a previous sibling.
"""
opts = self._mptt_meta
qs = self._tree_manager.filter(*filter_args, **filter_kwargs)
if self.is_root_node():
qs = self._tree_manager._mptt_filter(
qs,
parent=None,
tree_id__lt=self._mpttfield('tree_id'),
)
qs = qs.order_by('-' + opts.tree_id_attr)
else:
qs = self._tree_manager._mptt_filter(
qs,
parent__pk=getattr(self, opts.parent_attr + '_id'),
right__lt=self._mpttfield('left'),
)
qs = qs.order_by('-' + opts.right_attr)
siblings = qs[:1]
return siblings and siblings[0] or None
@raise_if_unsaved
def get_root(self):
"""
Returns the root node of this model instance's tree.
"""
if self.is_root_node() and type(self) == self._tree_manager.tree_model:
return self
return self._tree_manager._mptt_filter(
tree_id=self._mpttfield('tree_id'),
parent=None,
).get()
@raise_if_unsaved
def get_siblings(self, include_self=False):
"""
Creates a ``QuerySet`` containing siblings of this model
instance. Root nodes are considered to be siblings of other root
nodes.
If ``include_self`` is ``True``, the ``QuerySet`` will also
include this model instance.
"""
if self.is_root_node():
queryset = self._tree_manager._mptt_filter(parent=None)
else:
parent_id = getattr(self, self._mptt_meta.parent_attr + '_id')
queryset = self._tree_manager._mptt_filter(parent__pk=parent_id)
if not include_self:
queryset = queryset.exclude(pk=self.pk)
return queryset
def get_level(self):
"""
Returns the level of this node (distance from root)
"""
return getattr(self, self._mptt_meta.level_attr)
def insert_at(self, target, position='first-child', save=False,
allow_existing_pk=False, refresh_target=True):
"""
Convenience method for calling ``TreeManager.insert_node`` with this
model instance.
"""
self._tree_manager.insert_node(
self, target, position, save, allow_existing_pk=allow_existing_pk,
refresh_target=refresh_target)
def is_child_node(self):
"""
Returns ``True`` if this model instance is a child node, ``False``
otherwise.
"""
return not self.is_root_node()
def is_leaf_node(self):
"""
Returns ``True`` if this model instance is a leaf node (it has no
children), ``False`` otherwise.
"""
return not self.get_descendant_count()
def is_root_node(self):
"""
Returns ``True`` if this model instance is a root node,
``False`` otherwise.
"""
return getattr(self, self._mptt_meta.parent_attr + '_id') is None
@raise_if_unsaved
def is_descendant_of(self, other, include_self=False):
"""
Returns ``True`` if this model is a descendant of the given node,
``False`` otherwise.
If include_self is True, also returns True if the two nodes are the same node.
"""
opts = self._mptt_meta
if include_self and other.pk == self.pk:
return True
if getattr(self, opts.tree_id_attr) != getattr(other, opts.tree_id_attr):
return False
else:
left = getattr(self, opts.left_attr)
right = getattr(self, opts.right_attr)
return (
left > getattr(other, opts.left_attr) and
right < getattr(other, opts.right_attr))
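# Example of the containment test above (numbers are illustrative): within the
# same tree, a node with (left=3, right=4) is a descendant of a node with
# (left=2, right=7) because 3 > 2 and 4 < 7; a descendant's interval always
# nests strictly inside its ancestor's.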
@raise_if_unsaved
def is_ancestor_of(self, other, include_self=False):
"""
Returns ``True`` if this model is an ancestor of the given node,
``False`` otherwise.
If include_self is True, also returns True if the two nodes are the same node.
"""
if include_self and other.pk == self.pk:
return True
return other.is_descendant_of(self)
def move_to(self, target, position='first-child'):
"""
Convenience method for calling ``TreeManager.move_node`` with this
model instance.
NOTE: This is a low-level method; it does NOT respect ``MPTTMeta.order_insertion_by``.
In most cases you should just move the node yourself by setting node.parent.
"""
self._tree_manager.move_node(self, target, position)
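# Usage sketch (editorial note; ``node`` and ``new_parent`` are hypothetical, and
# the default parent_attr of 'parent' is assumed):
#
#     node.move_to(new_parent, 'last-child')   # low-level: ignores order_insertion_by
#
#     node.parent = new_parent                 # preferred: ordering re-applied on save
#     node.save()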
def _is_saved(self, using=None):
if not self.pk or self._mpttfield('tree_id') is None:
return False
opts = self._meta
if opts.pk.rel is None:
return True
else:
if not hasattr(self, '_mptt_saved'):
manager = self.__class__._base_manager
manager = manager.using(using)
self._mptt_saved = manager.filter(pk=self.pk).exists()
return self._mptt_saved
def _get_user_field_names(self):
""" Returns the list of user defined (i.e. non-mptt internal) field names. """
from django.db.models.fields import AutoField
field_names = []
internal_fields = (
self._mptt_meta.left_attr, self._mptt_meta.right_attr, self._mptt_meta.tree_id_attr,
self._mptt_meta.level_attr, self._mptt_meta.parent_attr)
for field in self._meta.fields:
if (field.name not in internal_fields) and (not isinstance(field, AutoField)) and (not field.primary_key): # noqa
field_names.append(field.name)
return field_names
def save(self, *args, **kwargs):
"""
If this is a new node, sets tree fields up before it is inserted
into the database, making room in the tree structure as necessary,
defaulting to making the new node the last child of its parent.
If the node's left and right edge indicators have already been set, we
take this as an indication that the node has already been set up for
insertion, so its tree fields are left untouched.
If this is an existing node and its parent has been changed,
performs reparenting in the tree structure, defaulting to making the
node the last child of its new parent.
In either case, if the node's class has its ``order_insertion_by``
tree option set, the node will be inserted or moved to the
appropriate position to maintain ordering by the specified field.
"""
do_updates = self.__class__._mptt_updates_enabled
track_updates = self.__class__._mptt_is_tracking
opts = self._mptt_meta
if not (do_updates or track_updates):
# inside manager.disable_mptt_updates(), don't do any updates.
# unless we're also inside TreeManager.delay_mptt_updates()
if self._mpttfield('left') is None:
# we need to set *some* values, though don't care too much what.
parent = getattr(self, '_%s_cache' % opts.parent_attr, None)
# if we have a cached parent, have a stab at getting
# possibly-correct values. otherwise, meh.
if parent:
left = parent._mpttfield('left') + 1
setattr(self, opts.left_attr, left)
setattr(self, opts.right_attr, left + 1)
setattr(self, opts.level_attr, parent._mpttfield('level') + 1)
setattr(self, opts.tree_id_attr, parent._mpttfield('tree_id'))
self._tree_manager._post_insert_update_cached_parent_right(parent, 2)
else:
setattr(self, opts.left_attr, 1)
setattr(self, opts.right_attr, 2)
setattr(self, opts.level_attr, 0)
setattr(self, opts.tree_id_attr, 0)
return super(MPTTModel, self).save(*args, **kwargs)
parent_id = opts.get_raw_field_value(self, opts.parent_attr)
# determine whether this instance is already in the db
force_update = kwargs.get('force_update', False)
force_insert = kwargs.get('force_insert', False)
collapse_old_tree = None
deferred_fields = self.get_deferred_fields()
if force_update or (not force_insert and self._is_saved(using=kwargs.get('using'))):
# it already exists, so do a move
old_parent_id = self._mptt_cached_fields[opts.parent_attr]
if old_parent_id is DeferredAttribute:
same_order = True
else:
same_order = old_parent_id == parent_id
if same_order and len(self._mptt_cached_fields) > 1:
for field_name, old_value in self._mptt_cached_fields.items():
if old_value is DeferredAttribute and field_name not in deferred_fields:
same_order = False
break
if old_value != opts.get_raw_field_value(self, field_name):
same_order = False
break
if not do_updates and not same_order:
same_order = True
self.__class__._mptt_track_tree_modified(self._mpttfield('tree_id'))
elif (not do_updates) and not same_order and old_parent_id is None:
# the old tree no longer exists, so we need to collapse it.
collapse_old_tree = self._mpttfield('tree_id')
parent = getattr(self, opts.parent_attr)
tree_id = parent._mpttfield('tree_id')
left = parent._mpttfield('left') + 1
self.__class__._mptt_track_tree_modified(tree_id)
setattr(self, opts.tree_id_attr, tree_id)
setattr(self, opts.left_attr, left)
setattr(self, opts.right_attr, left + 1)
setattr(self, opts.level_attr, parent._mpttfield('level') + 1)
same_order = True
if not same_order:
opts.set_raw_field_value(self, opts.parent_attr, old_parent_id)
try:
right_sibling = None
if opts.order_insertion_by:
right_sibling = opts.get_ordered_insertion_target(
self, getattr(self, opts.parent_attr))
if parent_id is not None:
parent = getattr(self, opts.parent_attr)
# If we aren't already a descendant of the new parent,
# we need to update the parent.rght so things like
# get_children and get_descendant_count work correctly.
#
# parent might be None if parent_id was assigned
# directly -- then we certainly do not have to update
# the cached parent.
update_cached_parent = parent and (
getattr(self, opts.tree_id_attr) != getattr(parent, opts.tree_id_attr) or # noqa
getattr(self, opts.left_attr) < getattr(parent, opts.left_attr) or
getattr(self, opts.right_attr) > getattr(parent, opts.right_attr))
if right_sibling:
self._tree_manager._move_node(
self, right_sibling, 'left', save=False,
refresh_target=False)
else:
# Default movement
if parent_id is None:
root_nodes = self._tree_manager.root_nodes()
try:
rightmost_sibling = root_nodes.exclude(
pk=self.pk).order_by('-' + opts.tree_id_attr)[0]
self._tree_manager._move_node(
self, rightmost_sibling, 'right', save=False,
refresh_target=False)
except IndexError:
pass
else:
self._tree_manager._move_node(
self, parent, 'last-child', save=False)
if parent_id is not None and update_cached_parent:
# Update rght of cached parent
right_shift = 2 * (self.get_descendant_count() + 1)
self._tree_manager._post_insert_update_cached_parent_right(
parent, right_shift)
finally:
# Make sure the new parent is always
# restored on the way out in case of errors.
opts.set_raw_field_value(self, opts.parent_attr, parent_id)
# If there were no exceptions raised then send a moved signal
node_moved.send(sender=self.__class__, instance=self,
target=getattr(self, opts.parent_attr))
else:
opts.set_raw_field_value(self, opts.parent_attr, parent_id)
if not track_updates:
# When not using delayed/disabled updates,
# populate update_fields with user defined model fields.
# This helps preserve tree integrity when saving model on top
# of a modified tree.
if len(args) > 3:
if not args[3]:
args = list(args)
args[3] = self._get_user_field_names()
args = tuple(args)
else:
if not kwargs.get("update_fields", None):
kwargs["update_fields"] = self._get_user_field_names()
else:
# new node, do an insert
if (getattr(self, opts.left_attr) and getattr(self, opts.right_attr)):
# This node has already been set up for insertion.
pass
else:
parent = getattr(self, opts.parent_attr)
right_sibling = None
# if we're inside delay_mptt_updates, don't do queries to find
# sibling position. instead, do default insertion. correct
# positions will be found during partial rebuild later.
# *unless* this is a root node. (as update tracking doesn't
# handle re-ordering of trees.)
if do_updates or parent is None:
if opts.order_insertion_by:
right_sibling = opts.get_ordered_insertion_target(self, parent)
if right_sibling:
self.insert_at(right_sibling, 'left', allow_existing_pk=True,
refresh_target=False)
if parent:
# since we didn't insert into parent, we have to update parent.rght
# here instead of in TreeManager.insert_node()
right_shift = 2 * (self.get_descendant_count() + 1)
self._tree_manager._post_insert_update_cached_parent_right(
parent, right_shift)
else:
# Default insertion
self.insert_at(parent, position='last-child', allow_existing_pk=True)
try:
super(MPTTModel, self).save(*args, **kwargs)
finally:
if collapse_old_tree is not None:
self._tree_manager._create_tree_space(collapse_old_tree, -1)
self._mptt_saved = True
opts.update_mptt_cached_fields(self)
save.alters_data = True
def delete(self, *args, **kwargs):
"""Calling ``delete`` on a node will delete it as well as its full
subtree, as opposed to reattaching all the subnodes to its parent node.
There are no arguments specific to an MPTT model; all arguments are
passed directly to Django's ``Model.delete``.
``delete`` will not return anything. """
tree_width = (self._mpttfield('right') -
self._mpttfield('left') + 1)
target_right = self._mpttfield('right')
tree_id = self._mpttfield('tree_id')
self._tree_manager._close_gap(tree_width, target_right, tree_id)
parent = getattr(self, '_%s_cache' % self._mptt_meta.parent_attr, None)
if parent:
right_shift = -self.get_descendant_count() - 2
self._tree_manager._post_insert_update_cached_parent_right(parent, right_shift)
super(MPTTModel, self).delete(*args, **kwargs)
delete.alters_data = True
def _mptt_refresh(self):
if not self.pk:
return
manager = type(self)._tree_manager
opts = self._mptt_meta
values = manager.filter(pk=self.pk).values(
opts.left_attr,
opts.right_attr,
opts.level_attr,
opts.tree_id_attr,
)[0]
for k, v in values.items():
setattr(self, k, v)
|
|
#title :image_class.py
#description :This code batch-classifies multiple images listed in a .csv tag file using a Convolutional Neural Network.
#author :Harry F Bullough
#date :10/05/2017
#version :3.1
#usage :python image_class.py /my/image/directory/
#notes :
#python_version :2.7
#=============================================================================
##################################################################
######################## Import Libraries ########################
##################################################################
import numpy as np
import tensorflow as tf
import cv2
import csv
import sys
import time
import os
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
##################################################################
##################### Define Global Variables ####################
##################################################################
global imagePath_dir
global imageNumber
global modelFullPath
global labelsFullPath
global imagePath
##################################################################
######################## Handler Function ########################
##################################################################
class MyHandler(PatternMatchingEventHandler):
#Setup image tags as target
patterns = ["img_tags.csv"]
#Function run upon event trigger
def process(self, event):
"""
event.event_type
'modified' | 'created' | 'moved' | 'deleted'
event.is_directory
True | False
event.src_path
path/to/observed/file
"""
print(".csv has been changed\n");
#Start image processing function
process_img()
#Setup event triggers to run process
def on_modified(self, event): #Trigger event on file modification
self.process(event)
def on_created(self, event): #Trigger event on file creation
self.process(event)
def on_moved(self, event): #Trigger event on file move
self.process(event)
def on_deleted(self, event): #Trigger event on file deletion
self.process(event)
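#Editorial sketch: the handler above is intended to be attached to a watchdog
#Observer, roughly as below (the wiring presumably appears later in this script,
#not shown in this excerpt; the path here is only a placeholder):
#
# observer = Observer()
# observer.schedule(MyHandler(), path='/home/ec2-user/src/tensorflow/input/', recursive=False)
# observer.start()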
##################################################################
#################### Image Processing Function ###################
##################################################################
def process_img():
#Setup image paths
global imagePath
global imagePath_dir
global imageNumber
imagePath_dir = '/home/ec2-user/src/tensorflow/input/img_'
imageNumber = '0'
#Open image tags CSV file
ifile = open('/home/ec2-user/src/tensorflow/input/img_tags.csv',"rb")
img_tags = csv.reader(ifile)
#Define CSV header for new files
header = ['x','y','img','class']
#Initialise CSV creation counters
t_num = 0
ben_num = 0
bin_num = 0
u_num = 0
s_num = 0
#Create a graph from the training
create_graph()
#Loop for every entry in tags CSV
for row in img_tags:
#Don't run for header line
if row[0] != 'x':
            #Read current image number and concatenate to obtain new image path
imageNumber = row[2]
imagePath = imagePath_dir + imageNumber + '.jpg'
#Run slope detection function
if avg_bands() == 'false':
#If not a slope, run tensorflow classification & update classifier with result
classifier = run_inference_on_image()
else:
#Set class to slope if slope detected
classifier = 'slope'
#Add classifier to CSV column in current row
row.append(classifier)
if classifier == 'tree':
#Only create CSV on first pass through loop
if t_num == 0:
#Create CSV
tree_file = open('/home/ec2-user/src/tensorflow/output/trees.csv',"wb")
tree = csv.writer(tree_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONE)
#Write Header to CSV
tree.writerow(header)
#Increment counter so that CSV is only created once
t_num += 1
#write current row to file
tree.writerow(row)
elif classifier == 'bench':
if ben_num == 0:
bench_file = open('/home/ec2-user/src/tensorflow/output/benches.csv',"wb")
bench = csv.writer(bench_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONE)
bench.writerow(header)
ben_num += 1
bench.writerow(row)
elif classifier == 'bin':
if bin_num == 0:
bin_file = open('/home/ec2-user/src/tensorflow/output/bins.csv',"wb")
bins = csv.writer(bin_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONE)
bins.writerow(header)
bin_num += 1
bins.writerow(row)
elif classifier == 'undefined':
if u_num == 0:
undefined_file = open('/home/ec2-user/src/tensorflow/output/undefined.csv',"wb")
undefined = csv.writer(undefined_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONE)
undefined.writerow(header)
u_num += 1
undefined.writerow(row)
elif classifier == 'slope':
if s_num == 0:
slope_file = open('/home/ec2-user/src/tensorflow/output/slope.csv',"wb")
slope = csv.writer(slope_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_NONE)
slope.writerow(header)
s_num += 1
slope.writerow(row)
#Close CSV files and send over scp
ifile.close()
if t_num > 0:
tree_file.close()
os.system('sudo scp -i /home/ec2-user/src/NewMyPi /home/ec2-user/src/tensorflow/output/trees.csv [email protected]:/MapFiles/trees.csv')
if ben_num > 0:
bench_file.close()
os.system("sudo scp -i /home/ec2-user/src/NewMyPi /home/ec2-user/src/tensorflow/output/benches.csv [email protected]:/MapFiles/benches.csv")
if bin_num > 0:
bin_file.close()
os.system("sudo scp -i /home/ec2-user/src/NewMyPi /home/ec2-user/src/tensorflow/output/bins.csv [email protected]:/MapFiles/bins.csv")
if u_num > 0:
undefined_file.close()
os.system("sudo scp -i /home/ec2-user/src/NewMyPi /home/ec2-user/src/tensorflow/output/undefined.csv [email protected]:/MapFiles/undefined.csv")
if s_num > 0:
slope_file.close()
os.system("sudo scp -i /home/ec2-user/src/NewMyPi /home/ec2-user/src/tensorflow/output/slope.csv [email protected]:/MapFiles/slope.csv")
##################################################################
#################### Slope Detection Function ####################
##################################################################
def avg_bands():
#import current image
img = cv2.imread(imagePath,1)
#Find image size
rows = img.shape[0]
col = img.shape[1]
#Section into 6 layers
mask_sz = rows / 6
mask_top = 0
mask_1 = mask_sz - 1
mask_2 = mask_1 + mask_sz - 1
mask_3 = mask_2 + mask_sz - 1
mask_4 = mask_3 + mask_sz - 1
mask_5 = mask_4 + mask_sz - 1
mask_bot = rows - 1
#Crop image into layers
layer_1 = np.zeros((mask_sz,col))
layer_1 = img[mask_top:mask_1,:]
layer_2 = np.zeros((mask_sz,col))
layer_2 = img[mask_1:mask_2,:]
layer_3 = np.zeros((mask_sz,col))
layer_3 = img[mask_2:mask_3,:]
layer_4= np.zeros((mask_sz,col))
layer_4 = img[mask_3:mask_4,:]
layer_5 = np.zeros((mask_sz,col))
layer_5 = img[mask_4:mask_5,:]
layer_6 = np.zeros((mask_sz,col))
layer_6 = img[mask_5:mask_bot,:]
#Calculate average colour values
layer1_b,layer1_g,layer1_r = cv2.split(layer_1)
layer2_b,layer2_g,layer2_r = cv2.split(layer_2)
layer3_b,layer3_g,layer3_r = cv2.split(layer_3)
layer4_b,layer4_g,layer4_r = cv2.split(layer_4)
layer5_b,layer5_g,layer5_r = cv2.split(layer_5)
layer6_b,layer6_g,layer6_r = cv2.split(layer_6)
layer1_b_val = cv2.mean(layer1_b)[0]
layer1_g_val = cv2.mean(layer1_g)[0]
layer1_r_val = cv2.mean(layer1_r)[0]
layer2_b_val = cv2.mean(layer2_b)[0]
layer2_g_val = cv2.mean(layer2_g)[0]
layer2_r_val = cv2.mean(layer2_r)[0]
layer3_b_val = cv2.mean(layer3_b)[0]
layer3_g_val = cv2.mean(layer3_g)[0]
layer3_r_val = cv2.mean(layer3_r)[0]
layer4_b_val = cv2.mean(layer4_b)[0]
layer4_g_val = cv2.mean(layer4_g)[0]
layer4_r_val = cv2.mean(layer4_r)[0]
layer5_b_val = cv2.mean(layer5_b)[0]
layer5_g_val = cv2.mean(layer5_g)[0]
layer5_r_val = cv2.mean(layer5_r)[0]
layer6_b_val = cv2.mean(layer6_b)[0]
layer6_g_val = cv2.mean(layer6_g)[0]
layer6_r_val = cv2.mean(layer6_r)[0]
layer1_b_val = round(layer1_b_val)
layer1_g_val = round(layer1_g_val)
layer1_r_val = round(layer1_r_val)
layer2_b_val = round(layer2_b_val)
layer2_g_val = round(layer2_g_val)
layer2_r_val = round(layer2_r_val)
layer3_b_val = round(layer3_b_val)
layer3_g_val = round(layer3_g_val)
layer3_r_val = round(layer3_r_val)
layer4_b_val = round(layer4_b_val)
layer4_g_val = round(layer4_g_val)
layer4_r_val = round(layer4_r_val)
layer5_b_val = round(layer5_b_val)
layer5_g_val = round(layer5_g_val)
layer5_r_val = round(layer5_r_val)
layer6_b_val = round(layer6_b_val)
layer6_g_val = round(layer6_g_val)
layer6_r_val = round(layer6_r_val)
#Initialise terrain variables
layer1_terrain = ""
layer2_terrain = ""
layer3_terrain = ""
layer4_terrain = ""
layer5_terrain = ""
layer6_terrain = ""
#Define which bands are green
if layer1_g_val > layer1_b_val and layer1_g_val > layer1_r_val:
layer1_terrain = "green"
else:
layer1_terrain = "other"
if layer2_g_val > layer2_b_val and layer2_g_val > layer2_r_val:
layer2_terrain = "green"
else:
layer2_terrain = "other"
if layer3_g_val > layer3_b_val and layer3_g_val > layer3_r_val:
layer3_terrain = "green"
else:
layer3_terrain = "other"
if layer4_g_val > layer4_b_val and layer4_g_val > layer4_r_val:
layer4_terrain = "green"
else:
layer4_terrain = "other"
if layer5_g_val > layer5_b_val and layer5_g_val > layer5_r_val:
layer5_terrain = "green"
else:
layer5_terrain = "other"
if layer6_g_val > layer6_b_val and layer6_g_val > layer6_r_val:
layer6_terrain = "green"
else:
layer6_terrain = "other"
#Set as true if all bands are green or all but the top bands are green
if layer2_terrain == "green" and layer3_terrain == "green" and layer4_terrain == "green" and layer5_terrain == "green" and layer6_terrain == "green":
return "true"
if layer1_terrain == "green" and layer2_terrain == "green" and layer3_terrain == "green" and layer4_terrain == "green" and layer5_terrain == "green" and layer6_terrain == "green":
return "true"
else:
return "false"
##################################################################
################## Create Graph From Training File ###############
##################################################################
def create_graph():
global modelFullPath
modelFullPath = '/home/ec2-user/src/tensorflow/output/output_graph.pb'
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile(modelFullPath, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
##################################################################
####################### Run Tensorflow CNN #######################
##################################################################
def run_inference_on_image():
#Intialise variables and define paths
global imagePath
global labelsFullPath
labelsFullPath = '/home/ec2-user/src/tensorflow/output/output_labels.txt'
answer = None
    #Check that the current image file exists
if not tf.gfile.Exists(imagePath):
tf.logging.fatal('File does not exist %s', imagePath)
return answer
#Load current image
image_data = tf.gfile.FastGFile(imagePath, 'rb').read()
#Set to not use CUDA GPU tool
config = tf.ConfigProto(
device_count = {'GPU': 0}
)
#Start TF session
with tf.Session(config=config) as sess:
#Run classification
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor,{'DecodeJpeg/contents:0': image_data})
#Remove 1D entry of shape of predictions
predictions = np.squeeze(predictions)
#Sort predictions and return top 5
top_k = predictions.argsort()[-5:][::-1]
f = open(labelsFullPath, 'rb')
lines = f.readlines()
labels = [str(w).replace("\n", "") for w in lines]
        score_last = 0
for node_id in top_k:
human_string = labels[node_id]
score = predictions[node_id]
#Find the highest score value in list
if score > score_last:
high_str = human_string
high_sc = score
score_last = score
#Return highest scoring label
answer = labels[top_k[0]]
#If high score is too low, set to undefined
if high_sc < 0.5:
answer = 'undefined'
print('img_%s: %s (score = %.5f)' % (imageNumber, answer, high_sc))
return answer
##################################################################
############################ Main loop ###########################
##################################################################
if __name__ == '__main__':
#Setup observer & assign handler
args = sys.argv[1:]
observer = Observer()
observer.schedule(MyHandler(), path=args[0] if args else '.')
observer.start()
try:
#Sleep until interrupt
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
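# Assumed input format (inferred from process_img above, not an official
# spec): img_tags.csv starts with a header row whose first cell is 'x', and
# each data row's third column is the image number used to build the path
# /home/ec2-user/src/tensorflow/input/img_<number>.jpg, e.g.
#
#   x,y,img
#   <x-coord>,<y-coord>,0
#   <x-coord>,<y-coord>,1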
|
|
import time
from coloredcoinlib.store import PersistentDictStore
from asset import AdditiveAssetValue, AssetTarget
from txcons import RawTxSpec
from ngcccbase.address import AddressRecord
def asset_value_to_data(av):
return (av.get_asset().get_id(),
av.get_value())
class TxHistoryEntry(object):
def __init__(self, model, data):
self.txhash = data['txhash']
self.txtime = data['txtime']
self.txtype = data['txtype']
self.data = data
self.model = model
@classmethod
def from_data(cls, model, data):
txtype = data['txtype']
if txtype == 'send':
return TxHistoryEntry_Send(model, data)
elif txtype == 'receive':
return TxHistoryEntry_Receive(model, data)
elif txtype == 'trade':
return TxHistoryEntry_Trade(model, data)
        elif txtype == 'complex':
return TxHistoryEntry_Complex(model, data)
else:
return TxHistoryEntry(model, data)
class TxHistoryEntry_Send(TxHistoryEntry):
def __init__(self, model, data):
super(TxHistoryEntry_Send, self).__init__(model, data)
self.asset_id = data['asset_id']
self.targets = data['targets']
def get_asset(self):
adm = self.model.get_asset_definition_manager()
return adm.get_asset_by_id(self.asset_id)
def get_fee_asset_target(self):
adm = self.model.get_asset_definition_manager()
asset = adm.get_asset_by_moniker("bitcoin")
fee = self.model.get_blockchain_state().get_tx(self.txhash).get_fee()
asset_value = AdditiveAssetValue(asset=asset, value=fee)
return AssetTarget(None, asset_value)
def get_targets(self):
asset = self.get_asset()
asset_targets = []
for (tgt_addr, tgt_value) in self.targets:
asset_value = AdditiveAssetValue(asset=asset, value=tgt_value)
asset_targets.append(AssetTarget(tgt_addr, asset_value))
try:
asset_targets.append(self.get_fee_asset_target())
except:
pass
return asset_targets
class TxHistoryEntry_Complex(TxHistoryEntry):
def __init__(self, model, data):
super(TxHistoryEntry_Complex, self).__init__(model, data)
self.data = data
def get_deltas(self):
adm = self.model.get_asset_definition_manager()
deltas = []
for assetid, value in self.data['deltas'].items():
deltas.append(adm.get_assetvalue_for_assetid_value(assetid, value))
return deltas
def get_addresses(self):
return ", ".join(self.data['addresses'])
class TxHistoryEntry_Receive(TxHistoryEntry):
def __init__(self, model, data):
super(TxHistoryEntry_Receive, self).__init__(model, data)
self.out_idxs = data['out_idxs']
def get_targets(self):
targets = []
coindb = self.model.get_coin_manager()
adm = self.model.get_asset_definition_manager()
for out_idx in self.out_idxs:
coin = coindb.find_coin(self.txhash, out_idx)
colorvalues = coin.get_colorvalues()
if not colorvalues:
continue
assert len(colorvalues) == 1
asset_value = adm.get_assetvalue_for_colorvalue(
colorvalues[0])
targets.append(AssetTarget(coin.address,
asset_value))
return targets
class TxHistoryEntry_Trade(TxHistoryEntry):
def __init__(self, model, data):
TxHistoryEntry.__init__(self, model, data)
self.in_values = data['in_values']
self.out_values = data['out_values']
def get_values(self, values):
adm = self.model.get_asset_definition_manager()
avalues = []
for asset_id, value in values:
asset = adm.get_asset_by_id(asset_id)
avalues.append(AdditiveAssetValue(asset=asset,
value=value))
return avalues
def get_in_values(self):
return self.get_values(self.in_values)
def get_out_values(self):
return self.get_values(self.out_values)
class TxHistory(object):
def __init__(self, model):
self.model = model
self.entries = PersistentDictStore(
self.model.store_conn.conn, "txhistory")
def decode_entry(self, entry_data):
return TxHistoryEntry.from_data(self.model, entry_data)
def get_entry(self, txhash):
entry = self.entries.get(txhash)
if entry:
return self.decode_entry(entry)
else:
return None
def get_all_entries(self):
return sorted([self.decode_entry(e)
for e in self.entries.values()],
key=lambda txe: txe.txtime)
def populate_history(self):
txdb = self.model.get_tx_db()
for txhash in txdb.get_all_tx_hashes():
if (txhash not in self.entries or # new transaction
not self.entries[txhash]['txtime']): # update unconfirmed
tx_data = txdb.get_tx_by_hash(txhash)['data']
raw_tx = RawTxSpec.from_tx_data(self.model,
tx_data.decode('hex'))
self.add_entry_from_tx(raw_tx)
def get_tx_timestamp(self, txhash): # TODO move to suitable file
txtime = 0
bs = self.model.get_blockchain_state()
blockhash, x = bs.get_tx_blockhash(txhash)
if blockhash:
height = bs.get_block_height(blockhash)
if height:
header = bs.get_header(height)
txtime = header.get('timestamp', txtime)
return txtime
def is_receive_entry(self, raw_tx, spent_coins, received_coins):
return not spent_coins and received_coins
def create_receive_entry(self, raw_tx, received_coins):
txhash = raw_tx.get_hex_txhash()
txtime = self.get_tx_timestamp(txhash)
out_idxs = [coin.outindex for coin in received_coins]
self.entries[txhash] = {"txhash": txhash,
"txtype": 'receive',
"txtime": txtime,
"out_idxs": out_idxs}
def add_trade_entry(self, txhash, in_colorvalue, out_colorvalue):
adm = self.model.get_asset_definition_manager()
in_assetvalue = adm.get_assetvalue_for_colorvalue(in_colorvalue)
out_assetvalue = adm.get_assetvalue_for_colorvalue(out_colorvalue)
txtime = self.get_tx_timestamp(txhash)
self.entries[txhash] = {"txhash": txhash,
"txtype": 'trade',
"txtime": txtime,
"in_values": [asset_value_to_data(in_assetvalue)],
"out_values": [asset_value_to_data(out_assetvalue)]}
def add_unknown_entry(self, txhash):
txtime = self.get_tx_timestamp(txhash)
self.entries[txhash] = {"txhash": txhash,
"txtype": 'unknown',
"txtime": txtime}
def get_delta_color_values(self, spent_coins, received_coins):
adm = self.model.get_asset_definition_manager()
deltas = {}
for coin in received_coins: # add received
for cv in coin.get_colorvalues():
colorid = cv.get_colordef().get_color_id()
assetid = adm.get_asset_by_color_id(colorid).get_id()
deltas[assetid] = deltas.get(assetid, 0) + cv.get_value()
for coin in spent_coins: # subtract sent
for cv in coin.get_colorvalues():
colorid = cv.get_colordef().get_color_id()
assetid = adm.get_asset_by_color_id(colorid).get_id()
deltas[assetid] = deltas.get(assetid, 0) - cv.get_value()
return dict(deltas)
def create_complex_entry(self, raw_tx, spent_coins, received_coins):
am = self.model.get_address_manager()
txhash = raw_tx.get_hex_txhash()
txtime = self.get_tx_timestamp(txhash)
# get addresses
outputs = raw_tx.composed_tx_spec.txouts
wallet_addrs = set([r.address for r in am.get_all_addresses()])
output_addrs = set([out.target_addr for out in outputs])
send_addrs = list(output_addrs.difference(wallet_addrs))
deltas = self.get_delta_color_values(spent_coins, received_coins)
self.entries[txhash] = {
"txhash": txhash,
"txtype": 'complex',
"txtime": txtime,
"addresses" : send_addrs,
"deltas" : deltas,
}
def is_send_entry(self, raw_tx, spent_coins, received_coins):
am = self.model.get_address_manager()
# only inputs from this wallet
input_addrs = set(raw_tx.get_input_addresses())
wallet_addrs = set([r.address for r in am.get_all_addresses()])
if wallet_addrs.intersection(input_addrs) != input_addrs:
return False # foreign inputs
# only one color + uncolored sent
cvlists = [coin.get_colorvalues() for coin in spent_coins]
cvs = [item for sublist in cvlists for item in sublist] # flatten
cids = set([cv.get_color_id() for cv in cvs])
if len(cids) > 2 or (len(cids) == 2 and 0 not in cids):
return False
return False # FIXME disabled for now
def create_send_entry(self, raw_tx, spent_coins, received_coins):
pass # TODO
def add_send_entry(self, txhash, asset, target_addrs, target_values):
self.entries[txhash] = {"txhash": txhash,
"txtype": 'send',
"txtime": int(time.time()),
"asset_id": asset.get_id(),
"targets": zip(target_addrs, target_values)}
def add_entry_from_tx(self, raw_tx):
coindb = self.model.get_coin_manager()
spent_coins, received_coins = coindb.get_coins_for_transaction(raw_tx)
if (not spent_coins) and (not received_coins):
return # no effect
# receive coins
if self.is_receive_entry(raw_tx, spent_coins, received_coins):
self.create_receive_entry(raw_tx, received_coins)
# send coins
elif self.is_send_entry(raw_tx, spent_coins, received_coins):
self.create_send_entry(raw_tx, spent_coins, received_coins)
else: # default for non obvious
self.create_complex_entry(raw_tx, spent_coins, received_coins)
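# Usage sketch (assumes an initialised wallet ``model`` exposing the managers
# used above; illustrative only):
#
#   history = TxHistory(model)
#   history.populate_history()                 # scan the tx db for new entries
#   for entry in history.get_all_entries():
#       print entry.txtime, entry.txtype, entry.txhash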
|
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SignSessionInfoRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'certificate': 'str',
'max_signature_length': 'str',
'return_format': 'str',
'signing_location': 'str'
}
attribute_map = {
'certificate': 'certificate',
'max_signature_length': 'maxSignatureLength',
'return_format': 'returnFormat',
'signing_location': 'signingLocation'
}
def __init__(self, certificate=None, max_signature_length=None, return_format=None, signing_location=None): # noqa: E501
"""SignSessionInfoRequest - a model defined in Swagger""" # noqa: E501
self._certificate = None
self._max_signature_length = None
self._return_format = None
self._signing_location = None
self.discriminator = None
if certificate is not None:
self.certificate = certificate
if max_signature_length is not None:
self.max_signature_length = max_signature_length
if return_format is not None:
self.return_format = return_format
if signing_location is not None:
self.signing_location = signing_location
@property
def certificate(self):
"""Gets the certificate of this SignSessionInfoRequest. # noqa: E501
# noqa: E501
:return: The certificate of this SignSessionInfoRequest. # noqa: E501
:rtype: str
"""
return self._certificate
@certificate.setter
def certificate(self, certificate):
"""Sets the certificate of this SignSessionInfoRequest.
# noqa: E501
:param certificate: The certificate of this SignSessionInfoRequest. # noqa: E501
:type: str
"""
self._certificate = certificate
@property
def max_signature_length(self):
"""Gets the max_signature_length of this SignSessionInfoRequest. # noqa: E501
# noqa: E501
:return: The max_signature_length of this SignSessionInfoRequest. # noqa: E501
:rtype: str
"""
return self._max_signature_length
@max_signature_length.setter
def max_signature_length(self, max_signature_length):
"""Sets the max_signature_length of this SignSessionInfoRequest.
# noqa: E501
:param max_signature_length: The max_signature_length of this SignSessionInfoRequest. # noqa: E501
:type: str
"""
self._max_signature_length = max_signature_length
@property
def return_format(self):
"""Gets the return_format of this SignSessionInfoRequest. # noqa: E501
# noqa: E501
:return: The return_format of this SignSessionInfoRequest. # noqa: E501
:rtype: str
"""
return self._return_format
@return_format.setter
def return_format(self, return_format):
"""Sets the return_format of this SignSessionInfoRequest.
# noqa: E501
:param return_format: The return_format of this SignSessionInfoRequest. # noqa: E501
:type: str
"""
self._return_format = return_format
@property
def signing_location(self):
"""Gets the signing_location of this SignSessionInfoRequest. # noqa: E501
Specifies the physical location where the signing takes place. It can have two enumeration values; InPerson and Online. The default value is Online. # noqa: E501
:return: The signing_location of this SignSessionInfoRequest. # noqa: E501
:rtype: str
"""
return self._signing_location
@signing_location.setter
def signing_location(self, signing_location):
"""Sets the signing_location of this SignSessionInfoRequest.
Specifies the physical location where the signing takes place. It can have two enumeration values; InPerson and Online. The default value is Online. # noqa: E501
:param signing_location: The signing_location of this SignSessionInfoRequest. # noqa: E501
:type: str
"""
self._signing_location = signing_location
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SignSessionInfoRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SignSessionInfoRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
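# Usage sketch (hand-written; the attribute values below are placeholders and
# "pkcs7" is only an illustrative return format, not taken from the spec):
#
#   info = SignSessionInfoRequest(
#       certificate="<base64 certificate>",
#       max_signature_length="2048",
#       return_format="pkcs7",
#       signing_location="Online",     # or "InPerson", per the docstring above
#   )
#   body = info.to_dict()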
|
|
from __future__ import absolute_import, division, print_function
import os
import _pytest._code
import py
import pytest
from _pytest.main import Node, Item, FSCollector
from _pytest.resultlog import generic_path, ResultLog, \
pytest_configure, pytest_unconfigure
def test_generic_path(testdir):
from _pytest.main import Session
config = testdir.parseconfig()
session = Session(config)
p1 = Node('a', config=config, session=session)
#assert p1.fspath is None
p2 = Node('B', parent=p1)
p3 = Node('()', parent = p2)
item = Item('c', parent = p3)
res = generic_path(item)
assert res == 'a.B().c'
p0 = FSCollector('proj/test', config=config, session=session)
p1 = FSCollector('proj/test/a', parent=p0)
p2 = Node('B', parent=p1)
p3 = Node('()', parent = p2)
p4 = Node('c', parent=p3)
item = Item('[1]', parent = p4)
res = generic_path(item)
assert res == 'test/a:B().c[1]'
def test_write_log_entry():
reslog = ResultLog(None, None)
reslog.logfile = py.io.TextIO()
reslog.write_log_entry('name', '.', '')
entry = reslog.logfile.getvalue()
assert entry[-1] == '\n'
entry_lines = entry.splitlines()
assert len(entry_lines) == 1
assert entry_lines[0] == '. name'
reslog.logfile = py.io.TextIO()
reslog.write_log_entry('name', 's', 'Skipped')
entry = reslog.logfile.getvalue()
assert entry[-1] == '\n'
entry_lines = entry.splitlines()
assert len(entry_lines) == 2
assert entry_lines[0] == 's name'
assert entry_lines[1] == ' Skipped'
reslog.logfile = py.io.TextIO()
reslog.write_log_entry('name', 's', 'Skipped\n')
entry = reslog.logfile.getvalue()
assert entry[-1] == '\n'
entry_lines = entry.splitlines()
assert len(entry_lines) == 2
assert entry_lines[0] == 's name'
assert entry_lines[1] == ' Skipped'
reslog.logfile = py.io.TextIO()
longrepr = ' tb1\n tb 2\nE tb3\nSome Error'
reslog.write_log_entry('name', 'F', longrepr)
entry = reslog.logfile.getvalue()
assert entry[-1] == '\n'
entry_lines = entry.splitlines()
assert len(entry_lines) == 5
assert entry_lines[0] == 'F name'
assert entry_lines[1:] == [' '+line for line in longrepr.splitlines()]
class TestWithFunctionIntegration(object):
# XXX (hpk) i think that the resultlog plugin should
# provide a Parser object so that one can remain
# ignorant regarding formatting details.
def getresultlog(self, testdir, arg):
resultlog = testdir.tmpdir.join("resultlog")
testdir.plugins.append("resultlog")
args = ["--resultlog=%s" % resultlog] + [arg]
testdir.runpytest(*args)
return [x for x in resultlog.readlines(cr=0) if x]
def test_collection_report(self, testdir):
ok = testdir.makepyfile(test_collection_ok="")
fail = testdir.makepyfile(test_collection_fail="XXX")
lines = self.getresultlog(testdir, ok)
assert not lines
lines = self.getresultlog(testdir, fail)
assert lines
assert lines[0].startswith("F ")
assert lines[0].endswith("test_collection_fail.py"), lines[0]
for x in lines[1:]:
assert x.startswith(" ")
assert "XXX" in "".join(lines[1:])
def test_log_test_outcomes(self, testdir):
mod = testdir.makepyfile(test_mod="""
import pytest
def test_pass(): pass
def test_skip(): pytest.skip("hello")
def test_fail(): raise ValueError("FAIL")
@pytest.mark.xfail
def test_xfail(): raise ValueError("XFAIL")
@pytest.mark.xfail
def test_xpass(): pass
""")
lines = self.getresultlog(testdir, mod)
assert len(lines) >= 3
assert lines[0].startswith(". ")
assert lines[0].endswith("test_pass")
assert lines[1].startswith("s "), lines[1]
assert lines[1].endswith("test_skip")
assert lines[2].find("hello") != -1
assert lines[3].startswith("F ")
assert lines[3].endswith("test_fail")
tb = "".join(lines[4:8])
assert tb.find('raise ValueError("FAIL")') != -1
assert lines[8].startswith('x ')
tb = "".join(lines[8:14])
assert tb.find('raise ValueError("XFAIL")') != -1
assert lines[14].startswith('X ')
assert len(lines) == 15
@pytest.mark.parametrize("style", ("native", "long", "short"))
def test_internal_exception(self, style):
# they are produced for example by a teardown failing
# at the end of the run or a failing hook invocation
try:
raise ValueError
except ValueError:
excinfo = _pytest._code.ExceptionInfo()
reslog = ResultLog(None, py.io.TextIO())
reslog.pytest_internalerror(excinfo.getrepr(style=style))
entry = reslog.logfile.getvalue()
entry_lines = entry.splitlines()
assert entry_lines[0].startswith('! ')
if style != "native":
assert os.path.basename(__file__)[:-9] in entry_lines[0] #.pyc/class
assert entry_lines[-1][0] == ' '
assert 'ValueError' in entry
def test_generic(testdir, LineMatcher):
testdir.plugins.append("resultlog")
testdir.makepyfile("""
import pytest
def test_pass():
pass
def test_fail():
assert 0
def test_skip():
pytest.skip("")
@pytest.mark.xfail
def test_xfail():
assert 0
@pytest.mark.xfail(run=False)
def test_xfail_norun():
assert 0
""")
testdir.runpytest("--resultlog=result.log")
lines = testdir.tmpdir.join("result.log").readlines(cr=0)
LineMatcher(lines).fnmatch_lines([
". *:test_pass",
"F *:test_fail",
"s *:test_skip",
"x *:test_xfail",
"x *:test_xfail_norun",
])
def test_makedir_for_resultlog(testdir, LineMatcher):
"""--resultlog should automatically create directories for the log file"""
testdir.plugins.append("resultlog")
testdir.makepyfile("""
import pytest
def test_pass():
pass
""")
testdir.runpytest("--resultlog=path/to/result.log")
lines = testdir.tmpdir.join("path/to/result.log").readlines(cr=0)
LineMatcher(lines).fnmatch_lines([
". *:test_pass",
])
def test_no_resultlog_on_slaves(testdir):
config = testdir.parseconfig("-p", "resultlog", "--resultlog=resultlog")
assert not hasattr(config, '_resultlog')
pytest_configure(config)
assert hasattr(config, '_resultlog')
pytest_unconfigure(config)
assert not hasattr(config, '_resultlog')
config.slaveinput = {}
pytest_configure(config)
assert not hasattr(config, '_resultlog')
pytest_unconfigure(config)
assert not hasattr(config, '_resultlog')
def test_failure_issue380(testdir):
testdir.makeconftest("""
import pytest
class MyCollector(pytest.File):
def collect(self):
raise ValueError()
def repr_failure(self, excinfo):
return "somestring"
def pytest_collect_file(path, parent):
return MyCollector(parent=parent, fspath=path)
""")
testdir.makepyfile("""
def test_func():
pass
""")
result = testdir.runpytest("--resultlog=log")
assert result.ret == 2
|
|
# Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import logging
import socket
import struct
import threading
import time
from cryptography import x509
from kmip.core import enums
from kmip.core import exceptions
from kmip.core.messages import contents
from kmip.core.messages import messages
from kmip.core import utils
from kmip.services.server import auth
class KmipSession(threading.Thread):
"""
A session thread representing a single KMIP client/server interaction.
"""
def __init__(self,
engine,
connection,
address,
name=None,
enable_tls_client_auth=True,
auth_settings=None):
"""
Create a KmipSession.
Args:
engine (KmipEngine): A reference to the central server application
that handles message processing. Required.
connection (socket): A client socket.socket TLS connection
representing a new KMIP connection. Required.
address (tuple): The address tuple produced with the session
connection. Contains the IP address and port number of the
remote connection endpoint. Required.
name (str): The name of the KmipSession. Optional, defaults to
None.
enable_tls_client_auth (bool): A flag that enables a strict check
for the client auth flag in the extended key usage extension
in client certificates when establishing the client/server TLS
connection. Optional, defaults to True.
auth_settings (list): A list of tuples, each containing (1) the
name of the 'auth:' settings block from the server config file,
and (2) a dictionary of configuration settings for a specific
authentication plugin. Optional, defaults to None.
"""
super(KmipSession, self).__init__(
group=None,
target=None,
name=name,
args=(),
kwargs={}
)
self._logger = logging.getLogger(
'kmip.server.session.{0}'.format(self.name)
)
self._engine = engine
self._connection = connection
self._address = address
self._enable_tls_client_auth = enable_tls_client_auth
self._auth_settings = [] if auth_settings is None else auth_settings
self._session_time = time.time()
self._max_buffer_size = 4096
self._max_request_size = 1048576
self._max_response_size = 1048576
def run(self):
"""
The main thread routine executed by invoking thread.start.
This method manages the new client connection, running a message
handling loop. Once this method completes, the thread is finished.
"""
self._logger.info("Starting session: {0}".format(self.name))
try:
self._connection.do_handshake()
except Exception as e:
self._logger.info("Failure running TLS handshake")
self._logger.exception(e)
else:
while True:
try:
self._handle_message_loop()
except exceptions.ConnectionClosed as e:
break
except Exception as e:
self._logger.info("Failure handling message loop")
self._logger.exception(e)
self._connection.shutdown(socket.SHUT_RDWR)
self._connection.close()
self._logger.info("Stopping session: {0}".format(self.name))
def _handle_message_loop(self):
request_data = self._receive_request()
request = messages.RequestMessage()
max_size = self._max_response_size
kmip_version = contents.protocol_version_to_kmip_version(
self._engine.default_protocol_version
)
try:
if hasattr(self._connection, 'shared_ciphers'):
shared_ciphers = self._connection.shared_ciphers()
self._logger.debug(
"Possible session ciphers: {0}".format(len(shared_ciphers))
)
for cipher in shared_ciphers:
self._logger.debug(cipher)
self._logger.debug(
"Session cipher selected: {0}".format(
self._connection.cipher()
)
)
certificate = auth.get_certificate_from_connection(
self._connection
)
if certificate is None:
raise exceptions.PermissionDenied(
"The client certificate could not be loaded from the "
"session connection."
)
if self._enable_tls_client_auth:
extension = auth.get_extended_key_usage_from_certificate(
certificate
)
if extension is None:
raise exceptions.PermissionDenied(
"The extended key usage extension is missing from "
"the client certificate."
)
if x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH not in extension:
raise exceptions.PermissionDenied(
"The extended key usage extension is not marked for "
"client authentication in the client certificate."
)
request.read(request_data, kmip_version=kmip_version)
except exceptions.PermissionDenied as e:
self._logger.warning("Failure verifying the client certificate.")
self._logger.exception(e)
response = self._engine.build_error_response(
contents.ProtocolVersion(1, 0),
enums.ResultReason.AUTHENTICATION_NOT_SUCCESSFUL,
"Error verifying the client certificate. "
"See server logs for more information."
)
except Exception as e:
self._logger.warning("Failure parsing request message.")
self._logger.exception(e)
response = self._engine.build_error_response(
contents.ProtocolVersion(1, 0),
enums.ResultReason.INVALID_MESSAGE,
"Error parsing request message. See server logs for more "
"information."
)
else:
try:
client_identity = self.authenticate(certificate, request)
self._logger.info(
"Session client identity: {}".format(client_identity[0])
)
except Exception:
self._logger.warning("Authentication failed.")
response = self._engine.build_error_response(
request.request_header.protocol_version,
enums.ResultReason.AUTHENTICATION_NOT_SUCCESSFUL,
"An error occurred during client authentication. "
"See server logs for more information."
)
else:
try:
results = self._engine.process_request(
request,
client_identity
)
response, max_response_size, protocol_version = results
kmip_version = contents.protocol_version_to_kmip_version(
protocol_version
)
if max_response_size:
max_size = max_response_size
except exceptions.KmipError as e:
response = self._engine.build_error_response(
request.request_header.protocol_version,
e.reason,
str(e)
)
except Exception as e:
self._logger.warning(
"An unexpected error occurred while processing "
"request."
)
self._logger.exception(e)
response = self._engine.build_error_response(
request.request_header.protocol_version,
enums.ResultReason.GENERAL_FAILURE,
"An unexpected error occurred while processing "
"request. See server logs for more information."
)
response_data = utils.BytearrayStream()
response.write(response_data, kmip_version=kmip_version)
if len(response_data) > max_size:
self._logger.warning(
"Response message length too large: "
"{0} bytes, max {1} bytes".format(
len(response_data),
self._max_response_size
)
)
response = self._engine.build_error_response(
request.request_header.protocol_version,
enums.ResultReason.RESPONSE_TOO_LARGE,
"Response message length too large. See server logs for "
"more information."
)
response_data = utils.BytearrayStream()
response.write(response_data, kmip_version=kmip_version)
self._send_response(response_data.buffer)
def authenticate(self, certificate, request):
credentials = []
if request.request_header.authentication is not None:
credentials = request.request_header.authentication.credentials
plugin_enabled = False
for auth_settings in self._auth_settings:
plugin_name, plugin_config = auth_settings
if plugin_name.startswith("auth:slugs"):
if plugin_config.get("enabled") == "True":
plugin_enabled = True
plugin = auth.SLUGSConnector(plugin_config.get("url"))
self._logger.debug(
"Authenticating with plugin: {}".format(plugin_name)
)
try:
client_identity = plugin.authenticate(
certificate,
(self._address, self._session_time),
credentials
)
except Exception as e:
self._logger.warning(
"Authentication failed."
)
self._logger.error(e)
self._logger.exception(e)
else:
self._logger.debug(
"Authentication succeeded for client identity: "
"{}".format(client_identity[0])
)
return client_identity
else:
self._logger.warning(
"Authentication plugin '{}' is not "
"supported.".format(plugin_name)
)
if not plugin_enabled:
self._logger.debug(
"No authentication plugins are enabled. The client identity "
"will be extracted from the client certificate."
)
try:
client_identity = auth.get_client_identity_from_certificate(
certificate
)
except Exception as e:
self._logger.warning("Client identity extraction failed.")
self._logger.exception(e)
else:
self._logger.debug(
"Extraction succeeded for client identity: {}".format(
client_identity
)
)
return tuple([client_identity, None])
raise exceptions.PermissionDenied("Authentication failed.")
def _receive_request(self):
header = self._receive_bytes(8)
message_size = struct.unpack('!I', header[4:])[0]
payload = self._receive_bytes(message_size)
data = utils.BytearrayStream(header + payload)
return data
def _receive_bytes(self, message_size):
bytes_received = 0
message = b''
while bytes_received < message_size:
partial_message = self._connection.recv(
min(message_size - bytes_received, self._max_buffer_size)
)
if partial_message is None:
break
elif len(partial_message) == 0:
raise exceptions.ConnectionClosed()
else:
bytes_received += len(partial_message)
message += partial_message
if bytes_received != message_size:
raise ValueError(
"Invalid KMIP message received. Actual message length "
"does not match the advertised header length."
)
else:
self._logger.debug(
"Request encoding: {}".format(binascii.hexlify(message))
)
return message
def _send_response(self, data):
if len(data) > 0:
self._logger.debug(
"Response encoding: {}".format(binascii.hexlify(bytes(data)))
)
self._connection.sendall(bytes(data))
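# Construction sketch (hedged; the listening/accept loop that normally creates
# sessions is outside this file). ``engine`` is a KmipEngine and
# ``tls_connection``/``address`` would come from an ssl-wrapped accept():
#
#   session = KmipSession(
#       engine,
#       tls_connection,
#       address,
#       name="Session-1",
#       enable_tls_client_auth=True,
#   )
#   session.start()      # runs KmipSession.run in its own thread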
|
|
"""Switch platform for UniFi integration.
Support for controlling power supply of clients which are powered over Ethernet (POE).
Support for controlling network access of clients selected in option flow.
Support for controlling deep packet inspection (DPI) restriction groups.
"""
import logging
from typing import Any
from aiounifi.api import SOURCE_EVENT
from aiounifi.events import (
WIRED_CLIENT_BLOCKED,
WIRED_CLIENT_UNBLOCKED,
WIRELESS_CLIENT_BLOCKED,
WIRELESS_CLIENT_UNBLOCKED,
)
from homeassistant.components.switch import DOMAIN, SwitchEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_registry import async_entries_for_config_entry
from homeassistant.helpers.restore_state import RestoreEntity
from .const import ATTR_MANUFACTURER, DOMAIN as UNIFI_DOMAIN
from .unifi_client import UniFiClient
from .unifi_entity_base import UniFiBase
_LOGGER = logging.getLogger(__name__)
BLOCK_SWITCH = "block"
DPI_SWITCH = "dpi"
POE_SWITCH = "poe"
CLIENT_BLOCKED = (WIRED_CLIENT_BLOCKED, WIRELESS_CLIENT_BLOCKED)
CLIENT_UNBLOCKED = (WIRED_CLIENT_UNBLOCKED, WIRELESS_CLIENT_UNBLOCKED)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up switches for UniFi component.
Switches are controlling network access and switch ports with POE.
"""
controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
controller.entities[DOMAIN] = {
BLOCK_SWITCH: set(),
POE_SWITCH: set(),
DPI_SWITCH: set(),
}
if controller.site_role != "admin":
return
# Store previously known POE control entities in case their POE are turned off.
known_poe_clients = []
entity_registry = await hass.helpers.entity_registry.async_get_registry()
for entry in async_entries_for_config_entry(entity_registry, config_entry.entry_id):
if not entry.unique_id.startswith(POE_SWITCH):
continue
mac = entry.unique_id.replace(f"{POE_SWITCH}-", "")
if mac not in controller.api.clients:
continue
known_poe_clients.append(mac)
for mac in controller.option_block_clients:
if mac not in controller.api.clients and mac in controller.api.clients_all:
client = controller.api.clients_all[mac]
controller.api.clients.process_raw([client.raw])
@callback
def items_added(
clients: set = controller.api.clients,
devices: set = controller.api.devices,
dpi_groups: set = controller.api.dpi_groups,
) -> None:
"""Update the values of the controller."""
if controller.option_block_clients:
add_block_entities(controller, async_add_entities, clients)
if controller.option_poe_clients:
add_poe_entities(controller, async_add_entities, clients, known_poe_clients)
if controller.option_dpi_restrictions:
add_dpi_entities(controller, async_add_entities, dpi_groups)
for signal in (controller.signal_update, controller.signal_options_update):
config_entry.async_on_unload(
async_dispatcher_connect(hass, signal, items_added)
)
items_added()
known_poe_clients.clear()
@callback
def add_block_entities(controller, async_add_entities, clients):
"""Add new switch entities from the controller."""
switches = []
for mac in controller.option_block_clients:
if mac in controller.entities[DOMAIN][BLOCK_SWITCH] or mac not in clients:
continue
client = controller.api.clients[mac]
switches.append(UniFiBlockClientSwitch(client, controller))
if switches:
async_add_entities(switches)
@callback
def add_poe_entities(controller, async_add_entities, clients, known_poe_clients):
"""Add new switch entities from the controller."""
switches = []
devices = controller.api.devices
for mac in clients:
if mac in controller.entities[DOMAIN][POE_SWITCH]:
continue
client = controller.api.clients[mac]
# Try to identify new clients powered by POE.
# Known POE clients have been created in previous HASS sessions.
# If port_poe is None the port does not support POE
# If poe_enable is False we can't know if a POE client is available for control.
if mac not in known_poe_clients and (
mac in controller.wireless_clients
or client.sw_mac not in devices
or not devices[client.sw_mac].ports[client.sw_port].port_poe
or not devices[client.sw_mac].ports[client.sw_port].poe_enable
or controller.mac == client.mac
):
continue
        # Multiple POE devices on the same port indicate a non-UniFi POE-driven switch
multi_clients_on_port = False
for client2 in controller.api.clients.values():
if mac in known_poe_clients:
break
if (
client2.is_wired
and client.mac != client2.mac
and client.sw_mac == client2.sw_mac
and client.sw_port == client2.sw_port
):
multi_clients_on_port = True
break
if multi_clients_on_port:
continue
switches.append(UniFiPOEClientSwitch(client, controller))
if switches:
async_add_entities(switches)
@callback
def add_dpi_entities(controller, async_add_entities, dpi_groups):
"""Add new switch entities from the controller."""
switches = []
for group in dpi_groups:
if (
group in controller.entities[DOMAIN][DPI_SWITCH]
or not dpi_groups[group].dpiapp_ids
):
continue
switches.append(UniFiDPIRestrictionSwitch(dpi_groups[group], controller))
if switches:
async_add_entities(switches)
class UniFiPOEClientSwitch(UniFiClient, SwitchEntity, RestoreEntity):
"""Representation of a client that uses POE."""
DOMAIN = DOMAIN
TYPE = POE_SWITCH
def __init__(self, client, controller):
"""Set up POE switch."""
super().__init__(client, controller)
self.poe_mode = None
if client.sw_port and self.port.poe_mode != "off":
self.poe_mode = self.port.poe_mode
async def async_added_to_hass(self):
"""Call when entity about to be added to Home Assistant."""
await super().async_added_to_hass()
if self.poe_mode: # POE is enabled and client in a known state
return
if (state := await self.async_get_last_state()) is None:
return
self.poe_mode = state.attributes.get("poe_mode")
if not self.client.sw_mac:
self.client.raw["sw_mac"] = state.attributes.get("switch")
if not self.client.sw_port:
self.client.raw["sw_port"] = state.attributes.get("port")
@property
def is_on(self):
"""Return true if POE is active."""
return self.port.poe_mode != "off"
@property
def available(self):
"""Return if switch is available.
        poe_mode is None means the POE state is unknown.
        A missing sw_mac means the client was restored from an earlier session.
"""
return (
self.poe_mode is not None
and self.controller.available
and self.client.sw_port
and self.client.sw_mac
and self.client.sw_mac in self.controller.api.devices
)
async def async_turn_on(self, **kwargs):
"""Enable POE for client."""
await self.device.async_set_port_poe_mode(self.client.sw_port, self.poe_mode)
async def async_turn_off(self, **kwargs):
"""Disable POE for client."""
await self.device.async_set_port_poe_mode(self.client.sw_port, "off")
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
attributes = {
"power": self.port.poe_power,
"switch": self.client.sw_mac,
"port": self.client.sw_port,
"poe_mode": self.poe_mode,
}
return attributes
@property
def device(self):
"""Shortcut to the switch that client is connected to."""
return self.controller.api.devices[self.client.sw_mac]
@property
def port(self):
"""Shortcut to the switch port that client is connected to."""
return self.device.ports[self.client.sw_port]
async def options_updated(self) -> None:
"""Config entry options are updated, remove entity if option is disabled."""
if not self.controller.option_poe_clients:
await self.remove_item({self.client.mac})
class UniFiBlockClientSwitch(UniFiClient, SwitchEntity):
"""Representation of a blockable client."""
DOMAIN = DOMAIN
TYPE = BLOCK_SWITCH
def __init__(self, client, controller):
"""Set up block switch."""
super().__init__(client, controller)
self._is_blocked = client.blocked
@callback
def async_update_callback(self) -> None:
"""Update the clients state."""
if (
self.client.last_updated == SOURCE_EVENT
and self.client.event.event in CLIENT_BLOCKED + CLIENT_UNBLOCKED
):
self._is_blocked = self.client.event.event in CLIENT_BLOCKED
super().async_update_callback()
@property
def is_on(self):
"""Return true if client is allowed to connect."""
return not self._is_blocked
async def async_turn_on(self, **kwargs):
"""Turn on connectivity for client."""
await self.controller.api.clients.async_unblock(self.client.mac)
async def async_turn_off(self, **kwargs):
"""Turn off connectivity for client."""
await self.controller.api.clients.async_block(self.client.mac)
@property
def icon(self):
"""Return the icon to use in the frontend."""
if self._is_blocked:
return "mdi:network-off"
return "mdi:network"
async def options_updated(self) -> None:
"""Config entry options are updated, remove entity if option is disabled."""
if self.client.mac not in self.controller.option_block_clients:
await self.remove_item({self.client.mac})
class UniFiDPIRestrictionSwitch(UniFiBase, SwitchEntity):
"""Representation of a DPI restriction group."""
DOMAIN = DOMAIN
TYPE = DPI_SWITCH
@property
def key(self) -> Any:
"""Return item key."""
return self._item.id
@property
def unique_id(self):
"""Return a unique identifier for this switch."""
return self._item.id
@property
def name(self) -> str:
"""Return the name of the client."""
return self._item.name
@property
def icon(self):
"""Return the icon to use in the frontend."""
if self._item.enabled:
return "mdi:network"
return "mdi:network-off"
@property
def is_on(self):
"""Return true if client is allowed to connect."""
return self._item.enabled
async def async_turn_on(self, **kwargs):
"""Turn on connectivity for client."""
await self.controller.api.dpi_groups.async_enable(self._item)
async def async_turn_off(self, **kwargs):
"""Turn off connectivity for client."""
await self.controller.api.dpi_groups.async_disable(self._item)
async def options_updated(self) -> None:
"""Config entry options are updated, remove entity if option is disabled."""
if not self.controller.option_dpi_restrictions:
await self.remove_item({self.key})
@property
def device_info(self) -> DeviceInfo:
"""Return a service description for device registry."""
return {
"identifiers": {(DOMAIN, f"unifi_controller_{self._item.site_id}")},
"name": "UniFi Controller",
"manufacturer": ATTR_MANUFACTURER,
"model": "UniFi Controller",
"entry_type": "service",
}
|
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2015 Peter Williams <[email protected]> and collaborators
# Licensed under the MIT License.
"""pwkit.slurp - run a program and capture its output."""
from __future__ import absolute_import, division, print_function, unicode_literals
__all__ = str ('Event Redirection Slurper').split ()
import fcntl, os, signal, six, subprocess, sys
from select import select, error as selecterror
from . import Holder
try:
    from subprocess import DEVNULL as _DEVNULL
except ImportError:
_DEVNULL = subprocess.STDOUT - 1
@Holder
class Event (object):
Stdout = 'stdout'
Stderr = 'stderr'
ForwardedSignal = 'forwarded-signal'
Timeout = 'timeout'
@Holder
class Redirection (object):
Pipe = subprocess.PIPE
Stdout = subprocess.STDOUT
DevNull = _DEVNULL
signals_for_child = [
signal.SIGHUP,
signal.SIGINT,
signal.SIGQUIT,
signal.SIGTERM,
signal.SIGUSR1,
signal.SIGUSR2,
]
class SlurperIterator (object):
def __init__ (self, parent):
self.parent = parent
def __iter__ (self):
return self
def __next__ (self): # Python 3
if not len (self.parent._files):
raise StopIteration ()
return self.parent._next_lowlevel ()
next = __next__ # Python 2
def _decode_streams (event_source, which_events, encoding):
from codecs import getincrementaldecoder
decoders = {}
for etype, edata in event_source:
if etype not in which_events:
yield etype, edata
continue
dec = decoders.get (etype)
if dec is None:
dec = decoders[etype] = getincrementaldecoder (encoding) ()
final = not len (edata)
result = dec.decode (edata, final)
if len (result):
yield etype, result # no false EOF indicators
if final:
yield etype, edata # make sure we have an EOF signal
def _linebreak_streams (event_source, which_events):
partials = {}
for etype, edata in event_source:
if etype not in which_events:
yield etype, edata
continue
if not len (edata):
# EOF on this stream.
trailer = partials.get (etype, edata)
if len (trailer):
yield etype, trailer
yield etype, edata
continue
lines = (partials.get (etype, edata * 0) + edata).split (edata.__class__ (b'\n'))
for line in lines[:-1]:
yield etype, line
partials[etype] = lines[-1]
class Slurper (object):
_chunksize = 1024
def __init__ (self, argv=None, env=None, cwd=None, propagate_signals=True,
timeout=10, linebreak=False, encoding=None,
stdin=Redirection.DevNull, stdout=Redirection.Pipe,
stderr=Redirection.Pipe, executable=None, subproc_factory=None):
if subproc_factory is None:
subproc_factory = subprocess.Popen
self.subproc_factory = subproc_factory
self.proc = None
self.argv = argv
self.env = env
self.cwd = cwd
self.propagate_signals = propagate_signals
self.timeout = timeout
self.linebreak = linebreak
self.encoding = encoding
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.executable = executable
def __enter__ (self):
self._prev_handlers = {}
self._other_events = []
self._file_event_types = {}
self._files = []
stdin = self.stdin
if stdin == Redirection.DevNull:
stdin = open (os.devnull, 'r')
stdout = self.stdout
if stdout == Redirection.DevNull:
stdout = open (os.devnull, 'w')
stderr = self.stderr
if stderr == Redirection.DevNull:
stderr = open (os.devnull, 'w')
self.proc = self.subproc_factory (
self.argv,
env=self.env,
executable=self.executable,
cwd=self.cwd,
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=False,
)
if self.propagate_signals:
def handle (signum, frame):
self.proc.send_signal (signum)
self._other_events.insert (0, (Event.ForwardedSignal, signum))
for signum in signals_for_child:
self._prev_handlers[signum] = signal.signal (signum, handle)
if stdout == Redirection.Pipe:
self._file_event_types[self.proc.stdout.fileno ()] = Event.Stdout
self._files.append (self.proc.stdout)
if stderr == Redirection.Pipe:
self._file_event_types[self.proc.stderr.fileno ()] = Event.Stderr
self._files.append (self.proc.stderr)
for fd in self._files:
fl = fcntl.fcntl (fd.fileno (), fcntl.F_GETFL)
fcntl.fcntl (fd.fileno (), fcntl.F_SETFL, fl | os.O_NONBLOCK)
return self
def _next_lowlevel (self):
if len (self._other_events):
return self._other_events.pop ()
while True:
try:
rd, wr, er = select (self._files, [], [], self.timeout)
break
except selecterror as e:
# if EINTR or EAGAIN, try again; we won't get EINTR unless
# we're forwarding signals, since otherwise it'll show up as a
# KeyboardInterrupt. "e.args[0]" is the only way to get errno.
if e.args[0] not in (4, 11):
raise
for fd in rd:
chunk = fd.read (self._chunksize)
if not len (chunk):
self._files.remove (fd)
return (self._file_event_types[fd.fileno ()], chunk)
return (Event.Timeout, None)
def __iter__ (self):
result = SlurperIterator (self)
if self.encoding is not None:
which = frozenset ((Event.Stdout, Event.Stderr))
result = _decode_streams (result, which, self.encoding)
if self.linebreak:
which = frozenset ((Event.Stdout, Event.Stderr))
result = _linebreak_streams (result, which)
return result
def __exit__ (self, etype, evalue, etb):
self.proc.wait ()
for signum, prev_handler in six.iteritems (self._prev_handlers):
signal.signal (signum, prev_handler)
return False
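# Usage sketch (assumes the target program is on PATH; only a couple of the
# event types defined above are handled):
#
#   with Slurper(argv=['ls', '-l'], linebreak=True, encoding='utf-8') as slurp:
#       for etype, edata in slurp:
#           if etype == Event.Stdout:
#               print(edata)
#           elif etype == Event.Timeout:
#               pass  # no output for `timeout` seconds
#   exit_code = slurp.proc.returncode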
|
|
# -*- encoding: utf-8 -*-
import logging
import tempfile
import sh
import os
import shutil
import time
import hashlib
import json
from StringIO import StringIO
from gzip import GzipFile
from beefish import encrypt, decrypt
from boto.s3.key import Key
import bakthat
from bakthat.conf import DEFAULT_DESTINATION
from bakthat.backends import S3Backend
from bakthat.models import Backups
log = logging.getLogger(__name__)
class KeyValue(S3Backend):
"""A Key Value store to store/retrieve object/string on S3.
Data is gzipped and json encoded before uploading,
compression can be disabled.
"""
def __init__(self, conf={}, profile="default"):
S3Backend.__init__(self, conf, profile)
self.profile = profile
def set_key(self, keyname, value, **kwargs):
"""Store a string as keyname in S3.
:type keyname: str
:param keyname: Key name
:type value: str
:param value: Value to save, will be json encoded.
:type value: bool
:keyword compress: Compress content with gzip,
True by default
"""
k = Key(self.bucket)
k.key = keyname
backup_date = int(time.time())
backup = dict(filename=keyname,
stored_filename=keyname,
backup_date=backup_date,
last_updated=backup_date,
backend="s3",
is_deleted=False,
tags="",
metadata={"KeyValue": True,
"is_enc": False,
"is_gzipped": False})
fileobj = StringIO(json.dumps(value))
if kwargs.get("compress", True):
backup["metadata"]["is_gzipped"] = True
out = StringIO()
f = GzipFile(fileobj=out, mode="w")
f.write(fileobj.getvalue())
f.close()
fileobj = StringIO(out.getvalue())
password = kwargs.get("password")
if password:
backup["metadata"]["is_enc"] = True
out = StringIO()
encrypt(fileobj, out, password)
fileobj = out
# Creating the object on S3
k.set_contents_from_string(fileobj.getvalue())
k.set_acl("private")
backup["size"] = k.size
access_key = self.conf.get("access_key")
container_key = self.conf.get(self.container_key)
backup["backend_hash"] = hashlib.sha512(access_key + container_key).hexdigest()
Backups.upsert(**backup)
def get_key(self, keyname, **kwargs):
"""Return the object stored under keyname.
:type keyname: str
:param keyname: Key name
:type default: str
:keyword default: Default value if key name does not exist, None by default
:rtype: str
:return: The key content as string, or default value.
"""
k = Key(self.bucket)
k.key = keyname
if k.exists():
backup = Backups.get(Backups.stored_filename % keyname, Backups.backend == "s3")
fileobj = StringIO(k.get_contents_as_string())
if backup.is_encrypted():
out = StringIO()
decrypt(fileobj, out, kwargs.get("password"))
fileobj = out
fileobj.seek(0)
if backup.is_gzipped():
f = GzipFile(fileobj=fileobj, mode="r")
out = f.read()
f.close()
fileobj = StringIO(out)
return json.loads(fileobj.getvalue())
return kwargs.get("default")
def delete_key(self, keyname):
"""Delete the given key.
:type keyname: str
:param keyname: Key name
"""
k = Key(self.bucket)
k.key = keyname
if k.exists():
k.delete()
backup = Backups.match_filename(keyname, "s3", profile=self.profile)
if backup:
backup.set_deleted()
return True
def get_key_url(self, keyname, expires_in, method="GET"):
"""Generate a URL for the keyname object.
Be careful, the response is JSON encoded.
:type keyname: str
:param keyname: Key name
:type expires_in: int
:param expires_in: Number of seconds before the generated link expires
:type method: str
:param method: HTTP method for access
:rtype str:
:return: The URL to download the content of the given keyname
"""
k = Key(self.bucket)
k.key = keyname
if k.exists():
return k.generate_url(expires_in, method)
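# Illustrative usage sketch for KeyValue (defined only, not called anywhere in
# this module). It assumes a working "default" bakthat profile with S3
# credentials; the key name and payload below are placeholders for demonstration.
def _example_keyvalue_usage():
    kv = KeyValue(profile="default")
    kv.set_key("app/state", {"counter": 1})      # JSON encoded, then gzipped
    state = kv.get_key("app/state", default={})  # decoded back to a dict
    url = kv.get_key_url("app/state", 3600)      # pre-signed URL, 1 hour
    kv.delete_key("app/state")
    return state, url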
class BakHelper:
"""Helper that makes building scripts with bakthat better faster stronger.
Designed to be used as a context manager.
:type backup_name: str
:param backup_name: Backup name, also used as the prefix of the created temporary directory.
:type destination: str
:keyword destination: Destination (glacier|s3)
:type password: str
:keyword password: Password (Empty string to disable encryption, disabled by default)
:type profile: str
:keyword profile: Profile name, only valid if no custom conf is provided
:type conf: dict
:keyword conf: Override profiles configuration
:type tags: list
:param tags: List of tags
"""
def __init__(self, backup_name, **kwargs):
self.backup_name = backup_name
self.dir_prefix = "{0}_".format(backup_name)
self.destination = kwargs.get("destination", DEFAULT_DESTINATION)
self.password = kwargs.get("password", "")
self.profile = kwargs.get("profile", "default")
self.conf = kwargs.get("conf", {})
self.tags = kwargs.get("tags", [])
# Key for bakmanager.io hook
self.key = kwargs.get("key", None)
self.syncer = None
def __enter__(self):
"""Save the old current working directory,
create a temporary directory,
and make it the new current working directory.
"""
self.old_cwd = os.getcwd()
self.tmpd = tempfile.mkdtemp(prefix=self.dir_prefix)
sh.cd(self.tmpd)
log.info("New current working directory: {0}.".format(self.tmpd))
return self
def __exit__(self, type, value, traceback):
"""Reseting the current working directory,
and run synchronization if enabled.
"""
sh.cd(self.old_cwd)
log.info("Back to {0}".format(self.old_cwd))
shutil.rmtree(self.tmpd)
if self.syncer:
log.debug("auto sync")
self.sync()
def sync(self):
"""Shortcut for calling BakSyncer."""
if self.syncer:
try:
return self.syncer.sync()
except Exception, exc:
log.exception(exc)
def enable_sync(self, api_url, auth=None):
"""Enable synchronization with :class:`bakthat.sync.BakSyncer` (optional).
:type api_url: str
:param api_url: Base API URL.
:type auth: tuple
:param auth: Optional, tuple/list (username, password) for API authentication.
"""
log.debug("Enabling BakSyncer to {0}".format(api_url))
from bakthat.sync import BakSyncer
self.syncer = BakSyncer(api_url, auth)
def backup(self, filename=None, **kwargs):
"""Perform backup.
:type filename: str
:param filename: File/directory to backup.
:type password: str
:keyword password: Override already set password.
:type destination: str
:keyword destination: Override already set destination.
:type tags: list
:keyword tags: Tags list
:type profile: str
:keyword profile: Profile name
:type conf: dict
:keyword conf: Override profiles configuration
:rtype: dict
:return: A dict containing the following keys: stored_filename, size, metadata and filename.
"""
if filename is None:
filename = self.tmpd
return bakthat.backup(filename,
destination=kwargs.get("destination", self.destination),
password=kwargs.get("password", self.password),
tags=kwargs.get("tags", self.tags),
profile=kwargs.get("profile", self.profile),
conf=kwargs.get("conf", self.conf),
key=kwargs.get("key", self.key),
custom_filename=self.backup_name)
def restore(self, filename, **kwargs):
"""Restore backup in the current working directory.
:type filename: str
:param filename: File/directory to restore.
:type password: str
:keyword password: Override already set password.
:type destination: str
:keyword destination: Override already set destination.
:type profile: str
:keyword profile: Profile name
:type conf: dict
:keyword conf: Override profiles configuration
:rtype: bool
:return: True if successful.
"""
return bakthat.restore(filename,
destination=kwargs.get("destination", self.destination),
password=kwargs.get("password", self.password),
profile=kwargs.get("profile", self.profile),
conf=kwargs.get("conf", self.conf))
def delete_older_than(self, filename=None, interval=None, **kwargs):
"""Delete backups older than the given interval string.
:type filename: str
:param filename: File/directory name.
:type interval: str
:param interval: Interval string like 1M, 1W, 1M3W4h2s...
(s => seconds, m => minutes, h => hours, D => days, W => weeks, M => months, Y => Years).
:type destination: str
:keyword destination: Override already set destination.
:type profile: str
:keyword profile: Profile name
:type conf: dict
:keyword conf: Override profiles configuration
:rtype: list
:return: A list containing the deleted keys (S3) or archives (Glacier).
"""
if filename is None:
filename = self.tmpd
return bakthat.delete_older_than(filename, interval,
destination=kwargs.get("destination", self.destination),
profile=kwargs.get("profile", self.profile),
conf=kwargs.get("conf", self.conf))
def rotate(self, filename=None, **kwargs):
"""Rotate backup using grandfather-father-son rotation scheme.
:type filename: str
:param filename: File/directory name.
:type destination: str
:keyword destination: Override already set destination.
:type profile: str
:keyword profile: Profile name
:type conf: dict
:keyword conf: Override profiles configuration
:rtype: list
:return: A list containing the deleted keys (S3) or archives (Glacier).
"""
if filename is None:
filename = self.backup_name
return bakthat.rotate_backups(filename,
destination=kwargs.pop("destination", self.destination),
profile=kwargs.get("profile", self.profile),
conf=kwargs.get("conf", self.conf))
|
|
import numpy as np
from mpi4py import MPI
from pmesh.pm import ParticleMesh
from pySDC.core.Errors import ParameterError, ProblemError
from pySDC.core.Problem import ptype
from pySDC.playgrounds.pmesh.PMESH_datatype import pmesh_datatype, rhs_imex_pmesh
class allencahn_imex(ptype):
"""
Example implementing Allen-Cahn equation in 2-3D using PMESH for solving linear parts, IMEX time-stepping
PMESH: https://github.com/rainwoodman/pmesh
Attributes:
xvalues: grid points in space
dx: mesh width
"""
def __init__(self, problem_params, dtype_u=pmesh_datatype, dtype_f=rhs_imex_pmesh):
"""
Initialization routine
Args:
problem_params (dict): custom parameters for the example
dtype_u: pmesh data type (will be passed to parent class)
dtype_f: pmesh data type with implicit and explicit parts (will be passed to parent class)
"""
if 'L' not in problem_params:
problem_params['L'] = 1.0
if 'init_type' not in problem_params:
problem_params['init_type'] = 'circle'
if 'comm' not in problem_params:
problem_params['comm'] = None
if 'dw' not in problem_params:
problem_params['dw'] = 0.0
# these parameters will be used later, so assert their existence
essential_keys = ['nvars', 'eps', 'L', 'radius', 'dw']
for key in essential_keys:
if key not in problem_params:
msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))
raise ParameterError(msg)
if not (isinstance(problem_params['nvars'], tuple) and len(problem_params['nvars']) > 1):
raise ProblemError('Need at least two dimensions')
# Creating ParticleMesh structure
self.pm = ParticleMesh(BoxSize=problem_params['L'], Nmesh=list(problem_params['nvars']), dtype='f8',
plan_method='measure', comm=problem_params['comm'])
# create test RealField to get the local dimensions (there's probably a better way to do that)
tmp = self.pm.create(type='real')
# invoke super init, passing the communicator and the local dimensions as init
super(allencahn_imex, self).__init__(init=(self.pm.comm, tmp.value.shape), dtype_u=dtype_u, dtype_f=dtype_f,
params=problem_params)
# Need this for diagnostics
self.dx = self.params.L / problem_params['nvars'][0]
self.dy = self.params.L / problem_params['nvars'][1]
self.xvalues = [i * self.dx - problem_params['L'] / 2 for i in range(problem_params['nvars'][0])]
self.yvalues = [i * self.dy - problem_params['L'] / 2 for i in range(problem_params['nvars'][1])]
def eval_f(self, u, t):
"""
Routine to evaluate the RHS
Args:
u (dtype_u): current values
t (float): current time
Returns:
dtype_f: the RHS
"""
def Laplacian(k, v):
k2 = sum(ki ** 2 for ki in k)
return -k2 * v
f = self.dtype_f(self.init)
tmp_u = self.pm.create(type='real', value=u.values)
f.impl.values = tmp_u.r2c().apply(Laplacian, out=Ellipsis).c2r(out=Ellipsis).value
if self.params.eps > 0:
f.expl.values = - 2.0 / self.params.eps ** 2 * u.values * (1.0 - u.values) * (1.0 - 2.0 * u.values) - \
6.0 * self.params.dw * u.values * (1.0 - u.values)
return f
def solve_system(self, rhs, factor, u0, t):
"""
Simple FFT solver for the diffusion part
Args:
rhs (dtype_f): right-hand side for the linear system
factor (float) : abbrev. for the node-to-node stepsize (or any other factor required)
u0 (dtype_u): initial guess for the iterative solver (not used here so far)
t (float): current time (e.g. for time-dependent BCs)
Returns:
dtype_u: solution as mesh
"""
def linear_solve(k, v):
k2 = sum(ki ** 2 for ki in k)
return 1.0 / (1.0 + factor * k2) * v
me = self.dtype_u(self.init)
tmp_rhs = self.pm.create(type='real', value=rhs.values)
me.values = tmp_rhs.r2c().apply(linear_solve, out=Ellipsis).c2r(out=Ellipsis).value
return me
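    # Note on the solve above: in Fourier space (I - factor * Laplacian) u = rhs
    # decouples per mode, so each coefficient is scaled by 1 / (1 + factor * |k|^2),
    # which is exactly what linear_solve applies before transforming back via c2r.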
def u_exact(self, t):
"""
Routine to compute the exact solution at time t
Args:
t (float): current time
Returns:
dtype_u: exact solution
"""
def circle(i, v):
r = [ii * (Li / ni) - 0.5 * Li for ii, ni, Li in zip(i, v.Nmesh, v.BoxSize)]
r2 = sum(ri ** 2 for ri in r)
return 0.5 * (1.0 + np.tanh((self.params.radius - np.sqrt(r2)) / (np.sqrt(2) * self.params.eps)))
def circle_rand(i, v):
L = [int(l) for l in v.BoxSize]
r = [ii * (Li / ni) - 0.5 * Li for ii, ni, Li in zip(i, v.Nmesh, L)]
rshift = r.copy()
ndim = len(r)
data = 0
# get random radii for circles/spheres
np.random.seed(1)
lbound = 3.0 * self.params.eps
ubound = 0.5 - self.params.eps
rand_radii = (ubound - lbound) * np.random.random_sample(size=tuple(L)) + lbound
# distribute circles/spheres
if ndim == 2:
for indexi, i in enumerate(range(-L[0] + 1, L[0], 2)):
for indexj, j in enumerate(range(-L[1] + 1, L[1], 2)):
# shift x and y coordinate depending on which box we are in
rshift[0] = r[0] + i/2
rshift[1] = r[1] + j/2
# build radius
r2 = sum(ri ** 2 for ri in rshift)
# add this blob, shifted by 1 to avoid issues with adding up negative contributions
data += np.tanh((rand_radii[indexi, indexj] - np.sqrt(r2)) / (np.sqrt(2) * self.params.eps)) + 1
# get rid of the 1
data *= 0.5
assert np.all(data <= 1.0)
return data
assert t == 0, 'ERROR: u_exact only valid for t=0'
me = self.dtype_u(self.init)
if self.params.init_type == 'circle':
tmp_u = self.pm.create(type='real', value=0.0)
me.values = tmp_u.apply(circle, kind='index').value
elif self.params.init_type == 'circle_rand':
tmp_u = self.pm.create(type='real', value=0.0)
me.values = tmp_u.apply(circle_rand, kind='index').value
else:
raise NotImplementedError('type of initial value not implemented, got %s' % self.params.init_type)
return me
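# Illustrative parameter set (hypothetical values, not used by the class): the
# keys below are the ones asserted as essential in __init__, plus the optional
# ones that receive defaults when omitted.
_example_problem_params = {
    'nvars': (128, 128),    # at least two dimensions are required
    'eps': 0.04,
    'radius': 0.25,
    'dw': 0.0,
    'L': 1.0,               # optional, defaults to 1.0
    'init_type': 'circle',  # optional, defaults to 'circle'
    'comm': None,           # optional, defaults to None
}
# prob = allencahn_imex(_example_problem_params)  # sketch only, not executed here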
class allencahn_imex_timeforcing(allencahn_imex):
"""
Example implementing Allen-Cahn equation in 2-3D using PMESH for solving linear parts, IMEX time-stepping,
time-dependent forcing
"""
def eval_f(self, u, t):
"""
Routine to evaluate the RHS
Args:
u (dtype_u): current values
t (float): current time
Returns:
dtype_f: the RHS
"""
def Laplacian(k, v):
k2 = sum(ki ** 2 for ki in k)
return -k2 * v
f = self.dtype_f(self.init)
tmp_u = self.pm.create(type='real', value=u.values)
f.impl.values = tmp_u.r2c().apply(Laplacian, out=Ellipsis).c2r(out=Ellipsis).value
if self.params.eps > 0:
f.expl.values = - 2.0 / self.params.eps ** 2 * u.values * (1.0 - u.values) * (1.0 - 2.0 * u.values)
# build sum over RHS without driving force
Rt_local = f.impl.values.sum() + f.expl.values.sum()
if self.pm.comm is not None:
Rt_global = self.pm.comm.allreduce(sendobj=Rt_local, op=MPI.SUM)
else:
Rt_global = Rt_local
# build sum over driving force term
Ht_local = np.sum(6.0 * u.values * (1.0 - u.values))
if self.pm.comm is not None:
Ht_global = self.pm.comm.allreduce(sendobj=Ht_local, op=MPI.SUM)
else:
Ht_global = Ht_local
# add/subtract the time-dependent driving force: dw = Rt/Ht makes the global sum of the RHS vanish
dw = Rt_global / Ht_global
f.expl.values -= 6.0 * dw * u.values * (1.0 - u.values)
return f
class allencahn_imex_stab(allencahn_imex):
"""
Example implementing Allen-Cahn equation in 2-3D using PMESH for solving linear parts, IMEX time-stepping with
stabilized splitting
"""
def eval_f(self, u, t):
"""
Routine to evaluate the RHS
Args:
u (dtype_u): current values
t (float): current time
Returns:
dtype_f: the RHS
"""
def Laplacian(k, v):
k2 = sum(ki ** 2 for ki in k) + 1.0 / self.params.eps ** 2
return -k2 * v
f = self.dtype_f(self.init)
tmp_u = self.pm.create(type='real', value=u.values)
f.impl.values = tmp_u.r2c().apply(Laplacian, out=Ellipsis).c2r(out=Ellipsis).value
if self.params.eps > 0:
f.expl.values = - 2.0 / self.params.eps ** 2 * u.values * (1.0 - u.values) * (1.0 - 2.0 * u.values) - \
6.0 * self.params.dw * u.values * (1.0 - u.values) + \
1.0 / self.params.eps ** 2 * u.values
return f
def solve_system(self, rhs, factor, u0, t):
"""
Simple FFT solver for the diffusion part
Args:
rhs (dtype_f): right-hand side for the linear system
factor (float) : abbrev. for the node-to-node stepsize (or any other factor required)
u0 (dtype_u): initial guess for the iterative solver (not used here so far)
t (float): current time (e.g. for time-dependent BCs)
Returns:
dtype_u: solution as mesh
"""
def linear_solve(k, v):
k2 = sum(ki ** 2 for ki in k) + 1.0 / self.params.eps ** 2
return 1.0 / (1.0 + factor * k2) * v
me = self.dtype_u(self.init)
tmp_rhs = self.pm.create(type='real', value=rhs.values)
me.values = tmp_rhs.r2c().apply(linear_solve, out=Ellipsis).c2r(out=Ellipsis).value
return me
|
|
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from rest_framework import serializers
from django.core.exceptions import FieldError
from django.core.validators import RegexValidator
from pdc.apps.common.fields import ChoiceSlugField
from pdc.apps.common import models as common_models
from pdc.apps.common.serializers import StrictSerializerMixin
from .models import Product, ProductVersion, Release, BaseProduct, ReleaseType, Variant, VariantArch, VariantType
from . import signals
class ProductSerializer(StrictSerializerMixin, serializers.ModelSerializer):
product_versions = serializers.SlugRelatedField(
many=True,
read_only=True,
source='productversion_set',
slug_field='product_version_id'
)
active = serializers.BooleanField(read_only=True)
class Meta:
model = Product
fields = ('name', 'short', 'active', 'product_versions')
class ProductVersionSerializer(StrictSerializerMixin, serializers.ModelSerializer):
product_version_id = serializers.CharField(read_only=True)
active = serializers.BooleanField(read_only=True)
releases = serializers.SerializerMethodField()
product = serializers.SlugRelatedField(slug_field='short',
queryset=Product.objects.all())
short = serializers.CharField(required=False, validators=[
RegexValidator(regex=r"^[a-z\-]+$", message='Only accept lowercase letter or -')])
class Meta:
model = ProductVersion
fields = ('name', 'short', 'version', 'active', 'product_version_id', 'product', 'releases')
def to_internal_value(self, data):
if not self.partial and 'short' not in data:
data['short'] = data.get('product')
return super(ProductVersionSerializer, self).to_internal_value(data)
def get_releases(self, obj):
"""[release_id]"""
return [x.release_id for x in sorted(obj.release_set.all(), key=Release.version_sort_key)]
class ReleaseSerializer(StrictSerializerMixin, serializers.ModelSerializer):
release_type = ChoiceSlugField(slug_field='short',
queryset=ReleaseType.objects.all())
release_id = serializers.CharField(read_only=True)
compose_set = serializers.SerializerMethodField()
base_product = serializers.SlugRelatedField(slug_field='base_product_id',
queryset=BaseProduct.objects.all(),
required=False,
default=None,
allow_null=True)
product_version = serializers.SlugRelatedField(slug_field='product_version_id',
queryset=ProductVersion.objects.all(),
required=False,
allow_null=True)
active = serializers.BooleanField(default=True)
integrated_with = serializers.SlugRelatedField(slug_field='release_id',
queryset=Release.objects.all(),
required=False)
class Meta:
model = Release
fields = ('release_id', 'short', 'version', 'name', 'base_product',
'active', 'product_version', 'release_type',
'compose_set', 'integrated_with')
def get_compose_set(self, obj):
"""[Compose.compose_id]"""
return [compose.compose_id for compose in sorted(obj.get_all_composes())]
def create(self, validated_data):
signals.release_serializer_extract_data.send(sender=self, validated_data=validated_data)
obj = super(ReleaseSerializer, self).create(validated_data)
signals.release_serializer_post_create.send(sender=self, release=obj)
return obj
def update(self, instance, validated_data):
signals.release_serializer_extract_data.send(sender=self, validated_data=validated_data)
obj = super(ReleaseSerializer, self).update(instance, validated_data)
signals.release_serializer_post_update.send(sender=self, release=obj)
if hasattr(instance, 'pk'):
# reload to make sure changes in mapping are reflected
obj = Release.objects.get(pk=obj.pk)
# By default, PUT does not erase optional fields if not specified. This
# loops over all optional fields and resets them manually.
if not self.partial:
for field_name, field in self.fields.iteritems():
if not field.read_only and field_name not in validated_data:
attr = field.source or field_name
try:
if hasattr(obj, attr):
setattr(obj, attr, None)
except ValueError:
pass
obj.save()
return obj
class BaseProductSerializer(StrictSerializerMixin, serializers.ModelSerializer):
base_product_id = serializers.CharField(read_only=True)
class Meta:
model = BaseProduct
fields = ('base_product_id', 'short', 'version', 'name')
class ReleaseTypeSerializer(StrictSerializerMixin, serializers.ModelSerializer):
short = serializers.CharField()
name = serializers.CharField()
suffix = serializers.CharField()
class Meta:
model = ReleaseType
fields = ("short", "name", "suffix",)
class VariantArchNestedSerializer(serializers.BaseSerializer):
doc_format = "string"
def to_representation(self, obj):
return obj.arch.name
def to_internal_value(self, data, files=None):
try:
arch = common_models.Arch.objects.get(name=data)
return VariantArch(arch=arch)
except common_models.Arch.DoesNotExist:
raise FieldError('No such arch: "%s".' % data)
class ReleaseVariantSerializer(StrictSerializerMixin, serializers.ModelSerializer):
type = ChoiceSlugField(source='variant_type', slug_field='name',
queryset=VariantType.objects.all())
release = serializers.SlugRelatedField(slug_field='release_id',
queryset=Release.objects.all())
id = serializers.CharField(source='variant_id')
uid = serializers.CharField(source='variant_uid')
name = serializers.CharField(source='variant_name')
arches = VariantArchNestedSerializer(source='variantarch_set',
many=True)
key_combination_error = 'add_arches/remove_arches can not be combined with arches.'
extra_fields = ['add_arches', 'remove_arches']
class Meta:
model = Variant
fields = ('release', 'id', 'uid', 'name', 'type', 'arches')
def to_internal_value(self, data):
# Save value of attributes not directly corresponding to serializer
# fields. We can't rely on data dict to be mutable, so the values can
# not be removed from it.
self.add_arches = data.get('add_arches', None)
self.remove_arches = data.get('remove_arches', None)
return super(ReleaseVariantSerializer, self).to_internal_value(data)
def update(self, instance, validated_data):
arches = validated_data.pop('variantarch_set', [])
instance = super(ReleaseVariantSerializer, self).update(instance, validated_data)
if arches:
if self.add_arches or self.remove_arches:
raise FieldError(self.key_combination_error)
# If arches were completely specified, try first to remove unwanted
# arches, then create new ones.
requested = dict([(x.arch.name, x) for x in arches])
for variant in instance.variantarch_set.all():
if variant.arch.name in requested:
del requested[variant.arch.name]
else:
variant.delete()
for arch in requested.values():
arch.variant = instance
arch.save()
# These loops can only do something on partial update: when doing PUT,
# "arches" is required and if any of the other arch modifications were
# specified, an exception would be raised above.
for arch_name in self.add_arches or []:
arch = common_models.Arch.objects.get(name=arch_name)
vararch = VariantArch(arch=arch, variant=instance)
vararch.save()
for arch_name in self.remove_arches or []:
instance.variantarch_set.filter(arch__name=arch_name).delete()
return instance
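# Illustrative request payloads for ReleaseVariantSerializer (hypothetical data,
# actual field values depend on what exists in a given PDC instance):
#   full PUT -- "arches" must be spelled out completely:
#     {"release": "myrelease-1.0", "id": "Server", "uid": "Server",
#      "name": "Server", "type": "variant", "arches": ["x86_64"]}
#   partial PATCH -- "add_arches"/"remove_arches" may be used instead:
#     {"add_arches": ["ppc64le"]}
# Combining add_arches/remove_arches with "arches" raises key_combination_error.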
class VariantTypeSerializer(StrictSerializerMixin, serializers.ModelSerializer):
class Meta:
model = VariantType
fields = ('name',)
|
|
from .models import (Message, METHOD_SMS, METHOD_SMS_CALLBACK,
METHOD_SMS_SURVEY, METHOD_IVR_SURVEY,
CaseReminderHandler)
from corehq.apps.smsforms.app import submit_unfinished_form
from corehq.apps.smsforms.models import get_session_by_session_id, SQLXFormsSession
from corehq.apps.sms.mixin import (VerifiedNumber, apply_leniency,
CommCareMobileContactMixin, InvalidFormatException)
from touchforms.formplayer.api import current_question
from corehq.apps.sms.api import (
send_sms, send_sms_to_verified_number, MessageMetadata
)
from corehq.apps.smsforms.app import start_session
from corehq.apps.smsforms.util import form_requires_input
from corehq.apps.sms.util import format_message_list
from corehq.apps.users.models import CouchUser
from corehq.apps.domain.models import Domain
from corehq.apps.sms.models import (
CallLog, ExpectedCallbackEventLog, CALLBACK_PENDING, CALLBACK_RECEIVED,
CALLBACK_MISSED, WORKFLOW_REMINDER, WORKFLOW_KEYWORD, WORKFLOW_BROADCAST,
WORKFLOW_CALLBACK,
)
from django.conf import settings
from corehq.apps.app_manager.models import Form
from corehq.apps.ivr.tasks import initiate_outbound_call
from dimagi.utils.parsing import json_format_datetime
from dimagi.utils.couch import CriticalSection
from django.utils.translation import ugettext_noop
from casexml.apps.case.models import CommCareCase
from dimagi.utils.modules import to_function
ERROR_RENDERING_MESSAGE = ugettext_noop("Error rendering templated message for language '%s'. Please check message syntax.")
ERROR_NO_VERIFIED_NUMBER = ugettext_noop("Recipient has no phone number.")
ERROR_NO_OTHER_NUMBERS = ugettext_noop("Recipient has no phone number.")
ERROR_FORM = ugettext_noop("Can't load form. Please check configuration.")
ERROR_NO_RECIPIENTS = ugettext_noop("No recipient(s).")
ERROR_FINDING_CUSTOM_CONTENT_HANDLER = ugettext_noop("Error looking up custom content handler.")
ERROR_INVALID_CUSTOM_CONTENT_HANDLER = ugettext_noop("Invalid custom content handler.")
"""
This module defines the methods that will be called from CaseReminderHandler.fire()
when a reminder event fires.
Each method accepts the following parameters:
reminder The CaseReminder which is being fired. Use reminder.current_event
to see the specific event which is being fired.
handler The CaseReminderHandler which defines the rules / schedule for
the reminder.
recipients A list of recipients to send the content to. At the moment, this
will be list of CommCareUsers or CommCareCases.
verified_numbers A dictionary of recipient.get_id : <first non-pending verified number>
If the recipient doesn't have a verified VerifiedNumber entry, None is the
corresponding value.
Any changes to the reminder object made by the event handler method will be saved
after the method returns.
Each method should return True to move the reminder forward to the next event, or False
to not move the reminder forward to the next event.
"""
def get_workflow(handler):
from corehq.apps.reminders.models import REMINDER_TYPE_ONE_TIME, REMINDER_TYPE_KEYWORD_INITIATED
if handler.reminder_type == REMINDER_TYPE_ONE_TIME:
return WORKFLOW_BROADCAST
elif handler.reminder_type == REMINDER_TYPE_KEYWORD_INITIATED:
return WORKFLOW_KEYWORD
else:
return WORKFLOW_REMINDER
def get_recipient_phone_number(reminder, recipient, verified_numbers):
verified_number = verified_numbers.get(recipient.get_id, None)
unverified_number = None
if verified_number is None:
if isinstance(recipient, CouchUser):
try:
unverified_number = recipient.phone_number
except Exception:
unverified_number = None
elif isinstance(recipient, CommCareCase):
unverified_number = recipient.get_case_property("contact_phone_number")
unverified_number = apply_leniency(unverified_number)
if unverified_number:
try:
CommCareMobileContactMixin.validate_number_format(
unverified_number)
except InvalidFormatException:
unverified_number = None
else:
unverified_number = None
return (verified_number, unverified_number)
def get_message_template_params(case):
"""
Data such as case properties can be referenced from reminder messages
such as {case.name} which references the case's name. Add to this result
all data that can be referenced from a reminder message.
The result is a dictionary where each key is the object's name and each
value is a dictionary of attributes to be referenced. Dictionaries can
also be nested, so a result here of {"case": {"parent": {"name": "joe"}}}
allows you to reference {case.parent.name} in a reminder message.
At the moment, the result here is of this structure:
{
"case": {
...key:value case properties...
"parent": {
...key:value parent case properties...
}
}
}
"""
result = {"case": {}}
if case:
result["case"] = case.case_properties()
parent_case = case.parent if case else None
result["case"]["parent"] = {}
if parent_case:
result["case"]["parent"] = parent_case.case_properties()
return result
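# Illustrative use of the template params (hypothetical case data): for a case
# whose properties include {"name": "joe"}, something along the lines of
#   Message.render("Hello {case.name}", **get_message_template_params(case))
# would yield "Hello joe", mirroring what fire_sms_event does below.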
def fire_sms_event(reminder, handler, recipients, verified_numbers, workflow=None):
metadata = MessageMetadata(
workflow=workflow or get_workflow(handler),
reminder_id=reminder._id,
)
current_event = reminder.current_event
case = reminder.case
template_params = get_message_template_params(case)
for recipient in recipients:
try:
lang = recipient.get_language_code()
except Exception:
lang = None
if handler.custom_content_handler is not None:
if handler.custom_content_handler in settings.ALLOWED_CUSTOM_CONTENT_HANDLERS:
try:
content_handler = to_function(settings.ALLOWED_CUSTOM_CONTENT_HANDLERS[handler.custom_content_handler])
except Exception:
raise_error(reminder, ERROR_FINDING_CUSTOM_CONTENT_HANDLER)
return False
message = content_handler(reminder, handler, recipient)
# If the content handler returns None or empty string,
# don't send anything
if not message:
return True
else:
raise_error(reminder, ERROR_INVALID_CUSTOM_CONTENT_HANDLER)
return False
else:
message = current_event.message.get(lang, current_event.message[handler.default_lang])
try:
message = Message.render(message, **template_params)
except Exception:
if len(recipients) == 1:
raise_error(reminder, ERROR_RENDERING_MESSAGE % lang)
return False
else:
raise_warning() # ERROR_RENDERING_MESSAGE
continue
verified_number, unverified_number = get_recipient_phone_number(
reminder, recipient, verified_numbers)
domain_obj = Domain.get_by_name(reminder.domain, strict=True)
if verified_number is not None:
result = send_sms_to_verified_number(verified_number,
message, metadata)
elif isinstance(recipient, CouchUser) and unverified_number:
result = send_sms(reminder.domain, recipient, unverified_number,
message, metadata)
elif (isinstance(recipient, CommCareCase) and unverified_number and
domain_obj.send_to_duplicated_case_numbers):
result = send_sms(reminder.domain, recipient, unverified_number,
message, metadata)
else:
if len(recipients) == 1:
raise_error(reminder, ERROR_NO_VERIFIED_NUMBER)
result = False
if len(recipients) == 1:
return result
# For multiple recipients, always move to the next event
return True
def fire_sms_callback_event(reminder, handler, recipients, verified_numbers):
current_event = reminder.current_event
for recipient in recipients:
send_message = False
if reminder.callback_try_count > 0:
if reminder.event_initiation_timestamp:
event = ExpectedCallbackEventLog.view("sms/expected_callback_event",
key=[reminder.domain,
json_format_datetime(reminder.event_initiation_timestamp),
recipient.get_id],
include_docs=True,
limit=1).one()
if not event:
continue
if event.status == CALLBACK_RECEIVED:
continue
if CallLog.inbound_entry_exists(recipient.doc_type,
recipient.get_id, reminder.event_initiation_timestamp):
event.status = CALLBACK_RECEIVED
event.save()
continue
else:
continue
if (reminder.callback_try_count >=
len(current_event.callback_timeout_intervals)):
# On the last callback timeout, instead of sending the SMS
# again, log the missed callback
if event:
event.status = CALLBACK_MISSED
event.save()
else:
send_message = True
else:
# It's the first time sending the sms, so create an expected
# callback event
send_message = True
event = ExpectedCallbackEventLog(
domain=reminder.domain,
date=reminder.event_initiation_timestamp,
couch_recipient_doc_type=recipient.doc_type,
couch_recipient=recipient.get_id,
status=CALLBACK_PENDING,
)
event.save()
if send_message:
fire_sms_event(reminder, handler, [recipient], verified_numbers,
workflow=WORKFLOW_CALLBACK)
return True
def fire_sms_survey_event(reminder, handler, recipients, verified_numbers):
if reminder.callback_try_count > 0:
# Handle timeouts
if handler.submit_partial_forms and (reminder.callback_try_count == len(reminder.current_event.callback_timeout_intervals)):
# Submit partial form completions
for session_id in reminder.xforms_session_ids:
submit_unfinished_form(session_id, handler.include_case_side_effects)
else:
# Resend current question
for session_id in reminder.xforms_session_ids:
session = get_session_by_session_id(session_id)
if session.end_time is None:
vn = VerifiedNumber.view("sms/verified_number_by_owner_id",
key=session.connection_id,
include_docs=True).first()
if vn is not None:
metadata = MessageMetadata(
workflow=get_workflow(handler),
reminder_id=reminder._id,
xforms_session_couch_id=session._id,
)
resp = current_question(session_id)
send_sms_to_verified_number(vn, resp.event.text_prompt, metadata)
return True
else:
reminder.xforms_session_ids = []
# Get the app, module, and form
try:
form_unique_id = reminder.current_event.form_unique_id
form = Form.get_form(form_unique_id)
app = form.get_app()
module = form.get_module()
except Exception:
raise_error(reminder, ERROR_FORM)
return False
# Start a touchforms session for each recipient
for recipient in recipients:
verified_number, unverified_number = get_recipient_phone_number(
reminder, recipient, verified_numbers)
domain_obj = Domain.get_by_name(reminder.domain, strict=True)
no_verified_number = verified_number is None
cant_use_unverified_number = (unverified_number is None or
not domain_obj.send_to_duplicated_case_numbers or
form_requires_input(form))
if no_verified_number and cant_use_unverified_number:
if len(recipients) == 1:
raise_error(reminder, ERROR_NO_VERIFIED_NUMBER)
return False
else:
continue
key = "start-sms-survey-for-contact-%s" % recipient.get_id
with CriticalSection([key], timeout=60):
# Close all currently open sessions
SQLXFormsSession.close_all_open_sms_sessions(reminder.domain, recipient.get_id)
# Start the new session
if (isinstance(recipient, CommCareCase) and
not handler.force_surveys_to_use_triggered_case):
case_id = recipient.get_id
else:
case_id = reminder.case_id
session, responses = start_session(reminder.domain, recipient,
app, module, form, case_id, case_for_case_submission=
handler.force_surveys_to_use_triggered_case)
session.survey_incentive = handler.survey_incentive
session.workflow = get_workflow(handler)
session.reminder_id = reminder._id
session.save()
reminder.xforms_session_ids.append(session.session_id)
# Send out first message
if len(responses) > 0:
message = format_message_list(responses)
metadata = MessageMetadata(
workflow=get_workflow(handler),
reminder_id=reminder._id,
xforms_session_couch_id=session._id,
)
if verified_number:
result = send_sms_to_verified_number(verified_number, message, metadata)
else:
result = send_sms(reminder.domain, recipient, unverified_number,
message, metadata)
if len(recipients) == 1:
return result
return True
def fire_ivr_survey_event(reminder, handler, recipients, verified_numbers):
domain_obj = Domain.get_by_name(reminder.domain, strict=True)
for recipient in recipients:
initiate_call = True
if reminder.callback_try_count > 0 and reminder.event_initiation_timestamp:
initiate_call = not CallLog.answered_call_exists(
recipient.doc_type, recipient.get_id,
reminder.event_initiation_timestamp,
CaseReminderHandler.get_now())
if initiate_call:
if (isinstance(recipient, CommCareCase) and
not handler.force_surveys_to_use_triggered_case):
case_id = recipient.get_id
else:
case_id = reminder.case_id
verified_number, unverified_number = get_recipient_phone_number(
reminder, recipient, verified_numbers)
if verified_number:
initiate_outbound_call.delay(
recipient,
reminder.current_event.form_unique_id,
handler.submit_partial_forms,
handler.include_case_side_effects,
handler.max_question_retries,
verified_number=verified_number,
case_id=case_id,
case_for_case_submission=handler.force_surveys_to_use_triggered_case,
timestamp=CaseReminderHandler.get_now(),
)
elif domain_obj.send_to_duplicated_case_numbers and unverified_number:
initiate_outbound_call.delay(
recipient,
reminder.current_event.form_unique_id,
handler.submit_partial_forms,
handler.include_case_side_effects,
handler.max_question_retries,
unverified_number=unverified_number,
case_id=case_id,
case_for_case_submission=handler.force_surveys_to_use_triggered_case,
timestamp=CaseReminderHandler.get_now(),
)
else:
#No phone number to send to
pass
return True
def raise_warning():
"""
This method is meant to report runtime warnings which are caused by
configuration errors to a project contact.
"""
# For now, just a stub.
pass
def raise_error(reminder, error_msg):
"""
Put the reminder in an error state, which filters it out of the reminders
queue.
"""
reminder.error = True
reminder.error_msg = error_msg
reminder.save()
# The dictionary which maps an event type to its event handling method
EVENT_HANDLER_MAP = {
METHOD_SMS: fire_sms_event,
METHOD_SMS_CALLBACK: fire_sms_callback_event,
METHOD_SMS_SURVEY: fire_sms_survey_event,
METHOD_IVR_SURVEY: fire_ivr_survey_event,
# METHOD_EMAIL is a placeholder at the moment; it's not implemented yet anywhere in the framework
}
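# Illustrative dispatch sketch (not part of this module): the caller described
# in the module docstring is expected to look up the handler for the configured
# method and invoke it with the documented signature, roughly:
#   event_handler = EVENT_HANDLER_MAP.get(method)
#   should_advance = event_handler(reminder, handler, recipients, verified_numbers)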
|
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import shutil
from catkin_tools.common import mkdir_p
from catkin_tools.common import get_cached_recursive_build_depends_in_workspace
from catkin_tools.resultspace import get_resultspace_environment
from catkin_tools.execution.events import ExecutionEvent
class CommandMissing(Exception):
'''A required command is missing.'''
def __init__(self, name):
super(CommandMissing, self).__init__(
'Cannot find required tool `%s` on the PATH, is it installed?' % name)
def require_command(name, which):
if not which:
raise CommandMissing(name)
def get_env_loaders(package, context):
"""Get a list of env loaders required to build this package."""
sources = []
# If installing to isolated folders, or not installing and the devel spaces are not merged
if (context.install and context.isolate_install) or (not context.install and context.isolate_devel):
# Source each package's install or devel space
space = context.install_space_abs if context.install else context.devel_space_abs
# Get the recursive dependencies
depends = get_cached_recursive_build_depends_in_workspace(package, context.packages)
# For each dep add a line to source its setup file
for dep_pth, dep in depends:
source_path = os.path.join(space, dep.name, 'env.sh')
sources.append(source_path)
else:
# Get the actual destination of this package
if context.link_devel and not context.install:
source_path = os.path.join(context.package_final_path(package), 'env.sh')
else:
source_path = os.path.join(context.package_dest_path(package), 'env.sh')
sources = [source_path]
return sources
def merge_envs(job_env, overlay_envs):
'''
In the merged/linked case of single env, this function amounts to a straight
assignment, but a more complex merge is required with isolated result spaces,
since a package's build environment may require extending that of multiple
other result spaces.
'''
merge_path_values = {}
for overlay_env in overlay_envs:
for key, values_str in overlay_env.items():
if key.endswith('PATH'):
if key not in merge_path_values:
# Seed the list with any values already in the environment. We reverse the list
# here so that we can cheaply append to it, representing a prepend in the final
# PATH var, and because we need to maintain the order of underlay paths.
if key in job_env:
values = job_env[key].split(os.pathsep)
values.reverse()
merge_path_values[key] = values
else:
merge_path_values[key] = []
merge_path_values[key].extend(values_str.split(os.pathsep))
else:
# For non-PATH keys, simply assign the value. This may not always
# be correct behaviour, but we don't have the information here to
# know how to do anything else.
job_env[key] = values_str
# For the path values, do a deduplicating merge.
for key, values_list in merge_path_values.items():
seen_values = set()
new_values_list = []
for value in values_list:
if value not in seen_values:
seen_values.add(value)
new_values_list.append(value)
job_env[key] = os.pathsep.join(reversed(new_values_list))
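# Worked example (illustrative values, ':' as os.pathsep): with
# job_env = {'PATH': '/usr/bin'} and overlay_envs of
# [{'PATH': '/opt/a/bin'}, {'PATH': '/opt/b/bin'}], merge_envs leaves
# job_env['PATH'] == '/opt/b/bin:/opt/a/bin:/usr/bin' -- overlay entries are
# prepended ahead of the original value and duplicates are dropped.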
def loadenv(logger, event_queue, job_env, package, context):
# Get the paths to the env loaders
env_loader_paths = get_env_loaders(package, context)
# If DESTDIR is set, set _CATKIN_SETUP_DIR as well
if context.destdir is not None:
job_env['_CATKIN_SETUP_DIR'] = context.package_dest_path(package)
envs = []
for env_loader_path in env_loader_paths:
if logger:
logger.out('Loading environment from: {}'.format(env_loader_path))
envs.append(get_resultspace_environment(
os.path.split(env_loader_path)[0],
base_env=job_env,
quiet=True,
cached=context.use_env_cache,
strict=False))
# Avoid using merge logic if not required (in the non-isolated resultspace
# case). It has corner cases which may trip up the unwary, so having the
# option to switch to a merged resultspace is a good fallback.
if len(envs) > 1:
merge_envs(job_env, envs)
elif len(envs) == 1:
job_env.update(envs[0])
return 0
def makedirs(logger, event_queue, path):
"""FunctionStage functor that makes a path of directories."""
mkdir_p(path)
return 0
def copyfiles(logger, event_queue, source_paths, dest_path):
"""FunctionStage functor that copies one or more files"""
for source_path in source_paths:
shutil.copy(source_path, dest_path)
return 0
def rmfile(logger, event_queue, path):
"""FunctionStage functor that removes a file."""
if os.path.exists(path):
os.remove(path)
return 0
def rmdirs(logger, event_queue, paths, dry_run=False):
    """FunctionStage functor that removes a directory tree."""
    return rmfiles(logger, event_queue, paths, dry_run, remove_empty=False)
def rmfiles(logger, event_queue, paths, dry_run, remove_empty=False, empty_root='/'):
"""FunctionStage functor that removes a list of files and directories.
If remove_empty is True, then this will also remove directories which
become empty after deleting the files in `paths`. It will remove directories up
to the path specified by `empty_root`.
"""
# Determine empty directories
if remove_empty:
# First get a list of directories to check
dirs_to_check = set()
for path in paths:
# Make sure the file is given by an absolute path and it exists
if not os.path.isabs(path) or not os.path.exists(path):
continue
# Only look in the devel space
while empty_root.find(path) != 0:
# Pop up a directory
path, dirname = os.path.split(path)
# Skip if this path isn't a directory
if not os.path.isdir(path):
continue
dirs_to_check.add(path)
# For each directory which may be empty after cleaning, visit them
# depth-first and count their descendants
dir_descendants = dict()
for path in sorted(dirs_to_check, key=lambda k: -len(k.split(os.path.sep))):
# Get the absolute path to all the files currently in this directory
files = [os.path.join(path, f) for f in os.listdir(path)]
# Filter out the files which we intend to remove
files = [f for f in files if f not in paths]
# Compute the minimum number of files potentially contained in this path
dir_descendants[path] = sum([
(dir_descendants.get(f, 1) if os.path.isdir(f) else 1)
for f in files
])
# Schedule the directory for removal if removal of the given files will make it empty
if dir_descendants[path] == 0:
paths.append(path)
# Remove the paths
for index, path in enumerate(paths):
# Remove the path
if os.path.exists(path):
if os.path.isdir(path):
logger.out('Removing directory: {}'.format(path))
if not dry_run:
shutil.rmtree(path)
else:
logger.out(' Removing file: {}'.format(path))
if not dry_run:
os.remove(path)
else:
logger.err('Warning: File {} could not be deleted because it does not exist.'.format(path))
# Report progress
event_queue.put(ExecutionEvent(
'STAGE_PROGRESS',
job_id=logger.job_id,
stage_label=logger.stage_label,
percent=str(index / float(len(paths)))))
return 0
|
|
import ctypes, random, unittest, sys
from django.contrib.gis.geos import *
from django.contrib.gis.geos.base import gdal, numpy, GEOSBase
from django.contrib.gis.tests.geometries import *
class GEOSTest(unittest.TestCase):
@property
def null_srid(self):
"""
Returns the proper null SRID depending on the GEOS version.
See the comments in `test15_srid` for more details.
"""
info = geos_version_info()
if info['version'] == '3.0.0' and info['release_candidate']:
return -1
else:
return None
def test00_base(self):
"Tests out the GEOSBase class."
# Testing out GEOSBase class, which provides a `ptr` property
# that abstracts out access to underlying C pointers.
class FakeGeom1(GEOSBase):
pass
# This one only accepts pointers to floats
c_float_p = ctypes.POINTER(ctypes.c_float)
class FakeGeom2(GEOSBase):
ptr_type = c_float_p
# Default ptr_type is `c_void_p`.
fg1 = FakeGeom1()
# Default ptr_type is C float pointer
fg2 = FakeGeom2()
# These assignments are OK -- None is allowed because
# it's equivalent to the NULL pointer.
fg1.ptr = ctypes.c_void_p()
fg1.ptr = None
fg2.ptr = c_float_p(ctypes.c_float(5.23))
fg2.ptr = None
# Because pointers have been set to NULL, an exception should be
# raised when we try to access it. Raising an exception is
# preferable to a segmentation fault that commonly occurs when
# a C method is given a NULL memory reference.
for fg in (fg1, fg2):
# Equivalent to `fg.ptr`
self.assertRaises(GEOSException, fg._get_ptr)
# Anything that is neither None nor the acceptable pointer type will
# result in a TypeError when trying to assign it to the `ptr` property.
# Thus, memory addresses (integers) and pointers of the incorrect type
# (in `bad_ptrs`) will not be allowed.
bad_ptrs = (5, ctypes.c_char_p('foobar'))
for bad_ptr in bad_ptrs:
# Equivalent to `fg.ptr = bad_ptr`
self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test01a_wkt(self):
"Testing WKT output."
for g in wkt_out:
geom = fromstr(g.wkt)
self.assertEqual(g.ewkt, geom.wkt)
def test01b_hex(self):
"Testing HEX output."
for g in hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex)
def test01c_kml(self):
"Testing KML output."
for tg in wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml: self.assertEqual(kml, geom.kml)
def test01d_errors(self):
"Testing the Error handlers."
# string-based
print "\nBEGIN - expecting GEOS_ERROR; safe to ignore.\n"
for err in errors:
try:
g = fromstr(err.wkt)
except (GEOSException, ValueError):
pass
# Bad WKB
self.assertRaises(GEOSException, GEOSGeometry, buffer('0'))
print "\nEND - expecting GEOS_ERROR; safe to ignore.\n"
class NotAGeometry(object):
pass
# Some other object
self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
# None
self.assertRaises(TypeError, GEOSGeometry, None)
def test01e_wkb(self):
"Testing WKB output."
from binascii import b2a_hex
for g in hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex)
def test01f_create_hex(self):
"Testing creation from HEX."
for g in hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01g_create_wkb(self):
"Testing creation from WKB."
from binascii import a2b_hex
for g in hex_wkt:
wkb = buffer(a2b_hex(g.hex))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01h_ewkt(self):
"Testing EWKT."
srid = 32140
for p in polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
def test01i_json(self):
"Testing GeoJSON input/output (via GDAL)."
if not gdal or not gdal.GEOJSON: return
for g in json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
self.assertEqual(g.json, geom.json)
self.assertEqual(g.json, geom.geojson)
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test01k_fromfile(self):
"Testing the fromfile() factory."
from StringIO import StringIO
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = StringIO()
wkt_f.write(ref_pnt.wkt)
wkb_f = StringIO()
wkb_f.write(str(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test01k_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# An error shouldn't be raised on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo' : 'bar'})
self.assertNotEqual(g, False)
def test02a_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertAlmostEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test02b_multipoints(self):
"Testing MultiPoint objects."
for mp in multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.points, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertEqual(p.empty, False)
self.assertEqual(p.valid, True)
def test03a_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for l in linestrings:
ls = fromstr(l.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertEqual(ls.ring, False)
if hasattr(l, 'centroid'):
self.assertEqual(l.centroid, ls.centroid.tuple)
if hasattr(l, 'tup'):
self.assertEqual(l.tup, ls.tuple)
self.assertEqual(True, ls == fromstr(l.wkt))
self.assertEqual(False, ls == prev)
self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt) # Point individual arguments
if numpy: self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
def test03b_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for l in multilinestrings:
ml = fromstr(l.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
self.assertEqual(True, ml == fromstr(l.wkt))
self.assertEqual(False, ml == prev)
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test04_linearring(self):
"Testing LinearRing objects."
for rr in linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy: self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test05a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox( bbox )
self.assertEqual(bbox, p.extent)
prev = fromstr('POINT(0 0)')
for p in polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.empty, False)
self.assertEqual(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(True, poly == fromstr(p.wkt))
self.assertEqual(False, poly == prev) # Should not be equal to previous geometry
self.assertEqual(True, poly != prev)
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
self.assertRaises(TypeError, Polygon.__init__, 0, [1, 2, 3])
self.assertRaises(TypeError, Polygon.__init__, 'foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test05b_multipolygons(self):
"Testing MultiPolygon objects."
print "\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n"
prev = fromstr('POINT (0 0)')
for mp in multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertEqual(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
print "\nEND - expecting GEOS_NOTICE; safe to ignore.\n"
def test06a_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
#### Memory issues with rings and polygons
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
s1, s2 = str(ring1), str(ring2)
# The previous hijinks tests are now moot because only clones are
# now used =)
def test08_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in xrange(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2: tset = (5, 23)
else: tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test09_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for i in xrange(len(relate_geoms)):
g_tup = relate_geoms[i]
a = fromstr(g_tup[0].wkt)
b = fromstr(g_tup[1].wkt)
pat = g_tup[2]
result = g_tup[3]
self.assertEqual(result, a.relate_pattern(b, pat))
self.assertEqual(pat, a.relate(b))
def test10_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(topology_geoms)):
g_tup = topology_geoms[i]
a = fromstr(g_tup[0].wkt)
b = fromstr(g_tup[1].wkt)
i1 = fromstr(intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test11_union(self):
"Testing union()."
for i in xrange(len(topology_geoms)):
g_tup = topology_geoms[i]
a = fromstr(g_tup[0].wkt)
b = fromstr(g_tup[1].wkt)
u1 = fromstr(union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test12_difference(self):
"Testing difference()."
for i in xrange(len(topology_geoms)):
g_tup = topology_geoms[i]
a = fromstr(g_tup[0].wkt)
b = fromstr(g_tup[1].wkt)
d1 = fromstr(diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test13_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(topology_geoms)):
g_tup = topology_geoms[i]
a = fromstr(g_tup[0].wkt)
b = fromstr(g_tup[1].wkt)
d1 = fromstr(sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test14_buffer(self):
"Testing buffer()."
for i in xrange(len(buffer_geoms)):
g_tup = buffer_geoms[i]
g = fromstr(g_tup[0].wkt)
# The buffer we expect
exp_buf = fromstr(g_tup[1].wkt)
# Can't use a floating-point for the number of quadsegs.
self.assertRaises(ctypes.ArgumentError, g.buffer, g_tup[2], float(g_tup[3]))
# Constructing our buffer
buf = g.buffer(g_tup[2], g_tup[3])
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
# Now assuring that each point in the buffer is almost equal
for j in xrange(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in xrange(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test15_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly: self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)): self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
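# Decoding that hex string by eye: '01' = little-endian byte order,
# '01000020' = point type with the EWKB SRID flag set, 'E6100000' = SRID 4326,
# followed by two little-endian doubles for X (5.0) and Y (23.0).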
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
# In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
# the SRID information is lost and set to -1 -- this is not a
# problem on the 3.0.0 version (another reason to upgrade).
exp_srid = self.null_srid
p2 = fromstr(p1.hex)
self.assertEqual(exp_srid, p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
def test16_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
### Testing the mutability of Polygons ###
for p in polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup: new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
s = str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
### Testing the mutability of Geometry Collections
for tg in multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(1, 100), random.randint(1, 100))
# Testing the assignment
mp[i] = new
s = str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in multipolygons:
mpoly = fromstr(tg.wkt)
for i in xrange(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
# Offsetting each ring in the polygon by 500.
for j in xrange(len(poly)):
r = poly[j]
for k in xrange(len(r)): r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
s = str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
#mpoly[0][0][0] = (3.14, 2.71)
#self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
#self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
#del mpoly
def test17_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2.,3.,8.), pnt.coords)
self.assertRaises(TypeError, pnt.set_coords, (1.,2.))
pnt.coords = (1.,2.,3.)
self.assertEqual((1.,2.,3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2.,3.,8.), (50.,250.,-117.)), ls.tuple)
self.assertRaises(TypeError, ls.__setitem__, 0, (1.,2.))
ls[0] = (1.,2.,3.)
self.assertEqual((1.,2.,3.), ls[0])
def test18_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test19_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be the circumference of the Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test20a_emptyCollections(self):
"Testing empty geometries and collections."
gc1 = GeometryCollection([])
gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
pnt = fromstr('POINT EMPTY')
ls = fromstr('LINESTRING EMPTY')
poly = fromstr('POLYGON EMPTY')
mls = fromstr('MULTILINESTRING EMPTY')
mpoly1 = fromstr('MULTIPOLYGON EMPTY')
mpoly2 = MultiPolygon(())
for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
self.assertEqual(True, g.empty)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
self.assertRaises(GEOSIndexError, g.get_x)
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertEqual(True, lr.empty)
self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
else:
self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test20b_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in multipolygons if mp.valid]
coll.extend([mls.wkt for mls in multilinestrings])
coll.extend([p.wkt for p in polygons])
coll.extend([mp.wkt for mp in multipoints])
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
def test21_test_gdal(self):
"Testing `ogr` and `srs` properties."
if not gdal.HAS_GDAL: return
g1 = fromstr('POINT(5 23)')
self.assertEqual(True, isinstance(g1.ogr, gdal.OGRGeometry))
self.assertEqual(g1.srs, None)
g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
self.assertEqual(True, isinstance(g2.ogr, gdal.OGRGeometry))
self.assertEqual(True, isinstance(g2.srs, gdal.SpatialReference))
self.assertEqual(g2.hex, g2.ogr.hex)
self.assertEqual('WGS 84', g2.srs.name)
def test22_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
def test23_transform(self):
"Testing `transform` method."
if not gdal.HAS_GDAL: return
orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using a srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(gdal.SpatialReference('EPSG:2774'))
ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test24_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test25_pickle(self):
"Testing pickling and unpickling support."
# Using both pickle and cPickle -- just 'cause.
import pickle, cPickle
# Creating a list of test geometries for pickling,
# and setting the SRID on some of them.
def get_geoms(lst, srid=None):
return [GEOSGeometry(tg.wkt, srid) for tg in lst]
tgeoms = get_geoms(points)
tgeoms.extend(get_geoms(multilinestrings, 4326))
tgeoms.extend(get_geoms(polygons, 3084))
tgeoms.extend(get_geoms(multipolygons, 900913))
# The SRID won't be exported in GEOS 3.0 release candidates.
no_srid = self.null_srid == -1
for geom in tgeoms:
s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
g1, g2 = cPickle.loads(s1), pickle.loads(s2)
for tmpg in (g1, g2):
self.assertEqual(geom, tmpg)
if not no_srid: self.assertEqual(geom.srid, tmpg.srid)
def test26_prepared(self):
"Testing PreparedGeometry support."
if not GEOS_PREPARE: return
# Creating a simple multipolygon and getting a prepared version.
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
prep = mpoly.prepared
# A set of test points.
pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
covers = [True, True, False] # No `covers` op for regular GEOS geoms.
for pnt, c in zip(pnts, covers):
# Results should be the same (but faster)
self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
self.assertEqual(c, prep.covers(pnt))
def test27_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GEOSTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
|
|
import tables
import numpy as np
import math
import png
import sys
import time
sys.path.insert(0, '/home/brad/rayTracer/Objects')
from object import Sphere, Plane, Cylinder, Cube
str_time = time.time()
cur_sec = 0
'''---------------------------------------------------------'''
''' Setup Image '''
'''---------------------------------------------------------'''
width = 480
height = 300
s_num = 3
s_size = height // s_num  # rows per horizontal slab; integer division keeps range() and array shapes integral
channels = 3
num_colours = 256
num_steps = 4
'''---------------------------------------------------------'''
''' Create Scene '''
'''---------------------------------------------------------'''
glblAmb = np.array([0.2, 0.2, 0.2])
l1 = np.array([[4.271, 16.705, -17.204],[0.1, 0.1, 0.1],[1.0, 1.0, 1.0],[1.0, 1.0, 1.0]])
lights = np.array([l1])
s1 = Sphere(np.array([ 9.50,-1.00, 19.00]), \
np.array([[0.70, 0.70, 0.70],[0.10, 0.50, 0.80],[1.00, 1.00, 1.00]]), \
50.0, 0.03, "earthScaled16", 256,\
4.0, np.array([2, 1, -3]), np.array([0,3,1]))
s2 = Sphere(np.array([-2.25,-3.50, 11.50]), \
np.array([[0.10, 0.10, 0.10],[0.60, 0.06, 0.00],[1.00, 1.00, 1.00]]), \
30.0, 0.15, None, None, \
1.5, None, None)
s3 = Sphere(np.array([-3.75, 0.00, 30.0]), \
np.array([[0.20, 0.20, 0.20],[0.00, 0.50, 0.00],[0.00, 0.00, 0.00]]), \
20.0, 0.25, None, None, \
5.0, None, None)
s4 = Sphere(np.array([-7.14, 5.89, 15.64]), \
np.array([[0.20, 0.20, 0.20],[0.00, 0.00, 0.00],[1.00, 1.00, 1.00]]), \
100.0, 1.0, None, None, \
3.0, None, None)
p1 = Plane(np.array([0.0,-5.0,10.0]), \
np.array([[0.20, 0.20, 0.20],[0.90, 0.90, 0.90],[0.00, 0.00, 0.00]]), \
10.0, 0.1, "checkerboard", 256, \
np.array([0.0,1.0,0.0]), np.array([1.0,0.0,0.0]), 1.0)
c1 = Cylinder(np.array([ 0.0, -5.0, 10.0]), \
np.array([[0.70, 0.70, 0.70],[0.65, 0.65, 0.65],[1.00, 1.00, 1.00]]), \
50.0, 0.03, None, None, \
np.array([0.0, 1.0, 0.0]), 0.75, 9.0)
cu1 = Cube(np.array([-8.0, -1.0, 16.5]), \
np.array([[0.2,0.2,0.2],[1.0,1.0,0.0],[0.0,0.0,0.0]]), \
5.0, 0.15, None, None, \
np.array([1.0,8.0,-1.0]), np.array([1.0,0.0,-1.0]), 4)
cu2 = Cube(np.array([4.0, -1.53589838486225, 3.3]), \
np.array([[1.0,1.0,1.0],[0.5,0.5,0.5],[0.0,0.0,0.0]]), \
10.0, 0.04, None, None, \
np.array([1.0,1.0,-1.0]), np.array([2.5,0.0,-1.0]), 2)
#cu1 = Cube(np.array([0.0, 0.0, 10.0]), \
# np.array([[1.0,1.0,1.0],[0.5,0.5,0.5],[0.0,0.0,0.0]]), \
# 10.0, 0.04, None, None, \
# np.array([0.0,1.0,0.0]), np.array([0.0,0.0,-1.0]), 2)
scene = np.array([s1, s2, s3, s4, p1, c1, cu1, cu2])
bg_colour = np.array([0.7, 0.7, 1.0])
'''---------------------------------------------------------'''
''' Setup Image Plane '''
'''---------------------------------------------------------'''
eye = np.array([0.0,0.0,-30.0])
lookatp = np.array([0.0,0.0,0.0])
ipd = 5
up = np.array([0,1,0])
ipwidth = 3.2
ipheight = 2.0
pixel_width = ipwidth/float(width)
lookat = lookatp-eye
lookat = lookat/np.linalg.norm(lookat)
ipx = np.cross(up, lookat)
ipx = ipx/np.linalg.norm(ipx)
ipy = np.cross(lookat, ipx)
ipy = ipy/np.linalg.norm(ipy)
ipo = ipd*(lookat) + eye
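# The camera frame is now complete: lookat is the unit view direction, ipx and
# ipy are unit vectors spanning the image plane (from the cross products above),
# and ipo is the centre of the image plane, ipd units in front of the eye.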
'''---------------------------------------------------------'''
''' Setup PyTables Files '''
'''---------------------------------------------------------'''
atom = tables.UInt8Atom()
p_atom = tables.Float64Atom()
rt = tables.open_file("rt.h5", "w")
image = rt.create_earray(rt.root, 'image', atom, (0, width*channels))
imagep = rt.create_earray(rt.root, 'image_plane', p_atom, (0, width, 3))
'''---------------------------------------------------------'''
''' Build Image Plane '''
'''---------------------------------------------------------'''
lastPercent = 0
curPercent = 0
for s in range(s_num):
sp_image = np.zeros((s_size, width, 3), dtype=np.float64)
for y in range(0, s_size):
for x in range(0,width):
curPercent = math.floor((((s*s_size*width)+(y*width)+(x+1))/float(width*height))*100)
if (curPercent > lastPercent):
lastPercent = curPercent
cur_sec = time.time() - str_time
sys.stdout.write("\rBuild Plane %d%% [%ds]" % (curPercent, cur_sec))
sys.stdout.flush()
trans_y = s*s_size + y
sp_image[y,x] = ipo + (ipx-ipy)*pixel_width/2.0 + ipwidth*ipx*(-0.5 + (x/float(width))) - ipheight*ipy*(-0.5 + (trans_y/float(height)))
imagep.append(sp_image)
'''----------------------------------------------------------'''
''' Trace Scene '''
'''----------------------------------------------------------'''
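# getReflec mirrors an incident direction about the surface normal at the hit
# point: r = i - 2*(n . i)*n, written below in the equivalent form
# i + 2*(n . -i)*n, and renormalised before being returned.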
def getReflec(obj, incident, hit):
normal = obj.getNormal(hit)
incident = incident/np.linalg.norm(incident)
reflect = incident + (np.dot(normal, -1.0 * incident) * 2 * normal)
reflect = reflect/np.linalg.norm(reflect)
return reflect
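# phongShading evaluates the classic Phong terms for the single light source:
#   ambient  = glblAmb * obj.col[0]
#   diffuse  = obj.col[1] (or the texture colour) * max(N . L, 0) * lights[0,2]
#   specular = obj.col[2] * max(R . V, 0)**obj.sh * lights[0,3]
# Shadow rays are not cast yet (see the SHADOWS placeholder below).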
def phongShading(obj, source, hit):
result = np.zeros((3), dtype=np.float64)
col = np.zeros((3,3), dtype=np.float64)
toLight = lights[0,0] - hit
toLight = toLight/np.linalg.norm(toLight)
normal = obj.getNormal(hit)
toSource = source - hit
toSource = toSource/np.linalg.norm(toSource)
incidentLight = hit - lights[0,0]
incidentLight = incidentLight/np.linalg.norm(incidentLight)
reflectLight = getReflec(obj, incidentLight, hit)
'''AMBIENT'''
for i in range(3):
col[0,i] = glblAmb[i] * obj.col[0,i]
'''DIFFUSE'''
dotProd = np.dot(normal, toLight)
if (dotProd < 0.0):
dotProd = 0.0
difColour = obj.col[1]
if obj.tex is not None:
difColour = obj.getTextureColour(hit)
for i in range(3):
col[1,i] = difColour[i] * (dotProd * lights[0,2,i])
'''SPECULAR'''
dotProd = np.dot(reflectLight, toSource)
if (dotProd < 0.0):
dotProd = 0.0
dotProd = dotProd**obj.sh
for i in range(3):
col[2,i] = obj.col[2,i] * (dotProd * lights[0,3,i])
'''SHADOWS'''
'''to implement'''
for i in range(3):
result[i] = col[0,i] + col[1,i] + col[2,i]
return result
def getNearObj(source, ray):
nearDist = None
count = -1
nearObj = -1
hit = None
for obj in scene:
count += 1
temp_hit = obj.intersect(source, ray)
if temp_hit is not None:
tempDist = np.linalg.norm(temp_hit - source)
if (nearDist is None or tempDist < nearDist) and tempDist > 0.0:
nearDist = tempDist
nearObj = count
hit = temp_hit
if hit is not None:
no = np.zeros((5), dtype=np.float64)
no[0:3] = hit
no[3] = nearObj
no[4] = nearDist
return no
else:
return None
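# rayTrace shades the nearest hit and, for reflective objects, recursively adds
# refl * (the colour seen along the mirrored ray) for up to num_steps bounces;
# rays that miss every object return bg_colour.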
def rayTrace(source, ray, step):
result = np.zeros((3), dtype=np.float64)
average = np.zeros((3), dtype=np.float64)
'''GET NEAR OBJ'''
hit = None
nearObj = -1
no = getNearObj(source, ray)
if no is not None:
hit = no[0:3]
nearObj = int(no[3])  # stored as a float in the result array; cast back for indexing
if hit is not None:
result = phongShading(scene[nearObj], source, hit)
if (step <= num_steps) and (num_steps > 0) and scene[nearObj].refl > 0.0:
reflec = getReflec(scene[nearObj], hit - source, hit)
reflec = reflec/np.linalg.norm(reflec)
'''OFFSET THE RAY ORIGIN ALONG THE REFLECTION TO AVOID SELF-INTERSECTION'''
reflec_source = hit + reflec * 1e-10
'''TESTING
print("start")
print(str(hit))
print(str(reflec_source))
print("end")
'''
reflec_result = rayTrace(reflec_source, reflec, step+1)
result = result + scene[nearObj].refl * reflec_result
else:
result = bg_colour
return result
sys.stdout.write("\n")
lastPercent = 0.0
curPercent = 0.0
for s in range(s_num):
s_image = np.zeros((s_size, width * channels), dtype=np.uint8)
for y in range(0, s_size):
for x in range(0,width):
curPercent = math.floor((((s*s_size*width)+(y*width)+(x+1))/float(width*height))*1000) / 10.0
if (curPercent > lastPercent):
lastPercent = curPercent
cur_sec = time.time() - str_time
sys.stdout.write("\rTrace Scene %.1f%% [%ds]" % (curPercent, cur_sec))
sys.stdout.flush()
trans_y = s*s_size + y
trans_x = x*3
lookat = imagep[trans_y, x] - eye
lookat = lookat/np.linalg.norm(lookat)
colour = rayTrace(eye, lookat, 0)
colour = (num_colours - 1) * np.array(colour)
for i in range(3):
if colour[i] >= num_colours:
colour[i] = num_colours - 1
s_image[y,trans_x:trans_x+3] = colour
image.append(s_image)
'''-------------------------------------------------------'''
''' Write to PNG '''
'''-------------------------------------------------------'''
pngfile = open('rt.png', 'wb')
pngWriter = png.Writer(width, height, greyscale=False, alpha=False, bitdepth=8)
pngWriter.write(pngfile, image[:])
'''----------------------------------------------------------
print(ipo)
print(ipx)
print(ipy)
print(imagep[:])
'''
rt.close()
pngfile.close()
sys.stdout.write("\n")
sys.stdout.flush()
|
|
#!/usr/local/bin/python
# encoding: utf-8
"""
*Import ned_d catalogue into sherlock-catalogues database*
:Author:
David Young
"""
from __future__ import print_function
from __future__ import division
from builtins import zip
from past.utils import old_div
import sys
import os
os.environ['TERM'] = 'vt100'
import readline
import csv
import time
import glob
import pickle
import codecs
import string
import re
from fundamentals.mysql import writequery, readquery
from astrocalc.coords import unit_conversion
from sloancone import check_coverage
from neddy import namesearch
from docopt import docopt
from ._base_importer import _base_importer
class ned_d(_base_importer):
"""
*Import the* `NED-D <https://ned.ipac.caltech.edu/Library/Distances/>`_ *galaxy catalogue into the sherlock-catalogues database*
**Key Arguments**
- ``log`` -- logger
- ``settings`` -- the settings dictionary
- ``pathToDataFile`` -- path to the ned_d data file
- ``version`` -- version of the ned_d catalogue
- ``catalogueName`` -- the name of the catalogue
**Usage**
To import the ned_d catalogue, run the following:
```python
from sherlock.imports import ned_d
catalogue = ned_d(
log=log,
settings=settings,
pathToDataFile="/path/to/ned_d.txt",
version="1.0",
catalogueName="ned_d"
)
catalogue.ingest()
```
.. todo ::
- abstract this module out into its own stand alone script
"""
# INITIALISATION
def ingest(self):
"""Import the ned_d catalogue into the catalogues database
The method first generates a list of python dictionaries from the ned_d datafile, imports this list of dictionaries into a database table and then generates the HTMIDs for that table.
**Usage**
See class docstring for usage
.. todo ::
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``ingest`` method')
dictList = self._create_dictionary_of_ned_d()
self.primaryIdColumnName = "primaryId"
self.raColName = "raDeg"
self.declColName = "decDeg"
tableName = self.dbTableName
createStatement = u"""
CREATE TABLE `%(tableName)s` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`Method` varchar(150) DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,
`updated` varchar(45) DEFAULT '0',
`dist_derived_from_sn` varchar(150) DEFAULT NULL,
`dist_in_ned_flag` varchar(10) DEFAULT NULL,
`dist_index_id` mediumint(9) DEFAULT NULL,
`dist_mod` double DEFAULT NULL,
`dist_mod_err` double DEFAULT NULL,
`dist_mpc` double DEFAULT NULL,
`galaxy_index_id` mediumint(9) DEFAULT NULL,
`hubble_const` double DEFAULT NULL,
`lmc_mod` double DEFAULT NULL,
`notes` varchar(500) DEFAULT NULL,
`primary_ned_id` varchar(150) DEFAULT NULL,
`redshift` double DEFAULT NULL,
`ref` varchar(150) DEFAULT NULL,
`ref_date` int(11) DEFAULT NULL,
`master_row` tinyint(4) DEFAULT '0',
`major_diameter_arcmin` double DEFAULT NULL,
`ned_notes` varchar(700) DEFAULT NULL,
`object_type` varchar(100) DEFAULT NULL,
`redshift_err` double DEFAULT NULL,
`redshift_quality` varchar(100) DEFAULT NULL,
`magnitude_filter` varchar(10) DEFAULT NULL,
`minor_diameter_arcmin` double DEFAULT NULL,
`morphology` varchar(50) DEFAULT NULL,
`hierarchy` varchar(50) DEFAULT NULL,
`galaxy_morphology` varchar(50) DEFAULT NULL,
`radio_morphology` varchar(50) DEFAULT NULL,
`activity_type` varchar(50) DEFAULT NULL,
`in_ned` tinyint(4) DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`eb_v` double DEFAULT NULL,
`sdss_coverage` TINYINT DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `galaxy_index_id_dist_index_id` (`galaxy_index_id`,`dist_index_id`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
DROP VIEW IF EXISTS `view_%(tableName)s_master_recorders`;
CREATE
VIEW `view_%(tableName)s_master_recorders` AS
(SELECT
`%(tableName)s`.`primary_ned_id` AS `primary_ned_id`,
`%(tableName)s`.`object_type` AS `object_type`,
`%(tableName)s`.`raDeg` AS `raDeg`,
`%(tableName)s`.`decDeg` AS `decDeg`,
`%(tableName)s`.`dist_mpc` AS `dist_mpc`,
`%(tableName)s`.`dist_mod` AS `dist_mod`,
`%(tableName)s`.`dist_mod_err` AS `dist_mod_err`,
`%(tableName)s`.`Method` AS `dist_measurement_method`,
`%(tableName)s`.`redshift` AS `redshift`,
`%(tableName)s`.`redshift_err` AS `redshift_err`,
`%(tableName)s`.`redshift_quality` AS `redshift_quality`,
`%(tableName)s`.`major_diameter_arcmin` AS `major_diameter_arcmin`,
`%(tableName)s`.`minor_diameter_arcmin` AS `minor_diameter_arcmin`,
`%(tableName)s`.`magnitude_filter` AS `magnitude_filter`,
`%(tableName)s`.`eb_v` AS `gal_eb_v`,
`%(tableName)s`.`hierarchy` AS `hierarchy`,
`%(tableName)s`.`morphology` AS `morphology`,
`%(tableName)s`.`radio_morphology` AS `radio_morphology`,
`%(tableName)s`.`activity_type` AS `activity_type`,
`%(tableName)s`.`ned_notes` AS `ned_notes`,
`%(tableName)s`.`in_ned` AS `in_ned`,
`%(tableName)s`.`primaryId` AS `primaryId`
FROM
`%(tableName)s`
WHERE
(`%(tableName)s`.`master_row` = 1));
""" % locals()
self.add_data_to_database_table(
dictList=dictList,
createStatement=createStatement
)
self._clean_up_columns()
self._get_metadata_for_galaxies()
self._update_sdss_coverage()
self.log.debug('completed the ``ingest`` method')
return None
def _create_dictionary_of_ned_d(
self):
"""create a list of dictionaries containing all the rows in the ned_d catalogue
**Return**
- ``dictList`` - a list of dictionaries containing all the rows in the ned_d catalogue
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_create_dictionary_of_ned_d`` method')
count = 0
with open(self.pathToDataFile, 'r') as csvFile:
csvReader = csv.reader(
csvFile, dialect='excel', delimiter=',', quotechar='"')
totalRows = sum(1 for row in csvReader)
csvFile.close()
totalCount = totalRows
with open(self.pathToDataFile, 'r') as csvFile:
csvReader = csv.reader(
csvFile, dialect='excel', delimiter=',', quotechar='"')
theseKeys = []
dictList = []
for row in csvReader:
if len(theseKeys) == 0:
totalRows -= 1
if "Exclusion Code" in row and "Hubble const." in row:
for i in row:
if i == "redshift (z)":
theseKeys.append("redshift")
elif i == "Hubble const.":
theseKeys.append("hubble_const")
elif i == "G":
theseKeys.append("galaxy_index_id")
elif i == "err":
theseKeys.append("dist_mod_err")
elif i == "D (Mpc)":
theseKeys.append("dist_mpc")
elif i == "Date (Yr. - 1980)":
theseKeys.append("ref_date")
elif i == "REFCODE":
theseKeys.append("ref")
elif i == "Exclusion Code":
theseKeys.append("dist_in_ned_flag")
elif i == "Adopted LMC modulus":
theseKeys.append("lmc_mod")
elif i == "m-M":
theseKeys.append("dist_mod")
elif i == "Notes":
theseKeys.append("notes")
elif i == "SN ID":
theseKeys.append("dist_derived_from_sn")
elif i == "method":
theseKeys.append("dist_method")
elif i == "Galaxy ID":
theseKeys.append("primary_ned_id")
elif i == "D":
theseKeys.append("dist_index_id")
else:
theseKeys.append(i)
continue
if len(theseKeys):
count += 1
if count > 1:
# Cursor up one line and clear line
sys.stdout.write("\x1b[1A\x1b[2K")
if count > totalCount:
count = totalCount
percent = (old_div(float(count), float(totalCount))) * 100.
print(
"%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory" % locals())
rowDict = {}
for t, r in zip(theseKeys, row):
rowDict[t] = r
if t == "ref_date":
try:
rowDict[t] = int(r) + 1980
except:
rowDict[t] = None
if rowDict["dist_index_id"] != "999999":
dictList.append(rowDict)
csvFile.close()
self.log.debug(
'completed the ``_create_dictionary_of_ned_d`` method')
return dictList
def _clean_up_columns(
self):
"""clean up columns of the NED table
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_clean_up_columns`` method')
tableName = self.dbTableName
print("cleaning up %(tableName)s columns" % locals())
sqlQuery = u"""
set sql_mode="STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION";
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
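# REPLACE ZERO/EMPTY PLACEHOLDER VALUES WITH NULLS, THEN FLAG ONE ROW PER
# galaxy_index_id AS THE MASTER ROW FOR THAT GALAXY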
sqlQuery = u"""
update %(tableName)s set dist_mod_err = null where dist_mod_err = 0;
update %(tableName)s set dist_in_ned_flag = null where dist_in_ned_flag = "";
update %(tableName)s set notes = null where notes = "";
update %(tableName)s set redshift = null where redshift = 0;
update %(tableName)s set dist_derived_from_sn = null where dist_derived_from_sn = "";
update %(tableName)s set hubble_const = null where hubble_const = 0;
update %(tableName)s set lmc_mod = null where lmc_mod = 0;
update %(tableName)s set master_row = 0;
update %(tableName)s set master_row = 1 where primaryId in (select * from (select distinct primaryId from %(tableName)s group by galaxy_index_id) as alias);
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_clean_up_columns`` method')
return None
def _get_metadata_for_galaxies(
self):
"""get metadata for galaxies
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_get_metadata_for_galaxies`` method')
total, batches = self._count_galaxies_requiring_metadata()
print("%(total)s galaxies require metadata. Need to send %(batches)s batch requests to NED." % locals())
totalBatches = self.batches
thisCount = 0
# FOR EACH BATCH, GET THE GALAXY IDs, QUERY NED AND UPDATE THE DATABASE
while self.total:
thisCount += 1
self._get_3000_galaxies_needing_metadata()
dictList = self._query_ned_and_add_results_to_database(thisCount)
self.add_data_to_database_table(
dictList=dictList,
createStatement=False
)
self._count_galaxies_requiring_metadata()
self.log.debug('completed the ``_get_metadata_for_galaxies`` method')
return None
def _count_galaxies_requiring_metadata(
self):
""" count galaxies requiring metadata
**Return**
- ``self.total``, ``self.batches`` -- total number of galaxies needing metadata & the number of batches required to be sent to NED
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_count_galaxies_requiring_metadata`` method')
tableName = self.dbTableName
sqlQuery = u"""
select count(*) as count from %(tableName)s where master_row = 1 and in_ned is null
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
self.total = rows[0]["count"]
self.batches = int(old_div(self.total, 3000.)) + 1
if self.total == 0:
self.batches = 0
self.log.debug(
'completed the ``_count_galaxies_requiring_metadata`` method')
return self.total, self.batches
def _get_3000_galaxies_needing_metadata(
self):
""" get 3000 galaxies needing metadata
**Return**
- ``len(self.theseIds)`` -- the number of NED IDs returned
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_get_3000_galaxies_needing_metadata`` method')
tableName = self.dbTableName
# SELECT THE DATA FROM NED TABLE
self.theseIds = {}
sqlQuery = u"""
select primaryId, primary_ned_id from %(tableName)s where master_row = 1 and in_ned is null limit 3000;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
self.theseIds[row["primary_ned_id"]] = row["primaryId"]
self.log.debug(
'completed the ``_get_3000_galaxies_needing_metadata`` method')
return len(self.theseIds)
def _query_ned_and_add_results_to_database(
self,
batchCount):
""" query ned and add results to database
**Key Arguments**
- ``batchCount`` - the index number of the batch sent to NED
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug(
'starting the ``_query_ned_and_add_results_to_database`` method')
tableName = self.dbTableName
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
# QUERY NED WITH BATCH
totalCount = len(self.theseIds)
print("requesting metadata from NED for %(totalCount)s galaxies (batch %(batchCount)s)" % locals())
search = namesearch(
log=self.log,
names=list(self.theseIds.keys()),
quiet=True
)
results = search.get()
print("results returned from ned -- starting to add to database" % locals())
# CLEAN THE RETURNED DATA AND UPDATE DATABASE
totalCount = len(results)
count = 0
sqlQuery = ""
dictList = []
colList = ["redshift_quality", "redshift", "hierarchy", "object_type", "major_diameter_arcmin", "morphology", "magnitude_filter",
"ned_notes", "eb_v", "raDeg", "radio_morphology", "activity_type", "minor_diameter_arcmin", "decDeg", "redshift_err", "in_ned"]
if not len(results):
for k, v in list(self.theseIds.items()):
dictList.append({
"in_ned": 0,
"primaryID": v
})
for thisDict in results:
thisDict["tableName"] = tableName
count += 1
for k, v in list(thisDict.items()):
if not v or len(v) == 0:
thisDict[k] = "null"
if k in ["major_diameter_arcmin", "minor_diameter_arcmin"] and (":" in v or "?" in v or "<" in v):
thisDict[k] = v.replace(":", "").replace(
"?", "").replace("<", "")
if isinstance(v, ("".__class__, u"".__class__)) and '"' in v:
thisDict[k] = v.replace('"', '\\"')
if "Input name not" not in thisDict["input_note"] and "Same object as" not in thisDict["input_note"]:
if thisDict["ra"] != "null" and thisDict["dec"] != "null":
thisDict["raDeg"] = converter.ra_sexegesimal_to_decimal(
ra=thisDict["ra"]
)
thisDict["decDeg"] = converter.dec_sexegesimal_to_decimal(
dec=thisDict["dec"]
)
else:
thisDict["raDeg"] = None
thisDict["decDeg"] = None
thisDict["in_ned"] = 1
thisDict["eb_v"] = thisDict["eb-v"]
row = {}
row["primary_ned_id"] = thisDict["input_name"]
try:
row["primaryID"] = self.theseIds[thisDict["input_name"]]
for c in colList:
if thisDict[c] == "null":
row[c] = None
else:
row[c] = thisDict[c]
dictList.append(row)
except:
g = thisDict["input_name"]
self.log.error(
"Cannot find database table %(tableName)s primaryID for '%(g)s'\n\n" % locals())
dictList.append({
"in_ned": 0,
"primary_ned_id": thisDict["input_name"]
})
else:
dictList.append({
"primary_ned_id": thisDict["input_name"],
"in_ned": 0,
"primaryID": self.theseIds[thisDict["input_name"]]
})
self.log.debug(
'completed the ``_query_ned_and_add_results_to_database`` method')
return dictList
def _update_sdss_coverage(
self):
""" update sdss coverage
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
"""
self.log.debug('starting the ``_update_sdss_coverage`` method')
tableName = self.dbTableName
# SELECT THE LOCATIONS NEEDING TO BE CHECKED
sqlQuery = u"""
select primary_ned_id, primaryID, raDeg, decDeg, sdss_coverage from %(tableName)s where sdss_coverage is null and master_row = 1 and in_ned = 1 order by dist_mpc;
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
totalCount = len(rows)
count = 0
for row in rows:
count += 1
if count > 1:
# Cursor up three lines and clear
sys.stdout.write("\x1b[1A\x1b[2K")
sys.stdout.write("\x1b[1A\x1b[2K")
sys.stdout.write("\x1b[1A\x1b[2K")
if count > totalCount:
count = totalCount
percent = (old_div(float(count), float(totalCount))) * 100.
primaryID = row["primaryID"]
raDeg = float(row["raDeg"])
decDeg = float(row["decDeg"])
primary_ned_id = row["primary_ned_id"]
# SDSS CAN ONLY ACCEPT 60 QUERIES/MIN
time.sleep(1.1)
print("%(count)s / %(totalCount)s (%(percent)1.1f%%) NED galaxies checked for SDSS coverage" % locals())
print("NED NAME: ", primary_ned_id)
# covered = True | False | 999 (i.e. not sure)
sdss_coverage = check_coverage(
log=self.log,
ra=raDeg,
dec=decDeg
).get()
if sdss_coverage == 999:
sdss_coverage_flag = "null"
elif sdss_coverage == True:
sdss_coverage_flag = 1
elif sdss_coverage == False:
sdss_coverage_flag = 0
else:
self.log.error('could not get SDSS coverage for %(primary_ned_id)s' % locals())
sys.exit(0)
# UPDATE THE DATABASE FLAG
sqlQuery = u"""
update %(tableName)s set sdss_coverage = %(sdss_coverage_flag)s where primaryID = %(primaryID)s
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_update_sdss_coverage`` method')
return None
# use the tab-trigger below for new method
# xt-class-method
|
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import shutil
import gzip
import subprocess
import pytest
from basic_modules.metadata import Metadata
from tool.bwa_aligner import bwaAlignerTool
from tool.bwa_mem_aligner import bwaAlignerMEMTool
@pytest.mark.chipseq
def test_bwa_aligner_chipseq_aln():
"""
Function to test the BWA ALN aligner on ChIP-seq data
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genome_fa = resource_path + "macs2.Human.GCA_000001405.22.fasta"
fastq_file = resource_path + "macs2.Human.DRR000150.22.fastq"
input_files = {
"genome": genome_fa,
"index": genome_fa + ".bwa.tar.gz",
"loc": fastq_file
}
output_files = {
"output": fastq_file.replace(".fastq", "_aln.bam"),
"bai": fastq_file.replace(".fastq", "_aln.bai")
}
metadata = {
"genome": Metadata(
"Assembly", "fasta", genome_fa, None,
{"assembly": "test"}),
"index": Metadata(
"index_bwa", "", [genome_fa],
{
"assembly": "test",
"tool": "bwa_indexer"
}
),
"loc": Metadata(
"data_chip_seq", "fastq", fastq_file, None,
{"assembly": "test"}
)
}
bwa_t = bwaAlignerTool({"execution": resource_path})
bwa_t.run(input_files, metadata, output_files)
print(__file__)
assert os.path.isfile(resource_path + "macs2.Human.DRR000150.22_aln.bam") is True
assert os.path.getsize(resource_path + "macs2.Human.DRR000150.22_aln.bam") > 0
assert os.path.isfile(resource_path + "macs2.Human.DRR000150.22_aln.bai") is True
assert os.path.getsize(resource_path + "macs2.Human.DRR000150.22_aln.bai") > 0
@pytest.mark.bwa
def test_bwa_aligner_aln():
"""
Function to test the BWA ALN aligner (single-end reads)
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genome_fa = resource_path + "macs2.Human.GCA_000001405.22.fasta"
fastq_file = resource_path + "macs2.Human.DRR000150.22.fastq"
input_files = {
"genome": genome_fa,
"index": genome_fa + ".bwa.tar.gz",
"loc": fastq_file
}
output_files = {
"output": fastq_file.replace(".fastq", "_aln.bam"),
"bai": fastq_file.replace(".fastq", "_aln.bai")
}
metadata = {
"genome": Metadata(
"Assembly", "fasta", genome_fa, None,
{"assembly": "test"}),
"index": Metadata(
"index_bwa", "", [genome_fa],
{
"assembly": "test",
"tool": "bwa_indexer"
}
),
"loc": Metadata(
"data_chip_seq", "fastq", fastq_file, None,
{"assembly": "test"}
)
}
bwa_t = bwaAlignerTool({"execution": resource_path})
bwa_t.run(input_files, metadata, output_files)
print(__file__)
assert os.path.isfile(resource_path + "macs2.Human.DRR000150.22_aln.bam") is True
assert os.path.getsize(resource_path + "macs2.Human.DRR000150.22_aln.bam") > 0
assert os.path.isfile(resource_path + "macs2.Human.DRR000150.22_aln.bai") is True
assert os.path.getsize(resource_path + "macs2.Human.DRR000150.22_aln.bai") > 0
cmdl = "samtools view -c -f 0 {}macs2.Human.DRR000150.22_aln.bam".format(resource_path)
process = subprocess.Popen(cmdl, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
proc_out, proc_err = process.communicate() # pylint: disable=unused-variable
assert int(proc_out) > 0
try:
os.remove(resource_path + "macs2.Human.DRR000150.22_aln.bam")
os.remove(resource_path + "macs2.Human.DRR000150.22_aln.bai")
except OSError as ose:
print("Error: %s - %s." % (ose.filename, ose.strerror))
try:
shutil.rmtree(resource_path + "tmp")
except OSError as ose:
print("Error: %s - %s." % (ose.filename, ose.strerror))
@pytest.mark.bwa
def test_bwa_aligner_mem():
"""
Function to test the BWA MEM aligner (single-end reads)
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genome_fa = resource_path + "macs2.Human.GCA_000001405.22.fasta"
fastq_file = resource_path + "macs2.Human.DRR000150.22.fastq"
input_files = {
"genome": genome_fa,
"index": genome_fa + ".bwa.tar.gz",
"loc": fastq_file
}
output_files = {
"output": fastq_file.replace(".fastq", "_mem.bam"),
"bai": fastq_file.replace(".fastq", "_mem.bai")
}
metadata = {
"genome": Metadata(
"Assembly", "fasta", genome_fa, None,
{"assembly": "test"}),
"index": Metadata(
"index_bwa", "", [genome_fa],
{
"assembly": "test",
"tool": "bwa_indexer"
}
),
"loc": Metadata(
"data_chip_seq", "fastq", fastq_file, None,
{"assembly": "test"}
)
}
bwa_t = bwaAlignerMEMTool({"execution": resource_path})
bwa_t.run(input_files, metadata, output_files)
print(__file__)
assert os.path.isfile(resource_path + "macs2.Human.DRR000150.22_mem.bam") is True
assert os.path.getsize(resource_path + "macs2.Human.DRR000150.22_mem.bam") > 0
assert os.path.isfile(resource_path + "macs2.Human.DRR000150.22_mem.bai") is True
assert os.path.getsize(resource_path + "macs2.Human.DRR000150.22_mem.bai") > 0
cmdl = "samtools view -c -f 0 {}macs2.Human.DRR000150.22_mem.bam".format(resource_path)
process = subprocess.Popen(cmdl, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
proc_out, proc_err = process.communicate() # pylint: disable=unused-variable
assert int(proc_out) > 0
try:
os.remove(resource_path + "macs2.Human.DRR000150.22_mem.bam")
os.remove(resource_path + "macs2.Human.DRR000150.22_mem.bai")
except OSError as ose:
print("Error: %s - %s." % (ose.filename, ose.strerror))
try:
shutil.rmtree(resource_path + "tmp")
except OSError as ose:
print("Error: %s - %s." % (ose.filename, ose.strerror))
@pytest.mark.bwa
def test_bwa_aligner_00():
"""
Extract the compressed FASTQ files
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
fastq_file_1 = resource_path + "bsSeeker.Mouse.SRR892982_1.fastq"
fastq_file_2 = resource_path + "bsSeeker.Mouse.SRR892982_2.fastq"
files = [fastq_file_1, fastq_file_2]
for fastq_gz in files:
with gzip.open(fastq_gz + '.gz', 'rb') as fgz_in:
with open(fastq_gz, 'wb') as f_out:
f_out.write(fgz_in.read())
assert os.path.isfile(fastq_file_1) is True
assert os.path.getsize(fastq_file_1) > 0
assert os.path.isfile(fastq_file_2) is True
assert os.path.getsize(fastq_file_2) > 0
@pytest.mark.bwa
def test_bwa_aligner_aln_paired():
"""
Function to test the BWA ALN aligner with paired-end reads
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genome_fa = resource_path + "bsSeeker.Mouse.GRCm38.fasta"
fastq_file_1 = resource_path + "bsSeeker.Mouse.SRR892982_1.fastq"
fastq_file_2 = resource_path + "bsSeeker.Mouse.SRR892982_2.fastq"
input_files = {
"genome": genome_fa,
"index": genome_fa + ".bwa.tar.gz",
"loc": fastq_file_1,
"fastq2": fastq_file_2
}
output_files = {
"output": fastq_file_1.replace(".fastq", "_aln.bam"),
"bai": fastq_file_1.replace(".fastq", "_aln.bai")
}
metadata = {
"genome": Metadata(
"Assembly", "fasta", genome_fa, None,
{"assembly": "test"}),
"index": Metadata(
"index_bwa", "", [genome_fa],
{
"assembly": "test",
"tool": "bwa_indexer"
}
),
"loc": Metadata(
"data_wgbs", "fastq", fastq_file_1, None,
{"assembly": "test"}
),
"fastq2": Metadata(
"data_wgbs", "fastq", fastq_file_2, None,
{"assembly": "test"}
)
}
bwa_t = bwaAlignerTool({"execution": resource_path})
bwa_t.run(input_files, metadata, output_files)
assert os.path.isfile(resource_path + "bsSeeker.Mouse.SRR892982_1_aln.bam") is True
assert os.path.getsize(resource_path + "bsSeeker.Mouse.SRR892982_1_aln.bam") > 0
assert os.path.isfile(resource_path + "bsSeeker.Mouse.SRR892982_1_aln.bai") is True
assert os.path.getsize(resource_path + "bsSeeker.Mouse.SRR892982_1_aln.bai") > 0
cmdl = "samtools view -c -f 1 {}bsSeeker.Mouse.SRR892982_1_aln.bam".format(resource_path)
process = subprocess.Popen(cmdl, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
proc_out, proc_err = process.communicate() # pylint: disable=unused-variable
assert int(proc_out) > 0
try:
os.remove(resource_path + "bsSeeker.Mouse.SRR892982_1_aln.bam")
os.remove(resource_path + "bsSeeker.Mouse.SRR892982_1_aln.bai")
except OSError as ose:
print("Error: %s - %s." % (ose.filename, ose.strerror))
try:
shutil.rmtree(resource_path + "tmp")
except OSError as ose:
print("Error: %s - %s." % (ose.filename, ose.strerror))
@pytest.mark.bwa
def test_bwa_aligner_mem_paired():
"""
Function to test the BWA MEM aligner with paired-end reads
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genome_fa = resource_path + "bsSeeker.Mouse.GRCm38.fasta"
fastq_file_1 = resource_path + "bsSeeker.Mouse.SRR892982_1.fastq"
fastq_file_2 = resource_path + "bsSeeker.Mouse.SRR892982_2.fastq"
input_files = {
"genome": genome_fa,
"index": genome_fa + ".bwa.tar.gz",
"loc": fastq_file_1,
"fastq2": fastq_file_2
}
output_files = {
"output": fastq_file_1.replace(".fastq", "_mem.bam"),
"bai": fastq_file_1.replace(".fastq", "_mem.bai")
}
metadata = {
"genome": Metadata(
"Assembly", "fasta", genome_fa, None,
{"assembly": "test"}),
"index": Metadata(
"index_bwa", "", [genome_fa],
{
"assembly": "test",
"tool": "bwa_indexer"
}
),
"loc": Metadata(
"data_wgbs", "fastq", fastq_file_1, None,
{"assembly": "test"}
),
"fastq2": Metadata(
"data_wgbs", "fastq", fastq_file_2, None,
{"assembly": "test"}
)
}
bwa_t = bwaAlignerMEMTool({"execution": resource_path})
bwa_t.run(input_files, metadata, output_files)
assert os.path.isfile(resource_path + "bsSeeker.Mouse.SRR892982_1_mem.bam") is True
assert os.path.getsize(resource_path + "bsSeeker.Mouse.SRR892982_1_mem.bam") > 0
assert os.path.isfile(resource_path + "bsSeeker.Mouse.SRR892982_1_mem.bai") is True
assert os.path.getsize(resource_path + "bsSeeker.Mouse.SRR892982_1_mem.bai") > 0
cmdl = "samtools view -c -f 1 {}bsSeeker.Mouse.SRR892982_1_mem.bam".format(resource_path)
process = subprocess.Popen(cmdl, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
proc_out, proc_err = process.communicate() # pylint: disable=unused-variable
assert int(proc_out) > 0
try:
os.remove(resource_path + "bsSeeker.Mouse.SRR892982_1_mem.bam")
os.remove(resource_path + "bsSeeker.Mouse.SRR892982_1_mem.bai")
except OSError as ose:
print("Error: %s - %s." % (ose.filename, ose.strerror))
try:
shutil.rmtree(resource_path + "tmp")
except OSError as ose:
print("Error: %s - %s." % (ose.filename, ose.strerror))
@pytest.mark.idamidseq
# @pytest.mark.bwa
def test_bwa_aligner_idamidseq():
"""
Function to test the BWA MEM aligner on iDamID-seq data
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genome_fa = resource_path + "idear.Human.GCA_000001405.22.fasta"
fastq_files = [
resource_path + "idear.Human.SRR3714775.fastq",
resource_path + "idear.Human.SRR3714776.fastq",
resource_path + "idear.Human.SRR3714777.fastq",
resource_path + "idear.Human.SRR3714778.fastq"
]
# Unzip the compressed test data
for fastq_file in fastq_files:
with gzip.open(fastq_file + '.gz', 'rb') as fgz_in:
with open(fastq_file, 'wb') as f_out:
f_out.write(fgz_in.read())
assert os.path.isfile(fastq_file) is True
assert os.path.getsize(fastq_file) > 0
# Run the aligner for each fastq file
for fastq_file in fastq_files:
input_files = {
"genome": genome_fa,
"index": genome_fa + ".bwa.tar.gz",
"loc": fastq_file
}
output_files = {
"output": fastq_file.replace(".fastq", ".bam"),
"bai": fastq_file.replace(".fastq", ".bai")
}
metadata = {
"genome": Metadata(
"Assembly", "fasta", genome_fa, None,
{"assembly": "test"}),
"index": Metadata(
"index_bwa", "", [genome_fa],
{
"assembly": "test",
"tool": "bwa_indexer"
}
),
"loc": Metadata(
"data_damid_seq", "fastq", fastq_file, None,
{"assembly": "test"}
)
}
bwa_t = bwaAlignerMEMTool({"execution": resource_path})
bwa_t.run(input_files, metadata, output_files)
assert os.path.isfile(fastq_file.replace(".fastq", ".bam")) is True
assert os.path.getsize(fastq_file.replace(".fastq", ".bam")) > 0
assert os.path.isfile(fastq_file.replace(".fastq", ".bai")) is True
assert os.path.getsize(fastq_file.replace(".fastq", ".bai")) > 0
@pytest.mark.mnaseseq
@pytest.mark.bwa
def test_bwa_aligner_mnaseseq():
"""
Function to test the BWA ALN aligner on MNase-seq data
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genome_fa = resource_path + "inps.Mouse.GRCm38.fasta"
fastq_file = resource_path + "inps.Mouse.DRR000386.fastq"
input_files = {
"genome": genome_fa,
"index": genome_fa + ".bwa.tar.gz",
"loc": fastq_file
}
output_files = {
"output": fastq_file.replace(".fastq", ".bam"),
"bai": fastq_file.replace(".fastq", ".bai")
}
metadata = {
"genome": Metadata(
"Assembly", "fasta", genome_fa, None,
{"assembly": "test"}),
"index": Metadata(
"index_bwa", "", [genome_fa],
{
"assembly": "test",
"tool": "bwa_indexer"
}
),
"loc": Metadata(
"data_chip_seq", "fastq", fastq_file, None,
{"assembly": "test"}
)
}
bwa_t = bwaAlignerTool({"execution": resource_path})
bwa_t.run(input_files, metadata, output_files)
assert os.path.isfile(resource_path + "inps.Mouse.DRR000386.bam") is True
assert os.path.getsize(resource_path + "inps.Mouse.DRR000386.bam") > 0
assert os.path.isfile(resource_path + "inps.Mouse.DRR000386.bai") is True
assert os.path.getsize(resource_path + "inps.Mouse.DRR000386.bai") > 0
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
try:
from xml.etree import cElementTree as etree
except ImportError:
from xml.etree import ElementTree as etree
import svgplotlib.VG as vg
from svgplotlib.SVG.VGParser import Parser, Box
# Missing imports: math is needed by updateGradient, and parseLength / SVGError
# are referenced below; the latter two are assumed to be provided by the same
# VGParser module as Parser and Box.
import math
from svgplotlib.SVG.VGParser import parseLength, SVGError
class Renderer(Parser):
LINK = 'xlink:href'
SVG_NS = '{http://www.w3.org/2000/svg}'
SVG_ROOT = 'svg'
SVG_A = 'a'
SVG_G = 'g'
SVG_TITLE = 'title'
SVG_DESC = 'desc'
SVG_DEFS = 'defs'
SVG_SYMBOL = 'symbol'
SVG_USE = 'use'
SVG_RECT = 'rect'
SVG_CIRCLE = 'circle'
SVG_ELLIPSE = 'ellipse'
SVG_LINE = 'line'
SVG_POLYLINE = 'polyline'
SVG_POLYGON = 'polygon'
SVG_PATH = 'path'
SVG_LINEARGRADIENT = 'linearGradient'
SVG_RADIALGRADIENT = 'radialGradient'
SVG_TEXT = 'text'
SVG_TSPAN = 'tspan'
SVG_IMAGE = 'image'
SVG_NODES = frozenset((
SVG_ROOT, SVG_A, SVG_G, SVG_TITLE, SVG_DESC, SVG_DEFS, SVG_SYMBOL,
SVG_USE, SVG_RECT, SVG_CIRCLE, SVG_ELLIPSE, SVG_LINE, SVG_POLYLINE,
SVG_POLYGON, SVG_PATH, SVG_TEXT, SVG_TSPAN, SVG_IMAGE,
SVG_LINEARGRADIENT, SVG_RADIALGRADIENT
))
SKIP_NODES = frozenset((SVG_TITLE, SVG_DESC, SVG_DEFS, SVG_SYMBOL))
PATH_NODES = frozenset((SVG_RECT, SVG_CIRCLE, SVG_ELLIPSE, SVG_LINE,
SVG_POLYLINE, SVG_POLYGON, SVG_PATH, SVG_TEXT))
GRADIENT_NODES = frozenset((SVG_LINEARGRADIENT, SVG_RADIALGRADIENT))
def __init__(self, xmltree, imageprovider = None):
self.root = None
self.imageprovider = imageprovider
self.stroke = None
self.fill = None
self.level = 0
self.seen = set()
self.skip = set()
self.styles = {}
self.hasFill = False
self.gradient = None
self.hasStroke = False
self.bounds = Box()
if hasattr(xmltree,'element'):
xmltree = xmltree.element
self.xmltree = xmltree
# svg fragment?
if etree.iselement(xmltree) and xmltree.tag == self.SVG_ROOT:
root = xmltree
else:
root = xmltree.getroot()
if not root.tag == self.SVG_ROOT:
raise ValueError("Expected SVG fragment as root object")
# parse svg constructs for quicker display
self.preparse(root)
def findLink(self, link_id, section = None):
if section is None:
section = self.SVG_DEFS
if link_id.startswith('#'):
link_id = link_id[1:]
target = None
for defs in self.root.getiterator(section):
for element in defs:
if element.get('id') == link_id:
target = element
break
return target
def render(self, node = None, data = None):
if node is None:
self.level = 0
vg.Seti(vg.MATRIX_MODE, vg.MATRIX_PATH_USER_TO_SURFACE)
# Set up painters with default values
self.gradientFill = vg.CreatePaint()
stroke = self.stroke = vg.CreatePaint()
vg.SetParameterfv(stroke, vg.PAINT_COLOR, 4, [0.,0.,0.,1.])
vg.SetPaint(stroke, vg.STROKE_PATH)
fill = self.fill = vg.CreatePaint()
vg.SetParameterfv(fill, vg.PAINT_COLOR, 4, [0.,0.,0.,1.])
vg.SetPaint(fill, vg.FILL_PATH)
self.render(self.root)
return
# Skip non SVG nodes, hidden or malformed nodes
if node.tag not in self.SVG_NODES or node in self.skip:
return
if node.tag == self.SVG_ROOT:
nodedata = node.tail
# iterate children
for child in node:
self.render(child)
elif node.tag in (self.SVG_G, self.SVG_A, self.SVG_USE):
self.level += 1
nodedata = node.tail
saved_style = None
if 'style' in nodedata:
saved_style = self.applyStyle(nodedata['style'])
transform = nodedata.get('transform')
saved_transform = self.applyTransform(transform)
if node.tag == self.SVG_USE:
self.render(nodedata['target'])
else:
# iterate children
for child in node:
self.render(child)
if not saved_style is None:
self.applyStyle(saved_style, save = False)
if not saved_transform is None:
vg.LoadMatrix(saved_transform)
self.level -= 1
elif node.tag in self.SKIP_NODES:
# Skip non-graphics elements
return
elif node.tag in self.GRADIENT_NODES:
nodedata = node.tail
if nodedata['unit'] != "objectBoundingBox":
if node.tag == self.SVG_LINEARGRADIENT:
args = nodedata['x1'], nodedata['y1'], \
nodedata['x2'], nodedata['y2']
mode = vg.PAINT_LINEAR_GRADIENT
else:
args = nodedata['cx'], nodedata['cy'], \
nodedata['fx'], nodedata['fy'], \
nodedata['radius']
mode = vg.PAINT_RADIAL_GRADIENT
vg.SetParameterfv(self.gradientFill, mode, len(args), args)
stops = nodedata['stops']
vg.SetParameteri(self.gradientFill, vg.PAINT_COLOR_RAMP_SPREAD_MODE, nodedata['spreadMode'])
if node.tag == self.SVG_LINEARGRADIENT:
vg.SetParameteri(self.gradientFill, vg.PAINT_TYPE, vg.PAINT_TYPE_LINEAR_GRADIENT)
else:
vg.SetParameteri(self.gradientFill, vg.PAINT_TYPE, vg.PAINT_TYPE_RADIAL_GRADIENT)
vg.SetParameterfv(self.gradientFill, vg.PAINT_COLOR_RAMP_STOPS, len(stops), stops)
elif node.tag == self.SVG_TEXT:
return
elif node.tag == self.SVG_IMAGE:
nodedata = node.tail
if not 'image' in nodedata:
rgba_data = nodedata['rgba_data']
if rgba_data is None:
return
# create image
w, h, rgba_data = rgba_data
image = vg.CreateImage(vg.lABGR_8888, w, h)
vg.ImageSubData(image, rgba_data, w*4, vg.lABGR_8888, 0, 0, w, h)
nodedata['image'] = image
# remove raw data
del nodedata['rgba_data']
transform = nodedata.get('transform')
saved_transform = self.applyTransform(transform)
modelmat = vg.GetMatrix()
vg.Seti(vg.MATRIX_MODE, vg.MATRIX_IMAGE_USER_TO_SURFACE)
vg.LoadMatrix(modelmat)
vg.DrawImage(nodedata['image'])
vg.Seti(vg.MATRIX_MODE, vg.MATRIX_PATH_USER_TO_SURFACE)
if not saved_transform is None:
vg.LoadMatrix(saved_transform)
elif node.tag in self.PATH_NODES:
nodedata = node.tail
if not 'path' in nodedata:
nodedata['path'] = self.createPath(node)
style = nodedata.get('style')
saved_style = self.applyStyle(style)
transform = nodedata.get('transform')
saved_transform = self.applyTransform(transform)
if self.hasStroke:
vg.DrawPath(nodedata['path'], vg.STROKE_PATH)
if self.hasFill:
if not self.gradient is None:
gdata = self.gradient.tail
if gdata['unit'] == "objectBoundingBox":
self.updateGradient(nodedata['bounds'])
vg.SetPaint(self.gradientFill, vg.FILL_PATH)
transform = gdata.get('transform')
saved_paint = self.applyTransform(transform, vg.MATRIX_FILL_PAINT_TO_USER)
vg.DrawPath(nodedata['path'], vg.FILL_PATH)
if not saved_paint is None:
vg.Seti(vg.MATRIX_MODE, vg.MATRIX_FILL_PAINT_TO_USER)
vg.LoadMatrix(saved_paint)
vg.Seti(vg.MATRIX_MODE, vg.MATRIX_PATH_USER_TO_SURFACE)
vg.SetPaint(self.fill, vg.FILL_PATH)
else:
vg.DrawPath(nodedata['path'], vg.FILL_PATH)
self.applyStyle(saved_style, save = False)
if not saved_transform is None:
vg.LoadMatrix(saved_transform)
def createPath(self, node):
nodedata = node.tail
args = nodedata['args']
path = vg.CreatePath(vg.PATH_FORMAT_STANDARD, vg.PATH_DATATYPE_F,
1,0,0,0, vg.PATH_CAPABILITY_ALL)
if node.tag == self.SVG_LINE:
vg.Line(path, args[0], args[1], args[2], args[3])
elif node.tag == self.SVG_RECT:
if args[4] == 0. and args[5] == 0.:
vg.Rect(path, args[0], args[1], args[2], args[3])
else:
vg.RoundRect(path, args[0], args[1], args[2], args[3],
args[4], args[5])
elif node.tag == self.SVG_CIRCLE:
cx, cy, r = args
vg.Ellipse(path, cx, cy, 2*r, 2*r)
elif node.tag == self.SVG_ELLIPSE:
vg.Ellipse(path, args[0], args[1], args[2], args[3])
elif node.tag == self.SVG_POLYLINE:
if len(args) == 4:
vg.Line(path, args[0], args[1], args[2], args[3])
else:
vg.Polygon(path, args, len(args)/2, False)
elif node.tag == self.SVG_POLYGON:
vg.Polygon(path, args, len(args)/2, True)
elif node.tag == self.SVG_PATH:
segs, data = args
vg.AppendPathData(path, len(segs), segs, data)
else:
raise SVGError("Tag '%s' not implemented!" % node.tag)
return path
def updateGradient(self, bounds):
'''
Update gradient to element bounding box
'''
gdata = self.gradient.tail
x0, y0 = bounds.minx, bounds.miny
w, h = bounds.width, bounds.height
if self.gradient.tag == self.SVG_LINEARGRADIENT:
x1,y1 = gdata['x1'], gdata['y1']
x2,y2 = gdata['x2'], gdata['y2']
if "%" in x1:
x1 = x0 + float(x1[:-1])/100 * w
else:
x1 = x0 + parseLength(x1)
if "%" in x2:
x2 = x0 + float(x2[:-1])/100 * w
else:
x2 = x0 + parseLength(x2)
if "%" in y1:
y1 = y0 + float(y1[:-1])/100 * h
else:
y1 = y0 + parseLength(y1)
if "%" in y2:
y2 = y0 + float(y2[:-1])/100 * h
else:
y2 = y0 + parseLength(y2)
data = (x1,y1,x2,y2)
vg.SetParameterfv(self.gradientFill, vg.PAINT_LINEAR_GRADIENT, 4, data)
else:
cx, cy = gdata['cx'], gdata['cy']
fx, fy = gdata['fx'], gdata['fy']
radius = gdata['radius']
if "%" in fx:
fx = x0 + float(fx[:-1])/100 * w
else:
fx = x0 + parseLength(fx)
if "%" in fy:
fy = y0 + float(fy[:-1])/100 *h
else:
fy = y0 + parseLength(fy)
if "%" in cx:
cx = x0 + float(cx[:-1])/100 * w
else:
cx = x0 + parseLength(cx)
if "%" in cy:
cy = y0 + float(cy[:-1])/100 * h
else:
cy = y0 + parseLength(cy)
if "%" in radius:
r = float(radius[:-1])/100 * w
else:
r = parseLength(radius)
if (fx - cx)**2 + (fy - cy)**2 > r**2:
angle = math.atan2(fy - cy, fx - cx)
fx = cx + r*math.cos(angle)
fy = cy + r*math.sin(angle)
data = (cx,cy,fx,fy,r)
vg.SetParameterfv(self.gradientFill, vg.PAINT_RADIAL_GRADIENT, 5, data)
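# Illustrative note (not part of the original class): for a linear gradient
# defined with x1="20%" on an element whose bounding box has minx=10 and
# width=200, the branch above resolves the coordinate as
#   x1 = 10 + 0.20 * 200 = 50
# while an absolute length such as x1="15" simply becomes 10 + 15 = 25.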
def applyStyle(self, style, save = True):
if style is None:
return
saved = {}
for name, value in style.iteritems():
if not value:
continue
if name == 'hasFill':
if save:
saved[name] = self.hasFill
self.hasFill = value
elif name == 'hasStroke':
if save:
saved[name] = self.hasStroke
self.hasStroke = value
elif name == 'gradient':
if save:
saved[name] = self.gradient
self.gradient = value
elif name == 'fill':
if etree.iselement(value):
self.render(value)
else:
if save:
saved[name] = vg.GetParameterfv(self.fill, vg.PAINT_COLOR)
if 'fill-opacity' in style:
value = value[0], value[1], value[2], style['fill-opacity']
vg.SetParameterfv(self.fill, vg.PAINT_COLOR, 4, value)
elif name == 'fill-rule':
if save:
saved[name] = vg.Geti(vg.FILL_RULE)
vg.Seti(vg.FILL_RULE, value)
elif name == 'stroke':
if save:
saved[name] = vg.GetParameterfv(self.stroke, vg.PAINT_COLOR)
if 'stroke-opacity' in style:
value = value[0], value[1], value[2], style['stroke-opacity']
vg.SetParameterfv(self.stroke, vg.PAINT_COLOR, 4, value)
elif name == 'stroke-linecap':
if save:
saved[name] = vg.Geti(vg.STROKE_CAP_STYLE)
vg.Seti(vg.STROKE_CAP_STYLE, value)
elif name == 'stroke-linejoin':
if save:
saved[name] = vg.Geti(vg.STROKE_JOIN_STYLE)
vg.Seti(vg.STROKE_JOIN_STYLE, value)
elif name == 'stroke-dasharray':
if save:
saved[name] = vg.Getfv(vg.STROKE_DASH_PATTERN)
vg.Setfv(vg.STROKE_DASH_PATTERN, len(value), value)
elif name == 'stroke-width':
if save:
saved[name] = vg.Getf(vg.STROKE_LINE_WIDTH)
vg.Setf(vg.STROKE_LINE_WIDTH, value)
elif name == 'stroke-dashoffset':
if save:
saved[name] = vg.Getf(vg.STROKE_DASH_PHASE)
vg.Setf(vg.STROKE_DASH_PHASE, value)
elif name == 'stroke-miterlimit':
if save:
saved[name] = vg.Getf(vg.STROKE_MITER_LIMIT)
vg.Setf(vg.STROKE_MITER_LIMIT, value)
return saved
def applyTransform(self, transform, mode = None):
if transform is None:
return
if mode is None:
mode = vg.MATRIX_PATH_USER_TO_SURFACE
if vg.Geti(vg.MATRIX_MODE) != mode:
vg.Seti(vg.MATRIX_MODE, mode)
saved = vg.GetMatrix()
vg.MultMatrix(transform)
return saved
if __name__ == '__main__':
xmltree = etree.fromstring("""<?xml
version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg width="500" height="500" viewBox="0 0 1200 400"
xmlns="http://www.w3.org/2000/svg" version="1.1">
<desc>Example rect02 - rounded rectangles</desc>
<!-- Show outline of canvas using 'rect' element -->
<rect x="1" y="1" width="1198" height="398"
fill="none" stroke="blue" stroke-width="2"/>
<rect x="100" y="100" width="400" height="200" rx="50"
fill="green" />
<g transform="translate(700 210) rotate(-30)">
<rect x="0" y="0" width="400" height="200" rx="50"
fill="none" stroke="purple" stroke-width="30" />
</g>
</svg>
""")
renderer = Renderer(xmltree)
import svgplotlib.FLTK as Fl
WIDTH, HEIGHT = 600,700
window = Fl.Window(WIDTH, HEIGHT)
width,height = renderer.width, renderer.height
widget = Fl.Button(10, 10, width, height)
pixels = vg.PixelBuffer(width,height)
ctx = vg.CreateOffScreenSH()
vg.StartOffScreenSH(ctx, width, height)
vg.Setfv(vg.CLEAR_COLOR, 4, [1.,1.,1.,1.])
vg.Clear(0, 0, width, height)
# center on bounding box
box = renderer.bounds
scale = min(width/box.width, height/box.height)
vg.Seti(vg.MATRIX_MODE, vg.MATRIX_PATH_USER_TO_SURFACE)
vg.LoadIdentity()
vg.Scale(scale, scale)
vg.Translate(0., 1.5*box.height)
vg.Scale(1., -1.)
vg.Translate(-box.minx, -box.miny + .5*box.height)
renderer.render()
vg.EndOffScreenSH(ctx, pixels)
vg.DestroyOffScreenSH(ctx)
img = Fl.RGB_Image(width,height, 4, 0, pixels)
widget.set_image(img)
window.show()
Fl.run()
|
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities functions to manipulate the data in the colab."""
import dataclasses
import datetime
import itertools
import operator
from typing import List, Optional
import numpy as np
import pandas as pd
import pandas.io.formats.style as style
from scipy import stats
from trimmed_match.design import common_classes
TimeWindow = common_classes.TimeWindow
FormatOptions = common_classes.FormatOptions
_operator_functions = {'>': operator.gt,
'<': operator.lt,
'<=': operator.le,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne}
_inverse_op = {'<': '>', '<=': '>=', '>': '<', '>=': '<=', '=': '!='}
@dataclasses.dataclass
class CalculateMinDetectableIroas:
"""Class for the calculation of the minimum detectable iROAS.
Hypothesis testing for H0: iROAS=0 vs H1: iROAS>=min_detectable_iroas based
on one sample X which follows a normal distribution with mean iROAS (unknown)
and standard deviation rmse (known).
Typical usage example:
calc_min_detectable_iroas = CalculateMinDetectableIroas(0.1, 0.9)
min_detectable_iroas = calc_min_detectable_iroas.at(2.0)
"""
# chance of rejecting H0 incorrectly when H0 holds.
significance_level: float = 0.1
# chance of rejecting H0 correctly when H1 holds.
power_level: float = 0.9
# minimum detectable iroas at rmse=1.
rmse_multiplier: float = dataclasses.field(init=False)
def __post_init__(self):
"""Calculates rmse_multiplier.
Raises:
ValueError: if significance_level or power_level is not in (0, 1).
"""
if self.significance_level <= 0 or self.significance_level >= 1.0:
raise ValueError('significance_level must be in (0, 1), but got '
f'{self.significance_level}.')
if self.power_level <= 0 or self.power_level >= 1.0:
raise ValueError('power_level must be in (0, 1), but got '
f'{self.power_level}.')
self.rmse_multiplier = (
stats.norm.ppf(self.power_level) +
stats.norm.ppf(1 - self.significance_level))
def at(self, rmse: float) -> float:
"""Calculates min_detectable_iroas at the specified rmse."""
return rmse * self.rmse_multiplier
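# Illustrative sketch (not part of the original module): with the default
# significance_level=0.1 and power_level=0.9,
#   rmse_multiplier = norm.ppf(0.9) + norm.ppf(0.9) ~= 1.2816 + 1.2816 = 2.563
# so CalculateMinDetectableIroas().at(2.0) returns roughly 5.13.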
def find_days_to_exclude(
dates_to_exclude: List[str]) -> List[TimeWindow]:
"""Returns a list of time windows to exclude from a list of days and weeks.
Args:
dates_to_exclude: a List of strings with format indicating a single day as
'2020/01/01' (YYYY/MM/DD) or an entire time period as
'2020/01/01 - 2020/02/01' (indicating start and end date of the time period)
Returns:
days_exclude: a List of TimeWindows obtained from the list in input.
"""
days_exclude = []
for x in dates_to_exclude:
tmp = x.split('-')
if len(tmp) == 1:
try:
days_exclude.append(
TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[0])))
except ValueError:
raise ValueError(f'Cannot convert the string {tmp[0]} to a valid date.')
elif len(tmp) == 2:
try:
days_exclude.append(
TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[1])))
except ValueError:
raise ValueError(
f'Cannot convert the strings in {tmp} to a valid date.')
else:
raise ValueError(f'The input {tmp} cannot be interpreted as a single' +
' day or a time window')
return days_exclude
def expand_time_windows(periods: List[TimeWindow]) -> List[pd.Timestamp]:
"""Return a list of days to exclude from a list of TimeWindows.
Args:
periods: List of time windows (first day, last day).
Returns:
days_exclude: a List of days obtained by expanding the TimeWindows in input.
"""
days_exclude = []
for window in periods:
days_exclude += pd.date_range(window.first_day, window.last_day, freq='D')
return list(set(days_exclude))
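# Illustrative sketch (not part of the original module), combining the two
# helpers above:
#
#   periods = find_days_to_exclude(
#       ['2020/10/10', '2020/11/10 - 2020/11/17'])
#   days = expand_time_windows(periods)
#
# `periods` holds two TimeWindows (a single day and an 8-day range), and
# `days` holds the 9 distinct pd.Timestamps they cover.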
def overlap_percent(dates_left: List['datetime.datetime'],
dates_right: List['datetime.datetime']) -> float:
"""Find the size of the intersections of two arrays, relative to the first array.
Args:
dates_left: List of datetime.datetime
dates_right: List of datetime.datetime
Returns:
percentage: the percentage of elements of dates_right that also appear in
dates_left
"""
intersection = np.intersect1d(dates_left, dates_right)
percentage = 100 * len(intersection) / len(dates_right)
return percentage
def check_time_periods(geox_data: pd.DataFrame,
start_date_eval: pd.Timestamp,
start_date_aa_test: pd.Timestamp,
experiment_duration_weeks: int,
frequency: str) -> bool:
"""Checks that the geox_data contains the data for the two periods.
Check that the geox_data contains all observations during the evaluation and
AA test periods to guarantee that the experiment lasts exactly a certain
number of days/weeks, depending on the frequency of the data (daily/weekly).
Args:
geox_data: pd.Dataframe with at least the columns (date, geo).
start_date_eval: start date of the evaluation period.
start_date_aa_test: start date of the aa test period.
experiment_duration_weeks: int, length of the experiment in weeks.
frequency: str indicating the frequency of the time series. It should be one
of 'infer', 'D', 'W'.
Returns:
bool: a bool, True if the time periods specified pass all the checks
Raises:
ValueError: if part of the evaluation or AA test period are shorter than
experiment_duration (either weeks or days).
"""
if frequency not in ['infer', 'D', 'W']:
raise ValueError(
f'frequency should be one of ["infer", "D", "W"], got {frequency}')
if frequency == 'infer':
tmp = geox_data.copy().set_index(['date', 'geo'])
frequency = infer_frequency(tmp, 'date', 'geo')
if frequency == 'W':
frequency = '7D'
number_of_observations = experiment_duration_weeks
else:
number_of_observations = 7 * experiment_duration_weeks
freq_str = 'weeks' if frequency == '7D' else 'days'
missing_eval = find_missing_dates(geox_data, start_date_eval,
experiment_duration_weeks,
number_of_observations, frequency)
if missing_eval:
raise ValueError(
(f'The evaluation period contains the following {freq_str} ' +
f'{missing_eval} for which we do not have data.'))
missing_aa_test = find_missing_dates(geox_data, start_date_aa_test,
experiment_duration_weeks,
number_of_observations, frequency)
if missing_aa_test:
raise ValueError((f'The AA test period contains the following {freq_str} ' +
f'{missing_aa_test} for which we do not have data.'))
return True
def find_missing_dates(geox_data: pd.DataFrame, start_date: pd.Timestamp,
period_duration_weeks: int,
number_of_observations: int,
frequency: str) -> List[str]:
"""Find missing observations in a time period.
Args:
geox_data: pd.Dataframe with at least the columns (date, geo).
start_date: start date of the evaluation period.
period_duration_weeks: int, length of the period in weeks.
number_of_observations: expected number of time points.
frequency: str or pd.DateOffset indicating the frequency of the time series.
Returns:
missing: a list of strings, containing the dates for which data are missing
in geox_data.
"""
days = datetime.timedelta(days=7 * period_duration_weeks - 1)
period_dates = ((geox_data['date'] >= start_date) &
(geox_data['date'] <= start_date + days))
days_in_period = geox_data.loc[
period_dates, 'date'].drop_duplicates().dt.strftime('%Y-%m-%d').to_list()
missing = np.array([])
if len(days_in_period) != number_of_observations:
expected_observations = list(
pd.date_range(start_date, start_date + days,
freq=frequency).strftime('%Y-%m-%d'))
missing = set(expected_observations) - set(days_in_period)
return sorted(missing)
def infer_frequency(data: pd.DataFrame, date_index: str,
series_index: str) -> str:
"""Infers frequency of data from pd.DataFrame with multiple indices.
Infers frequency of data from pd.DataFrame with two indices, one for the slice
name and one for the date-time.
Example:
df = pd.Dataframe{'date': [2020-10-10, 2020-10-11], 'geo': [1, 1],
'response': [10, 20]}
df.set_index(['geo', 'date'], inplace=True)
infer_frequency(df, 'date', 'geo')
Args:
data: a pd.DataFrame for which frequency needs to be inferred.
date_index: string containing the name of the time index.
series_index: string containing the name of the series index.
Returns:
A str, either 'D' or 'W' indicating the most likely frequency inferred
from the data.
Raises:
ValueError: if it is not possible to infer frequency of sampling from the
provided pd.DataFrame.
"""
data = data.sort_values(by=[date_index, series_index])
# Infer the most likely frequency for each series_index
series_names = data.index.get_level_values(series_index).unique().tolist()
series_frequencies = []
for series in series_names:
observed_times = data.iloc[data.index.get_level_values(series_index) ==
series].index.get_level_values(date_index)
n_steps = len(observed_times)
if n_steps > 1:
time_diffs = (
observed_times[1:n_steps] -
observed_times[0:(n_steps - 1)]).astype('timedelta64[D]').values
min_frequency = np.min(time_diffs)
series_frequencies.append(min_frequency)
if not series_frequencies:
raise ValueError(
'At least one series with more than one observation must be provided.')
if series_frequencies.count(series_frequencies[0]) != len(series_frequencies):
raise ValueError(
'The provided time series seem to have irregular frequencies.')
try:
frequency = {
1: 'D',
7: 'W'
}[series_frequencies[0]]
except KeyError:
raise ValueError('Frequency could not be identified. Got %d days.' %
series_frequencies[0])
return frequency
def human_readable_number(number: float) -> str:
"""Print a large number in a readable format.
Return a readable format for a number, e.g. 123 million becomes 123M.
Args:
number: a float to be printed in human readable format.
Returns:
readable_number: a string containing the formatted number.
"""
number = float('{:.3g}'.format(number))
magnitude = 0
while abs(number) >= 1000 and magnitude < 4:
magnitude += 1
number /= 1000.0
readable_number = '{}{}'.format('{:f}'.format(number).rstrip('0').rstrip('.'),
['', 'K', 'M', 'B', 'tn'][magnitude])
return readable_number
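# Illustrative sketch (not part of the original module):
#
#   human_readable_number(123)        # '123'
#   human_readable_number(1234567)    # '1.23M'  (3 significant digits)
#   human_readable_number(123456789)  # '123M'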
def change_background_row(df: pd.DataFrame, value: float, operation: str,
column: str):
"""Colors a row of a table based on the expression in input.
Color a row in:
- orange if the value of the column satisfies the expression in input
- beige if the value of the column satisfies the inverse expression in input
- green otherwise
For example, if the column has values [1, 2, 3] and we pass 'value' equal to
2, and operation '>', then
- 1 is marked in beige (1 < 2, which is the inverse expression)
- 2 is marked in green (it's not > and it's not <)
- 3 is marked in orange (3 > 2, which is the expression)
Args:
df: the table of which we want to change the background color.
value: term of comparison to be used in the expression.
operation: a string to define which operator to use, e.g. '>' or '='. For a
full list check _operator_functions.
column: name of the column to be used for the comparison
Returns:
pd.Series
"""
if _operator_functions[operation](float(df[column]), value):
return pd.Series('background-color: orange', df.index)
elif _operator_functions[_inverse_op[operation]](float(df[column]), value):
return pd.Series('background-color: beige', df.index)
else:
return pd.Series('background-color: lightgreen', df.index)
def flag_percentage_value(val, value: float, operation: str):
"""Colors a cell in red if its value satisfy the expression in input.
Colors a cell in red if the expression is true for that cell, e.g. if the
value of the cell is 10, 'value' in input is 5 and operation is '>', then we
will color the cell in red as 10 > 5.
Args:
val: value in a cell of a dataframe.
value: term of comparison used to decide the color of the cell.
operation: a string to define which operator to use, e.g. '>' or '='. For a
full list check _operator_functions.
Returns:
a str defining the color coding of the cell.
"""
if _operator_functions[operation](float(val.strip(' %')), value):
color = 'red'
else:
color = 'black'
return 'color: %s' % color
def create_output_table(results: pd.DataFrame,
total_response: float,
total_spend: float,
geo_treatment: pd.DataFrame,
budgets_for_design: List[float],
average_order_value: float,
num_geos: int,
confidence_level: float = 0.9,
power_level: float = 0.8) -> pd.DataFrame:
"""Creates the table with the output designs.
Args:
results: table with columns (num_pairs_filtered,
experiment_response, experiment_spend, spend_response_ratio, budget,
iroas, rmse, proportion_cost_in_experiment) containing the generated
design, e.g. the first output of the
function TrimmedMatchGeoXDesign.report_candidate_design.
total_response: total response for all geos (excluded as well) during the
evaluation period.
total_spend: total spend for all geos (excluded as well) during the
evaluation period.
geo_treatment: table with columns (geo, response, spend, pair) containing the
treatment geos and their overall response and spend during the evaluation
period.
budgets_for_design: list of budgets to be considered for the designs.
average_order_value: factor used to change scale from conversion count to
conversion value.
num_geos: number of geos available.
confidence_level: confidence level for the test H0: iROAS=0
vs H1: iROAS>=minimum_detectable_iroas.
power_level: level used for the power analysis.
Returns:
a pd.DataFrame with the output designs.
"""
calc_min_detectable_iroas = CalculateMinDetectableIroas(
1 - confidence_level, power_level)
designs = []
for budget in budgets_for_design:
tmp_result = results[results['budget'] == budget]
chosen_design = tmp_result.loc[
tmp_result['rmse_cost_adjusted'].idxmin()].squeeze()
baseline = geo_treatment['response'].sum()
cost_in_experiment = geo_treatment['spend'].sum()
min_detectable_iroas_raw = calc_min_detectable_iroas.at(
chosen_design['rmse'])
min_detectable_iroas = average_order_value * min_detectable_iroas_raw
min_detectable_lift = budget * 100 * min_detectable_iroas_raw / baseline
num_geo_pairs = int(chosen_design['num_pairs'])
num_removed_geos = num_geos - int(2 * chosen_design['num_pairs'])
treat_control_removed = (f'{num_geo_pairs} / {num_geo_pairs} / ' +
f'{num_removed_geos}')
revenue_covered = 100 * baseline / total_response
proportion_cost_in_experiment = cost_in_experiment / total_spend
national_budget = human_readable_number(
budget / proportion_cost_in_experiment)
designs.append({
'Budget': human_readable_number(budget),
'Minimum detectable iROAS': f'{min_detectable_iroas:.3}',
'Minimum detectable lift in response': f'{min_detectable_lift:.2f} %',
'Treatment/control/excluded geos': treat_control_removed,
'Revenue covered by treatment group': f'{revenue_covered:.2f} %',
'Cost/baseline response': f'{(budget / baseline * 100):.2f} %',
'Cost if test budget is scaled nationally': national_budget
})
designs = pd.DataFrame(designs)
designs.index.rename('Design', inplace=True)
return designs
def format_table(
df: pd.DataFrame,
formatting_options: List[FormatOptions]) -> style.Styler:
"""Formats a table with the output designs.
Args:
df: a table to be formatted.
formatting_options: a list of FormatOptions, one per column, indicating
which formatting function to use and its additional args, e.g.
formatting_options =
[FormatOptions(column='column_1', function=fnc, args={'input1': 1, 'input2': 2})]
Returns:
a pandas.io.formats.style.Styler with the table formatted.
"""
formatted_table = df.style
for tmp_options in formatting_options:
  # if axis is in the args, then the function should be applied on rows/cols
  if 'axis' in tmp_options.args:
    formatted_table = formatted_table.apply(tmp_options.function,
                                            **tmp_options.args)
  # otherwise apply the formatting elementwise
  else:
    formatted_table = formatted_table.applymap(tmp_options.function,
                                                **tmp_options.args)
return formatted_table
def format_design_table(designs: pd.DataFrame,
minimum_detectable_iroas: float,
minimum_lift_in_response_metric: float = 10.0,
minimum_revenue_covered_by_treatment: float = 5.0):
"""Formats a table with the output designs.
Args:
designs: table with columns (Budget, Minimum detectable iROAS,
Minimum Detectable lift in response, Treatment/control/excluded geos,
Revenue covered by treatment group, Cost/baseline response,
Cost if test budget is scaled nationally) containing the output designs,
e.g. the output of the function create_output_table.
minimum_detectable_iroas: target minimum detectable iROAS used to define
the optimality of a design.
minimum_lift_in_response_metric: threshold minimum detectable lift
in percentage used to flag designs with higher detectable lift.
minimum_revenue_covered_by_treatment: value used to flag any design where the
treatment group is too small based on response.
Returns:
a pandas.io.formats.style.Styler with the table formatted.
"""
formatting_options = [
FormatOptions(
column='Minimum detectable lift in response',
function=flag_percentage_value,
args={
'value': minimum_lift_in_response_metric,
'operation': '>'
}),
FormatOptions(
column='Revenue covered by treatment group',
function=flag_percentage_value,
args={
'value': minimum_revenue_covered_by_treatment,
'operation': '<'
}),
FormatOptions(
column='Minimum detectable iROAS',
function=change_background_row,
args={
'value': minimum_detectable_iroas,
'operation': '>',
'axis': 1
})
]
return format_table(designs, formatting_options)
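# Illustrative sketch (not part of the original module): one way the helpers
# above are typically combined in the colab is
#
#   designs = create_output_table(results, total_response, total_spend,
#                                 geo_treatment, budgets_for_design,
#                                 average_order_value, num_geos)
#   format_design_table(designs, minimum_detectable_iroas=3.0)
#
# where the inputs are the objects described in the docstrings above and
# 3.0 is only a placeholder target iROAS.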
def check_input_data(
input_data: pd.DataFrame,
numeric_columns_to_impute: Optional[List[str]] = None) -> pd.DataFrame:
"""Returns data to be analysed using Trimmed Match with data imputation.
Args:
input_data: data frame with columns (date, geo) and any column specified in
numeric_columns_to_impute, which should contain at least the columns with
response and spend information if they have a different name than
'response' and 'cost', respectively.
numeric_columns_to_impute: list of columns for which data imputation must be
performed.
Returns:
data frame with columns (date, geo, response, cost) and imputed missing
data.
Raises:
ValueError: if one of the mandatory columns is missing.
ValueError: if any (date, geo) pair is duplicated.
"""
data = input_data.copy()
numeric_columns_to_impute = numeric_columns_to_impute or ['response', 'cost']
mandatory_columns = set(['date', 'geo'] + numeric_columns_to_impute)
if not mandatory_columns.issubset(data.columns):
raise ValueError('The mandatory columns ' +
f'{mandatory_columns - set(data.columns)} are missing ' +
'from the input data.')
if (data.shape[0] > data[['date', 'geo']].drop_duplicates().shape[0]):
raise ValueError('There are duplicated date geo pairs.')
data['date'] = pd.to_datetime(data['date'])
for column in ['geo'] + numeric_columns_to_impute:
try:
data[column] = pd.to_numeric(data[column])
except (ValueError, TypeError):
raise ValueError(f'Unable to convert column {column} to numeric.')
geos_and_dates = pd.DataFrame(
itertools.product(data['date'].unique(), data['geo'].unique()),
columns=['date', 'geo'])
data = pd.merge(
geos_and_dates, data, on=['date', 'geo'],
how='left').fillna(dict([
(x, 0) for x in numeric_columns_to_impute
])).sort_values(by=['date', 'geo']).reset_index(drop=True)
return data
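# Illustrative sketch (not part of the original module): given an input with
# a missing (date, geo) combination,
#
#   raw = pd.DataFrame({
#       'date': ['2020-01-01', '2020-01-01', '2020-01-02'],
#       'geo': [1, 2, 1],
#       'response': [10, 20, 30],
#       'cost': [1.0, 2.0, 3.0]})
#   data = check_input_data(raw)
#
# `data` contains all 4 (date, geo) pairs, with response and cost imputed
# to 0 for the missing (2020-01-02, geo 2) row.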
def check_pairs(pretest_data: pd.DataFrame, pairs: List[pd.DataFrame]):
"""Checks that all the candidate pairings are valid.
Args:
pretest_data: pd.DataFrame (date, geo, ...).
pairs: list of dataframes with columns (geo1, geo2, pair)
containing the pairs of geos to use for the power analysis.
Raises:
ValueError: if pairs is not a list.
ValueError: if any geo is duplicated.
ValueError: if a geo appears in the pairing but not in the pretest data.
ValueError: if a pair has more than two geos.
"""
if not isinstance(pairs, list):
raise ValueError('pairs must be a list of dataframes.')
for candidate in pairs:
geos_in_pairs = set(candidate['geo1']) | set(candidate['geo2'])
if not geos_in_pairs.issubset(set(pretest_data['geo'])):
raise ValueError(
'The geos ' +
f'{geos_in_pairs - set(pretest_data["geo"])} ' +
'appear in the pairs but not in the pretest data.')
if len(geos_in_pairs) != (len(candidate['geo1']) +
len(candidate['geo2'])):
raise ValueError(f'Some geos are duplicated in the pairing {candidate}.')
if not np.alltrue(candidate['pair'].value_counts() == 1):
raise ValueError('a pair should only have two geos.')
|
|
# Defining the goal state
goal_items = [1, 2, 3, 8, 0, 4, 7, 6, 5]
visited_vertices = []
# Defining input states with increasing levels of difficulty
easy_initial_state = [1, 3, 4, 8, 6, 2, 7, 0, 5]
medium_initial_state = [2, 8, 1, 0, 4, 3, 7, 6, 5]
hard_initial_state = [5, 6, 7, 4, 0, 8, 3, 2, 1]
max = 0
import sys
import time
'''Defining a class vertex with the following attributes:
vertex.items is the current state of the elements of 8-puzzle in the vertex
vertex.ancestor is the ancestor of the vertex
vertex.operator is the decision to move the null "0" element in the 8-puzzle
vertex.depth is the depth level of the vertex'''
class vertex:
def __init__( self, items, ancestor, operator, depth, cost ):
# Contains the items of the vertex
self.items = items
# Contains the ancestor vertex that generated this vertex
self.ancestor = ancestor
# Contains the operation that generated this vertex from the ancestor
self.operator = operator
# Contains the depth of this vertex (ancestor.depth +1)
self.depth = depth
# Contains the path cost of this vertex from depth 0. Not used for depth/breadth first.
self.cost = cost
# Main method
def main():
print("------- SEARCH RUN MENU ------")
print("1. BFS - Easy")
print("2. BFS - Medium")
print("3. BFS - Hard")
print("4. DFS - Easy")
print("5. DFS - Medium")
print("6. DFS - Hard")
print("7. IDS - Easy")
print("8. IDS - Medium")
print("9. IDS - Hard")
print("10. A* - Easy")
print("11. A* - Medium")
print("12. A* - Hard")
print("13. Greedy BFS - Easy")
print("14. Greedy BFS - Medium")
print("15. Greedy BFS - Hard")
print("16. IDA* - Easy")
print("17. IDA* - Medium")
print("18. IDA* - Hard")
n = raw_input("Enter what would you like to run: ")
n = int(n)
start = time.clock() #Starting time.clock() to count time taken for the function to execute
if(n == 1):
print "Initial Puzzle State:"
print_vertices(easy_initial_state)
print "\nGoal Puzzle State: "
output = bfs( easy_initial_state, goal_items )
elif(n == 2):
print_vertices(medium_initial_state)
print "\nGoal Puzzle State: "
output = bfs( medium_initial_state, goal_items )
elif(n == 3):
print_vertices(hard_initial_state)
print "\nGoal Puzzle State: "
output = bfs( hard_initial_state, goal_items )
elif(n == 4):
print_vertices(easy_initial_state)
print "\nGoal Puzzle State: "
output = dfs( easy_initial_state, goal_items )
elif(n == 5):
print_vertices(medium_initial_state)
print "\nGoal Puzzle State: "
output = dfs( medium_initial_state, goal_items )
elif(n == 6):
print_vertices(hard_initial_state)
print "\nGoal Puzzle State: "
output = dfs( hard_initial_state, goal_items )
elif(n == 7):
print_vertices(easy_initial_state)
print "\nGoal Puzzle State: "
output = ids( easy_initial_state, goal_items )
elif(n == 8):
print_vertices(medium_initial_state)
print "\nGoal Puzzle State: "
output = ids( medium_initial_state, goal_items )
elif(n == 9):
print_vertices(hard_initial_state)
print "\nGoal Puzzle State: "
output = ids( hard_initial_state, goal_items )
elif(n == 10):
print_vertices(easy_initial_state)
print "\nGoal Puzzle State: "
output = astar( easy_initial_state, goal_items )
elif(n == 11):
print_vertices(medium_initial_state)
print "\nGoal Puzzle State: "
output = astar( medium_initial_state, goal_items )
elif(n == 12):
print_vertices(hard_initial_state)
print "\nGoal Puzzle State: "
output = astar( hard_initial_state, goal_items )
elif(n == 13):
print_vertices(easy_initial_state)
print "\nGoal Puzzle State: "
output = gbfs( easy_initial_state, goal_items )
elif(n == 14):
print_vertices(medium_initial_state)
print "\nGoal Puzzle State: "
output = gbfs( medium_initial_state, goal_items )
elif(n == 15):
print_vertices(hard_initial_state)
print "\nGoal Puzzle State: "
output = gbfs( hard_initial_state, goal_items )
elif(n == 16):
print_vertices(easy_initial_state)
print "\nGoal Puzzle State: "
output = idastar( easy_initial_state, goal_items )
elif(n == 17):
print_vertices(medium_initial_state)
print "\nGoal Puzzle State: "
output = idastar( medium_initial_state, goal_items )
elif(n == 18):
print_vertices(hard_initial_state)
print "\nGoal Puzzle State: "
output = idastar( hard_initial_state, goal_items )
else:
print "Wrong Input!"
print "Direction of Moves:"
print output
print "Moves taken: ", len(output)
print "Nodes visited: ", len(visited_vertices) - 1
print "Max. length of Node List: ", max
print "Time taken: ", time.clock() - start
def print_vertices( vertex_items ):
print "| %i %i %i |" % (vertex_items[0], vertex_items[1], vertex_items[2])
print "| %i %i %i |" % (vertex_items[3], vertex_items[4], vertex_items[5])
print "| %i %i %i |" % (vertex_items[6], vertex_items[7], vertex_items[8])
def traverse_left( items ):
new_state = items[:]
index = new_state.index( 0 )
# Sanity check
if index not in [0, 3, 6]:
temp = new_state[index - 1] #Exchanging null element with positioned element
new_state[index - 1] = new_state[index]
new_state[index] = temp
return new_state
else:  # Can't move, return None
return None
#Function defined for moving the "null" element one place right in the 8-puzzle
def traverse_right( items ):
# Perform object copy
new_state = items[:]
index = new_state.index( 0 )
# Sanity check
if index not in [2, 5, 8]:
# Swap the values.
temp = new_state[index + 1]
new_state[index + 1] = new_state[index]
new_state[index] = temp
return new_state
else:
#Return None if no moves possible
return None
#Function defined for moving the "null" element one place up in the 8-puzzle
def traverse_up( items ):
new_state = items[:]
index = new_state.index( 0 )
# Sanity check
if index not in [0, 1, 2]:
# Swap the values.
temp = new_state[index - 3]
new_state[index - 3] = new_state[index]
new_state[index] = temp
return new_state
else:
# Can't move it, return None
return None
#Function defined for moving the "null" element one place up in the 8-puzzle
def traverse_down( items ):
new_state = items[:]
index = new_state.index( 0 )
# Sanity check
if index not in [6, 7, 8]:
# Swap the values.
temp = new_state[index + 3]
new_state[index + 3] = new_state[index]
new_state[index] = temp
return new_state
else:
# Can't move, return None
return None
#Defining a function for initializing a node
def vertex_init(items, ancestor, operator, depth, cost ):
return vertex(items, ancestor, operator, depth, cost )
def expand_vertex( vertex, vertices, visited_vertices):
#Returns a list of expanded child vertices
child_vertices = []
if vertex.items not in visited_vertices:
visited_vertices.extend([vertex.items])
child_vertices.append( vertex_init( traverse_up( vertex.items ), vertex, 'UP ->', vertex.depth + 1, 0 ) )
child_vertices.append( vertex_init( traverse_down( vertex.items ), vertex, 'DOWN ->', vertex.depth + 1, 0 ) )
child_vertices.append( vertex_init( traverse_left( vertex.items ), vertex, 'LEFT ->', vertex.depth + 1, 0 ) )
child_vertices.append( vertex_init( traverse_right( vertex.items), vertex, 'RIGHT ->', vertex.depth + 1, 0 ) )
child_vertices = [vertex for vertex in child_vertices if vertex.items is not None]
return child_vertices
#Defining a breadth first search function
def bfs( start, goal ):
global max
vertices = []
# Create the queue with the root vertex in it.
vertices.append( vertex_init( start, None, None, 0, 0 ) )
while True:
# If no states exist
if len( vertices ) == 0:
return None
vertex = vertices.pop(0)
#returning list of directions/moves taken to get to goal state
if vertex.items == goal:
moves = []
print_vertices(vertex.items)
temp = vertex
while True:
moves.insert(0, temp.operator)
if temp.depth <= 1: break
temp = temp.ancestor
return moves
# Expand the vertex and add all the expansions to the end of the queue
vertices.extend( expand_vertex( vertex, vertices, visited_vertices ) )
if len(vertices) > max: max = len(vertices)
#Defining a depth first search function
def dfs( start, goal, depth= 150 ):
global max
depth_limit = depth
vertices = []
vertices.append( vertex_init( start, None, None, 0, 0 ) ) #Appending the queue with the root vertex
while True:
if len( vertices ) == 0: return None #No solution if states are over
vertex = vertices.pop(0)
if vertex.items == goal: #code for returning the goal state and moves it took to reach there
moves = []
print_vertices(vertex.items)
temp = vertex
while True:
moves.insert(0, temp.operator)
if temp.depth <= 1: break
temp = temp.ancestor
return moves
#Append the child_nodes to the front of the stack
if vertex.depth < depth_limit:
child_vertices = expand_vertex( vertex, vertices, visited_vertices )
child_vertices.extend( vertices )
vertices = child_vertices
if len(vertices) > max: max = len(vertices)
#Defining an iterative-deepening search function, with a variable for depth, which can be modified.
def ids( start, goal, depth = 150 ):
for i in range( 100, depth ):
output = dfs( start, goal, i )
if output != None:
return output
def astar( start, goal ):
global max #Define an informed search A* based on a heuristic function
vertices = []
vertices.append( vertex_init( start, None, None, 0, 0 ) )
while True:
# If no states exist
if len( vertices ) == 0:
return None
# Vertices behaves as a priority queue using the sort function with cmpastar
vertices.sort( cmpastar )
# Popping the head of the priority Queue
vertex = vertices.pop(0)
# if this vertex is the goal, return the moves it took to get here.
print "Checking state", vertex.items, " with direction: ", vertex.operator
if vertex.items == goal:
moves = []
print_vertices(vertex.items)
temp = vertex
while True:
moves.insert( 0, temp.operator )
if temp.depth <=1: break
temp = temp.ancestor
return moves
#Expand the vertex and add all expansions to the end of the queue
vertices.extend( expand_vertex( vertex, vertices, visited_vertices ))
if len(vertices) > max: max = len(vertices)
def gbfs(start, goal):
global max
vertices = []
vertices.append( vertex_init( start, None, None, 0, 0 ) )
while True:
# If no states exist
if len( vertices ) == 0:
return None
#Sorting by making the list behave as a priority queue on the basis of the compare function. We need to manually change the heuristic
vertices.sort( cmpgbfs )
# take the vertex from the front of the queue
vertex = vertices.pop(0)
#if we reach the goal vertex, return the directions to reach here
print "Checking state", vertex.items, " with direction: ", vertex.operator
#print visited_vertices
if vertex.items == goal:
moves = []
print_vertices(vertex.items)
temp = vertex
while True:
moves.insert( 0, temp.operator )
if temp.depth <=1: break
temp = temp.ancestor
return moves
#Expand the vertex and add all expansions to the end of the queue
vertices.extend( expand_vertex( vertex, vertices, visited_vertices ))
if len(vertices) > max: max = len(vertices)
def idastar(start, goal, depth_limit = 300):
global max
for i in range( 100, depth_limit ):
vertices = []
vertices.append( vertex_init( start, None, None, 0, 0 ) )
while True:
# If no states exist
if len( vertices ) == 0:
return None
# Vertices behaves as a priority queue using the sort function with cmpastar
vertices.sort( cmpastar )
# Popping the head of the priority Queue
vertex = vertices.pop(0)
# If this vertex is the goal, return the moves it took to get here.
print "Checking state", vertex.items, " with direction: ", vertex.operator
if vertex.items == goal:
moves = []
print_vertices(vertex.items)
temp = vertex
while True:
moves.insert( 0, temp.operator )
if temp.depth <=1: break
temp = temp.ancestor
return moves
# Append the vertices in front of the queue
if vertex.depth < i:  # deepen the limit with each outer iteration
child_vertices = expand_vertex( vertex, vertices, visited_vertices )
child_vertices.extend( vertices )
vertices = child_vertices
if len(vertices) > max: max = len(vertices)
def cmpastar( a, b ):
#Compare function for astar. f(n) = g(n) + h(n)
return (a.depth + h( a.items, goal_items )) - (b.depth + h( b.items, goal_items ))
def cmpgbfs( a, b ):
#Compare function for gbfs - h(n)
return h( a.items, goal_items ) - h( b.items, goal_items )
def h(items, goal ):
# Defining a heuristic h1(n) for the Informed search algorithms
score = 0
for i in range( len( items ) ):
if items[i] != goal[i]:
score = score + 1
return score
def h2 ( items, goal_items):
value = 0
for i in items:
if not i:
continue
row_idx = items.index(i) / 3
col_idx = items.index(i) % 3
row_idx_goal = goal_items.index(i) / 3
col_idx_goal = goal_items.index(i) % 3
value += abs(row_idx_goal - row_idx) + abs(col_idx_goal - col_idx)
return value
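# Illustrative note (not part of the original script): for the medium start
# state [2, 8, 1, 0, 4, 3, 7, 6, 5] and the goal [1, 2, 3, 8, 0, 4, 7, 6, 5],
# h() counts 6 misplaced entries (it also counts the blank), while h2() sums
# the Manhattan distances of the tiles (blank excluded) and returns 7, so h2
# usually gives the informed searches a tighter estimate.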
def cmpastar2(a, b):
#Compare function for astar with heuristic h2. f(n) = g(n) + h2(n)
return (a.depth + h2( a.items, goal_items )) - (b.depth + h2( b.items, goal_items ))
def cmpgbfs2(a, b):
return h2( a.items, goal_items ) - h2( b.items, goal_items )
# For executing the main() function
if __name__ == "__main__":
main()
|
|
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
VNX Common Utils
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import exception
from cinder.i18n import _
from cinder.volume import configuration
from cinder.volume.drivers.dell_emc.vnx import const
from cinder.volume import group_types
from cinder.volume import volume_types
storops = importutils.try_import('storops')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 60 * 60 * 24 * 365
INTERVAL_5_SEC = 5
INTERVAL_20_SEC = 20
INTERVAL_30_SEC = 30
INTERVAL_60_SEC = 60
SNAP_EXPIRATION_HOUR = '1h'
BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both'])
QOS_MAX_IOPS = 'maxIOPS'
QOS_MAX_BWS = 'maxBWS'
VNX_OPTS = [
cfg.StrOpt('storage_vnx_authentication_type',
default='global',
help='VNX authentication scope type. '
'By default, the value is global.'),
cfg.StrOpt('storage_vnx_security_file_dir',
help='Directory path that contains the VNX security file. '
'Make sure the security file is generated first.'),
cfg.StrOpt('naviseccli_path',
help='Naviseccli Path.'),
cfg.ListOpt('storage_vnx_pool_names',
help='Comma-separated list of storage pool names to be used.'),
cfg.IntOpt('default_timeout',
default=DEFAULT_TIMEOUT,
help='Default timeout for CLI operations in minutes. '
'For example, LUN migration is a typical long '
'running operation, which depends on the LUN size and '
'the load of the array. '
'An upper bound in the specific deployment can be set to '
'avoid unnecessary long wait. '
'By default, it is 365 days long.'),
cfg.IntOpt('max_luns_per_storage_group',
default=255,
help='Default max number of LUNs in a storage group.'
' By default, the value is 255.'),
cfg.BoolOpt('destroy_empty_storage_group',
default=False,
help='To destroy storage group '
'when the last LUN is removed from it. '
'By default, the value is False.'),
# iscsi_initiators is a dict which key is string and value is a list.
# This could be a DictOpt. Unfortunately DictOpt doesn't support the value
# of list type.
cfg.StrOpt('iscsi_initiators',
help='Mapping between hostname and '
'its iSCSI initiator IP addresses.'),
cfg.ListOpt('io_port_list',
help='Comma separated iSCSI or FC ports '
'to be used in Nova or Cinder.'),
cfg.BoolOpt('initiator_auto_registration',
default=False,
help='Automatically register initiators. '
'By default, the value is False.'),
cfg.BoolOpt('initiator_auto_deregistration',
default=False,
help='Automatically deregister initiators after the related '
'storage group is destroyed. '
'By default, the value is False.'),
cfg.BoolOpt('check_max_pool_luns_threshold',
default=False,
deprecated_for_removal=True,
help='DEPRECATED: Report free_capacity_gb as 0 when the limit '
'to maximum number of pool LUNs is reached. '
'By default, the value is False.'),
cfg.BoolOpt('force_delete_lun_in_storagegroup',
default=False,
help='Delete a LUN even if it is in Storage Groups. '
'By default, the value is False.'),
cfg.BoolOpt('ignore_pool_full_threshold',
default=False,
help='Force LUN creation even if '
'the full threshold of pool is reached. '
'By default, the value is False.')
]
CONF.register_opts(VNX_OPTS, group=configuration.SHARED_CONF_GROUP)
PROTOCOL_FC = 'fc'
PROTOCOL_ISCSI = 'iscsi'
class ExtraSpecs(object):
_provision_key = 'provisioning:type'
_tier_key = 'storagetype:tiering'
_replication_key = 'replication_enabled'
PROVISION_DEFAULT = const.PROVISION_THICK
TIER_DEFAULT = None
def __init__(self, extra_specs, group_specs=None):
self.specs = extra_specs
self._provision = self._get_provision()
self.provision = self._provision
self._tier = self._get_tier()
self.tier = self._tier
self.apply_default_values()
self.group_specs = group_specs if group_specs else {}
def apply_default_values(self):
self.provision = (ExtraSpecs.PROVISION_DEFAULT
if self.provision is None
else self.provision)
# Can not set Tier when provision is set to deduped. So don't set the
# tier default when provision is deduped.
if self.provision != storops.VNXProvisionEnum.DEDUPED:
self.tier = (ExtraSpecs.TIER_DEFAULT if self.tier is None
else self.tier)
@classmethod
def set_defaults(cls, provision_default, tier_default):
cls.PROVISION_DEFAULT = provision_default
cls.TIER_DEFAULT = tier_default
def _get_provision(self):
value = self._parse_to_enum(self._provision_key,
storops.VNXProvisionEnum)
return value
def _get_tier(self):
return self._parse_to_enum(self._tier_key, storops.VNXTieringEnum)
@property
def is_replication_enabled(self):
return self.specs.get('replication_enabled', '').lower() == '<is> true'
@property
def is_group_replication_enabled(self):
return self.group_specs.get(
'consistent_group_replication_enabled', '').lower() == '<is> true'
def _parse_to_enum(self, key, enum_class):
value = (self.specs[key]
if key in self.specs else None)
if value is not None:
try:
value = enum_class.parse(value)
except ValueError:
reason = (_("The value %(value)s for key %(key)s in extra "
"specs is invalid.") %
{'key': key, 'value': value})
raise exception.InvalidVolumeType(reason=reason)
return value
@classmethod
def from_volume(cls, volume):
specs = {}
type_id = volume['volume_type_id']
if type_id is not None:
specs = volume_types.get_volume_type_extra_specs(type_id)
return cls(specs)
@classmethod
def from_group(cls, group):
group_specs = {}
if group and group.group_type_id:
group_specs = group_types.get_group_type_specs(
group.group_type_id)
return cls(extra_specs={}, group_specs=group_specs)
@classmethod
def from_volume_type(cls, type):
return cls(type['extra_specs'])
@classmethod
def from_lun(cls, lun):
ex = cls({})
ex.provision = lun.provision
ex.tier = (lun.tier
if lun.provision != storops.VNXProvisionEnum.DEDUPED
else None)
return ex
def match_with_lun(self, lun):
ex = ExtraSpecs.from_lun(lun)
return (self.provision == ex.provision and
self.tier == ex.tier)
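# Illustrative note (not part of the original module): the two extra-spec
# keys consumed above are 'provisioning:type' and 'storagetype:tiering';
# their values are parsed into storops enums via enum_class.parse(), and
# 'replication_enabled' / 'consistent_group_replication_enabled' are read
# as the literal string '<is> true'.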
def validate(self, enabler_status):
"""Checks whether the extra specs are valid.
:param enabler_status: Instance of VNXEnablerStatus
"""
if "storagetype:pool" in self.specs:
LOG.warning("Extra spec key 'storagetype:pool' is obsoleted "
"since driver version 5.1.0. This key will be "
"ignored.")
if (self._provision == storops.VNXProvisionEnum.DEDUPED and
self._tier is not None):
msg = _("Can not set tiering policy for a deduplicated volume. "
"Set the tiering policy on the pool where the "
"deduplicated volume locates.")
raise exception.InvalidVolumeType(reason=msg)
if (self._provision == storops.VNXProvisionEnum.COMPRESSED
and not enabler_status.compression_enabled):
msg = _("Compression Enabler is not installed. "
"Can not create compressed volume.")
raise exception.InvalidVolumeType(reason=msg)
if (self._provision == storops.VNXProvisionEnum.DEDUPED
and not enabler_status.dedup_enabled):
msg = _("Deduplication Enabler is not installed. "
"Can not create deduplicated volume.")
raise exception.InvalidVolumeType(reason=msg)
if (self._provision in [storops.VNXProvisionEnum.THIN,
storops.VNXProvisionEnum.COMPRESSED,
storops.VNXProvisionEnum.DEDUPED]
and not enabler_status.thin_enabled):
msg = _("ThinProvisioning Enabler is not installed. "
"Can not create thin volume.")
raise exception.InvalidVolumeType(reason=msg)
if (self._tier is not None
and not enabler_status.fast_enabled):
msg = _("FAST VP Enabler is not installed. "
"Can not set tiering policy for the volume.")
raise exception.InvalidVolumeType(reason=msg)
return True
def __len__(self):
return len(self.specs)
def __getitem__(self, key):
return self.specs[key]
def __iter__(self):
return iter(self.specs)
def __contains__(self, item):
return item in self.specs
def __eq__(self, other):
if isinstance(other, ExtraSpecs):
return self.specs == other.specs
elif isinstance(other, dict):
return self.specs == other
else:
return False
def __hash__(self):
return self.specs.__hash__()
class LUNState(object):
INITIALIZING = 'Initializing'
READY = 'Ready'
FAULTED = 'Faulted'
class PoolState(object):
INITIALIZING = 'Initializing'
OFFLINE = 'Offline'
DELETING = 'Deleting'
VALID_CREATE_LUN_STATE = (INITIALIZING, OFFLINE, DELETING)
class VNXEnablerStatus(object):
def __init__(self,
dedup=False,
compression=False,
fast=False,
thin=False,
snap=False):
self.dedup_enabled = dedup
self.compression_enabled = compression
self.fast_enabled = fast
self.thin_enabled = thin
self.snap_enabled = snap
class WaitUtilTimeoutException(exception.VolumeDriverException):
"""Raised when timeout occurs in wait_until."""
# TODO(Ryan) put this exception under Cinder shared module.
pass
class Host(object):
"""The model of a host which acts as an initiator to access the storage."""
def __init__(self, name, initiators, ip=None, wwpns=None):
# ip and wwpns are optional.
self.name = name
if not self.name:
raise ValueError(_('Name of host cannot be empty.'))
self.initiators = initiators
if not self.initiators:
raise ValueError(_('Initiators of host cannot be empty.'))
self.ip = ip
self.wwpns = wwpns
class Volume(object):
"""The internal volume which is used to pass in method call."""
def __init__(self, name, id, vnx_lun_id=None):
self.name = name
self.id = id
self.vnx_lun_id = vnx_lun_id
class ISCSITargetData(dict):
def __init__(self, volume_id, is_discovered, iqn='unknown', iqns=None,
portal='unknown', portals=None, lun='unknown', luns=None):
data = {'volume_id': volume_id, 'target_discovered': is_discovered,
'target_iqn': iqn, 'target_iqns': iqns,
'target_portal': portal, 'target_portals': portals,
'target_lun': lun, 'target_luns': luns}
self['driver_volume_type'] = 'iscsi'
self['data'] = data
def to_dict(self):
"""Converts to the dict.
It helps serialize and deserialize the data before returning to nova.
"""
return {key: value for (key, value) in self.items()}
class FCTargetData(dict):
def __init__(self, volume_id, is_discovered, wwn=None, lun=None,
initiator_target_map=None):
data = {'volume_id': volume_id, 'target_discovered': is_discovered,
'target_lun': lun, 'target_wwn': wwn,
'initiator_target_map': initiator_target_map}
self['driver_volume_type'] = 'fibre_channel'
self['data'] = {key: value for key, value in data.items()
if value is not None}
def to_dict(self):
"""Converts to the dict.
It helps serialize and deserialize the data before returning to nova.
"""
return {key: value for (key, value) in self.items()}
class ReplicationDevice(object):
def __init__(self, replication_device):
self.replication_device = replication_device
@property
def backend_id(self):
return self.replication_device.get('backend_id')
@property
def san_ip(self):
return self.replication_device.get('san_ip')
@property
def san_login(self):
return self.replication_device.get('san_login')
@property
def san_password(self):
return self.replication_device.get('san_password')
@property
def storage_vnx_authentication_type(self):
return self.replication_device.get(
'storage_vnx_authentication_type',
'global')
@property
def storage_vnx_security_file_dir(self):
return self.replication_device.get('storage_vnx_security_file_dir')
@property
def pool_name(self):
return self.replication_device.get('pool_name', None)
class ReplicationDeviceList(list):
"""Replication devices configured in cinder.conf
Cinder supports multiple replication_device, while VNX driver
only supports one replication_device for now.
"""
def __init__(self, configuration):
self.list = []
self.configuration = configuration
self._device_map = dict()
self.parse_configuration()
def parse_configuration(self):
if self.configuration.replication_device:
for replication_device in self.configuration.replication_device:
rd = ReplicationDevice(replication_device)
if not rd.backend_id or not rd.san_ip:
msg = _('backend_id or san_ip cannot be empty for '
'replication_device.')
raise exception.InvalidInput(reason=msg)
self._device_map[rd.backend_id] = rd
self.list.append(rd)
return self._device_map
def get_device(self, backend_id):
try:
device = self._device_map[backend_id]
except KeyError:
device = None
LOG.warning('Unable to find secondary device named: %s',
backend_id)
return device
@property
def devices(self):
return self._device_map.values()
def __len__(self):
return len(self.list)
def __iter__(self):
self._iter = self.list.__iter__()
return self
def next(self):
return next(self._iter)
def __next__(self):
return self.next()
def __getitem__(self, item):
return self.list[item]
@classmethod
def get_backend_ids(cls, config):
"""Returns all configured device_id."""
rep_list = cls(config)
backend_ids = []
for item in rep_list.devices:
backend_ids.append(item.backend_id)
return backend_ids
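# Illustrative sketch (assumption, not part of the original module): a
# replication_device entry in cinder.conf providing the fields consumed by
# ReplicationDevice above might look like
#
#   replication_device = backend_id:secondary_vnx,san_ip:192.168.1.2,
#       san_login:sysadmin,san_password:sysadmin,pool_name:Pool_02
#
# (written on one logical line in the actual config file).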
class VNXMirrorView(object):
def __init__(self, primary_client, secondary_client):
self.primary_client = primary_client
self.secondary_client = secondary_client
def create_mirror(self, name, primary_lun_id):
self.primary_client.create_mirror(name, primary_lun_id)
def create_secondary_lun(self, pool_name, lun_name, size, provision, tier):
return self.secondary_client.create_lun(
pool_name, lun_name, size, provision, tier)
def delete_secondary_lun(self, lun_name):
self.secondary_client.delete_lun(lun_name)
def delete_mirror(self, mirror_name):
self.primary_client.delete_mirror(mirror_name)
def add_image(self, mirror_name, secondary_lun_id):
sp_ip = self.secondary_client.get_available_ip()
self.primary_client.add_image(mirror_name, sp_ip, secondary_lun_id)
def remove_image(self, mirror_name):
self.primary_client.remove_image(mirror_name)
def fracture_image(self, mirror_name):
self.primary_client.fracture_image(mirror_name)
def promote_image(self, mirror_name):
"""Promote the image on the secondary array."""
self.secondary_client.promote_image(mirror_name)
def destroy_mirror(self, mirror_name, secondary_lun_name):
"""Destroy the mirror view's related VNX objects.
NOTE: primary lun will not be deleted here.
:param mirror_name: name of mirror to be destroyed
:param secondary_lun_name: name of the secondary LUN
"""
mv = self.primary_client.get_mirror(mirror_name)
if not mv.existed:
# We will skip the mirror operations if not existed
LOG.warning('Mirror view %s was deleted already.',
mirror_name)
return
self.fracture_image(mirror_name)
self.remove_image(mirror_name)
self.delete_mirror(mirror_name)
self.delete_secondary_lun(lun_name=secondary_lun_name)
def create_mirror_group(self, group_name):
return self.primary_client.create_mirror_group(group_name)
def delete_mirror_group(self, group_name):
return self.primary_client.delete_mirror_group(group_name)
def add_mirror(self, group_name, mirror_name):
return self.primary_client.add_mirror(group_name, mirror_name)
def remove_mirror(self, group_name, mirror_name):
return self.primary_client.remove_mirror(group_name, mirror_name)
def sync_mirror_group(self, group_name):
return self.primary_client.sync_mirror_group(group_name)
def promote_mirror_group(self, group_name):
"""Promote the mirror group on the secondary array."""
return self.secondary_client.promote_mirror_group(group_name)
def fracture_mirror_group(self, group_name):
return self.primary_client.fracture_mirror_group(group_name)
|
|
"""Library for interacting with InfluxDB."""
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2018 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError, InfluxDBServerError
import requests # pytype: disable=pyi-error
from faucet.gauge_pollers import GaugePortStatePoller, GaugeFlowTablePoller, GaugePortStatsPoller
class InfluxShipper:
"""Convenience class for shipping values to InfluxDB.
Inheritors must have a WatcherConf object as conf.
"""
conf = None
ship_error_prefix = 'error shipping points: '
logger = None
def ship_points(self, points):
"""Make a connection to InfluxDB and ship points."""
if self.conf is not None:
try:
client = InfluxDBClient(
host=self.conf.influx_host,
port=self.conf.influx_port,
username=self.conf.influx_user,
password=self.conf.influx_pwd,
database=self.conf.influx_db,
timeout=self.conf.influx_timeout)
if not client:
self.logger.warning('%s error connecting to InfluxDB' % self.ship_error_prefix)
return False
if not client.write_points(points=points, time_precision='s'):
self.logger.warning('%s failed to update InfluxDB' % self.ship_error_prefix)
return False
except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout,
InfluxDBClientError, InfluxDBServerError) as err:
self.logger.warning('%s %s' % (self.ship_error_prefix, err))
return False
return True
return False
@staticmethod
def make_point(tags, rcv_time, stat_name, stat_val):
"""Make an InfluxDB point."""
# InfluxDB has only one integer type, int64. We are logging OF
# stats that are uint64. Use float64 to prevent an overflow.
# q.v. https://docs.influxdata.com/influxdb/v1.2/write_protocols/line_protocol_reference/
point = {
'measurement': stat_name,
'tags': tags,
'time': int(rcv_time),
# pylint: disable=no-member
'fields': {'value': float(stat_val)}}
return point
def make_port_point(self, dp_name, port_name, rcv_time, stat_name, stat_val):
"""Make an InfluxDB point about a port measurement."""
port_tags = {
'dp_name': dp_name,
'port_name': port_name,
}
return self.make_point(port_tags, rcv_time, stat_name, stat_val)
class GaugePortStateInfluxDBLogger(GaugePortStatePoller, InfluxShipper):
"""
Example:
::
> use faucet
Using database faucet
> precision rfc3339
> select * from port_state_reason where port_name = 'port1.0.1' order by time desc limit 10;
name: port_state_reason
-----------------------
time dp_name port_name value
2017-02-21T02:12:29Z windscale-faucet-1 port1.0.1 2
2017-02-21T02:12:25Z windscale-faucet-1 port1.0.1 2
2016-07-27T22:05:08Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:33:00Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:32:57Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:31:21Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:31:18Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:27:07Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:27:04Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:24:53Z windscale-faucet-1 port1.0.1 2
"""
def update(self, rcv_time, dp_id, msg):
super(GaugePortStateInfluxDBLogger, self).update(rcv_time, dp_id, msg)
reason = msg.reason
port_no = msg.desc.port_no
if port_no in self.dp.ports:
port_name = self.dp.ports[port_no].name
points = [
self.make_port_point(
self.dp.name, port_name, rcv_time, 'port_state_reason', reason)]
self.ship_points(points)
class GaugePortStatsInfluxDBLogger(GaugePortStatsPoller, InfluxShipper):
"""Periodically sends a port stats request to the datapath and parses \
and outputs the response.
Example:
::
> use faucet
Using database faucet
> show measurements
name: measurements
------------------
bytes_in
bytes_out
dropped_in
dropped_out
errors_in
packets_in
packets_out
port_state_reason
> precision rfc3339
> select * from packets_out where port_name = 'port1.0.1' order by time desc limit 10;
name: packets_out
-----------------
time dp_name port_name value
2017-03-06T05:21:42Z windscale-faucet-1 port1.0.1 76083431
2017-03-06T05:21:33Z windscale-faucet-1 port1.0.1 76081172
2017-03-06T05:21:22Z windscale-faucet-1 port1.0.1 76078727
2017-03-06T05:21:12Z windscale-faucet-1 port1.0.1 76076612
2017-03-06T05:21:02Z windscale-faucet-1 port1.0.1 76074546
2017-03-06T05:20:52Z windscale-faucet-1 port1.0.1 76072730
2017-03-06T05:20:42Z windscale-faucet-1 port1.0.1 76070528
2017-03-06T05:20:32Z windscale-faucet-1 port1.0.1 76068211
2017-03-06T05:20:22Z windscale-faucet-1 port1.0.1 76065982
2017-03-06T05:20:12Z windscale-faucet-1 port1.0.1 76063941
"""
def update(self, rcv_time, dp_id, msg):
super(GaugePortStatsInfluxDBLogger, self).update(rcv_time, dp_id, msg)
points = []
for stat in msg.body:
port_name = self._stat_port_name(msg, stat, dp_id)
for stat_name, stat_val in self._format_port_stats('_', stat):
points.append(
self.make_port_point(
self.dp.name, port_name, rcv_time, stat_name, stat_val))
self.ship_points(points)
class GaugeFlowTableInfluxDBLogger(GaugeFlowTablePoller, InfluxShipper):
# pylint: disable=line-too-long
"""
Example:
::
> use faucet
Using database faucet
> show series where table_id = '0' and in_port = '2'
key
---
flow_byte_count,dp_name=windscale-faucet-1,eth_type=2048,in_port=2,ip_proto=17,priority=9099,table_id=0,udp_dst=53
flow_byte_count,dp_name=windscale-faucet-1,eth_type=2048,in_port=2,ip_proto=6,priority=9098,table_id=0,tcp_dst=53
flow_byte_count,dp_name=windscale-faucet-1,in_port=2,priority=9097,table_id=0
flow_packet_count,dp_name=windscale-faucet-1,eth_type=2048,in_port=2,ip_proto=17,priority=9099,table_id=0,udp_dst=53
flow_packet_count,dp_name=windscale-faucet-1,eth_type=2048,in_port=2,ip_proto=6,priority=9098,table_id=0,tcp_dst=53
flow_packet_count,dp_name=windscale-faucet-1,in_port=2,priority=9097,table_id=0
> select * from flow_byte_count where table_id = '0' and in_port = '2' and ip_proto = '17' and time > now() - 5m
name: flow_byte_count
time arp_tpa dp_name eth_dst eth_src eth_type icmpv6_type in_port ip_proto ipv4_dst ipv6_dst priority table_id tcp_dst udp_dst value vlan_vid
---- ------- ------- ------- ------- -------- ----------- ------- -------- -------- -------- -------- -------- ------- ------- ----- --------
1501154797000000000 windscale-faucet-1 2048 2 17 9099 0 53 9414
1501154857000000000 windscale-faucet-1 2048 2 17 9099 0 53 10554
1501154917000000000 windscale-faucet-1 2048 2 17 9099 0 53 10554
1501154977000000000 windscale-faucet-1 2048 2 17 9099 0 53 12164
1501155037000000000 windscale-faucet-1 2048 2 17 9099 0 53 12239
"""
def update(self, rcv_time, dp_id, msg):
super(GaugeFlowTableInfluxDBLogger, self).update(rcv_time, dp_id, msg)
points = []
jsondict = msg.to_jsondict()
for stats_reply in jsondict['OFPFlowStatsReply']['body']:
stats = stats_reply['OFPFlowStats']
for var, tags, count in self._parse_flow_stats(stats):
points.append(self.make_point(tags, rcv_time, var, count))
self.ship_points(points)
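# Hedged usage sketch (not part of the original module): shows the shape of the
# dict that make_point() produces. It runs standalone only if this module's own
# imports (influxdb, faucet) are available; nothing is shipped to a database.
if __name__ == '__main__':
    example_tags = {'dp_name': 'switch-1', 'port_name': 'port1.0.1'}
    print(InfluxShipper.make_point(example_tags, 1501154797, 'packets_out', 76083431))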
|
|
from __future__ import print_function
from pyfasta import Fasta
import operator
import collections
import string
import sys
import optparse
from cStringIO import StringIO
def newnames(oldname, n, kmers=None, overlap=None, header=None):
"""
>>> newnames('some.fasta', 1)
['some.split.fasta']
>>> newnames('some.fasta', 2)
['some.0.fasta', 'some.1.fasta']
>>> newnames('some', 2)
['some.0', 'some.1']
>>> newnames('some.fasta', 2, kmers=1000)
['some.0.1Kmer.fasta', 'some.1.1Kmer.fasta']
>>> newnames('some.fasta', 2, kmers=10000, overlap=2000)
['some.0.10Kmer.2Koverlap.fasta', 'some.1.10Kmer.2Koverlap.fasta']
>>> newnames('some.fasta', 1, kmers=100000, overlap=2000)
['some.split.100Kmer.2Koverlap.fasta']
"""
if kmers and kmers % 1000 == 0: kmers = "%iK" % (kmers/1000)
if overlap and overlap % 1000 == 0: overlap = "%iK" % (overlap/1000)
p = oldname.rfind("fa")
kstr = kmers is not None and ("%smer." % kmers) or ""
ostr = overlap is not None and ("%soverlap." % overlap) or ""
if p != -1:
pattern = oldname[:p] + "%s." + kstr + ostr + oldname[p:]
else:
pattern = oldname + kstr + ostr + ".%s"
if n == 1:
names = [pattern % "split"]
else:
width = len(str(n))
names = [pattern % str(i).rjust(width, '0') for i in range(n)]
print("creating new files:", file=sys.stderr)
print("\n".join(names), file=sys.stderr)
return names
def print_to_fh(fh, fasta, lens, seqinfo):
key, seqlen = seqinfo
lens[fh.name] += seqlen
f = fasta
assert len(str(f[key])) == seqlen, (key, seqlen, len(str(f[key])))
print(">%s" % key, file=fh)
print(str(f[key]), file=fh)
def format_kmer(seqid, start):
"""
prints out a header with 1-based indexing.
>>> format_kmer('chr3', 1000)
'chr3_1001'
"""
return "%s_%i" % (seqid, start + 1)
def split(args):
parser = optparse.OptionParser("""\
split a fasta file into separate files.
pyfasta split -n 6 [-k 5000 ] some.fasta
the output will be some.0.fasta, some.1.fasta ... some.5.fasta
the sizes will be as even as reasonable.
""")
parser.add_option("--header", dest="header", metavar="FILENAME_FMT",
help="""this overrides all other options. if specified, it will
split the file into a separate file for each header. it
will be a template specifying the file name for each new file.
e.g.: "%(fasta)s.%(seqid)s.fasta"
where 'fasta' is the basename of the input fasta file and seqid
is the header of each entry in the fasta file.""" ,default=None)
parser.add_option("-n", "--n", type="int", dest="nsplits",
help="number of new files to create")
parser.add_option("-o", "--overlap", type="int", dest="overlap",
help="overlap in basepairs", default=0)
parser.add_option("-k", "--kmers", type="int", dest="kmers", default=-1,
help="""\
split big files into pieces of this size in basepairs. the
default of -1 means do not split the sequence up into k-mers, just
split based on the headers. a reasonable value would be 10Kbp""")
options, fasta = parser.parse_args(args)
if not (fasta and (options.nsplits or options.header)):
sys.exit(parser.print_help())
if isinstance(fasta, (tuple, list)):
assert len(fasta) == 1, fasta
fasta = fasta[0]
kmer = options.kmers if options.kmers != -1 else None
overlap = options.overlap if options.overlap != 0 else None
f = Fasta(fasta)
if options.header:
names = dict([(seqid, options.header % \
dict(fasta=f.fasta_name, seqid=seqid)) \
for seqid in f.iterkeys()])
"""
if len(names) > 0:
assert names[0][1] != names[1][1], ("problem with header format", options.header)
fhs = dict([(seqid, open(fn, 'wb')) for seqid, fn in names[:200]])
fhs.extend([(seqid, StringIO(), fn) for seqid, fn in names[200:]])
"""
return with_header_names(f, names)
else:
names = newnames(fasta, options.nsplits, kmers=kmer, overlap=overlap,
header=options.header)
#fhs = [open(n, 'wb') for n in names]
if options.kmers == -1:
return without_kmers(f, names)
else:
return with_kmers(f, names, options.kmers, options.overlap)
def with_header_names(f, names):
"""
split the fasta into the files in fhs by headers.
"""
for seqid, name in names.iteritems():
with open(name, 'w') as fh:
print(">%s" % seqid, file=fh)
print(str(f[seqid]), file=fh)
def with_kmers(f, names, k, overlap):
"""
split the sequences in Fasta object `f` into pieces of length `k`
    with the given `overlap`; the pieces are written round-robin to the
    files named in `names`
"""
fhs = [open(name, 'w') for name in names]
i = 0
for seqid in f.iterkeys():
seq = f[seqid]
for (start0, subseq) in Fasta.as_kmers(seq, k, overlap=overlap):
fh = fhs[i % len(fhs)]
print(">%s" % format_kmer(seqid, start0), file=fh)
print(subseq, file=fh)
i += 1
for fh in fhs:
fh.close()
def without_kmers(f, names):
"""
    long, greedy function that does not truly solve the bin-packing problem,
    but attempts to distribute the sequences in Fasta object `f` evenly
    among the output files named in `names`.
"""
fhs = [open(name, 'w') for name in names]
name2fh = dict([(fh.name, fh) for fh in fhs])
items = sorted([(key, len(f[key])) for key in f.iterkeys()],
key=operator.itemgetter(1))
l1 = len(items) - 1
l0 = 0
lens = collections.defaultdict(int)
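    # lens tracks how many bases have been written to each output file; the
    # loop below uses the min/max ratio to decide whether to top up the
    # smallest file with a long sequence (badly skewed) or a short one
    # (slightly skewed) before falling back to round-robin assignment.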
n_added = 0
while l0 < l1:
fh = fhs[n_added % len(fhs)]
added = False
if n_added >= len(fhs):
while l1 > l0:
lmin = min(lens.itervalues())
lmax = max(lens.itervalues())
if float(lmin) / lmax < 0.80:
# it's way off, so add a large (l1)
name = find_name_from_len(lmin, lens)
fh = name2fh[name]
print_to_fh(fh, f, lens, items[l1])
l1 -= 1
added = True
n_added += 1
elif float(lmin) / lmax < 0.94:
# it's just a little off so add a small (l0)
name = find_name_from_len(lmin, lens)
fh = name2fh[name]
print_to_fh(fh, f, lens, items[l0])
l0 += 1
added = True
n_added += 1
else:
break
if not added:
break
# TODO: test this on glycine
#added = False
if added:
continue
print_to_fh(fh, f, lens, items[l1])
l1 -= 1
n_added += 1
if l0 == l1:
fh = fhs[l0 % len(fhs)]
print_to_fh(fh, f, lens, items[l0])
for fh in fhs:
fh.close()
def find_name_from_len(lmin, lens):
"""
reverse lookup, get name from dict
>>> lens = {'chr1': 4, 'chr2': 5, 'chr3': 4}
>>> find_name_from_len(5, lens)
'chr2'
"""
for fname, l in lens.iteritems():
if l == lmin:
return fname
raise Exception('name not found')
|
|
#
# Banana banana banana
#
# ~ ABorgna
#
import constants as const
import encoderSrc.encoder as encoder
import threading
import numpy as np
from sys import stderr
from time import sleep, time
if const.PY3:
import queue
else:
import Queue as queue
class Transmitter(threading.Thread):
    """
The asynchronous communication thread
"""
def __init__(self,socket):
super(Transmitter, self).__init__()
# Status
self.depth = 1 # bits per color
self.height = 32 # column height
self.lastBlitTime = 0 # in seconds
# Image buffer, the data to transmit
self.buffer = np.empty((480*self.height*3),dtype=np.uint8)
# Burst task already queued
self.burstInQueue = threading.Event()
# Makes it stop when the program ends
self.setDaemon(True)
# I/O queue
self.inQ = queue.Queue(0)
self.outQ = queue.Queue(0)
# Where it connects with the display
self.socket = socket
def send(self,packet):
"""
Queues a task to transmit
"""
self.outQ.put(packet,block=True)
def txJoin(self):
"""
Waits for the transmitter to empty the out queue
"""
self.outQ.join()
def recv(self):
"""
Gets the response of the last task
(Blocking)
"""
return self.inQ.get(block=True)
def run (self):
"""
Processes all the communication with the device
"""
while True:
# Check if connected, retry and warn every 1s
tries = 1
if self.socket.timeout:
tries = 1.0/self.socket.timeout
while not self.socket.isConnected():
self.socket.reconnect()
sleep(0.1)
tries -= 1
if tries <= 0:
tries = 1
if self.socket.timeout:
tries = 1.0/self.socket.timeout
stderr.write("BluePoV not responding...")
# Wait for tasks
task = self.outQ.get(block=True)
# Timeout for blitting
while self.lastBlitTime + 5 > time():
sleep(0.01)
pass
# Send everything
self._sendData(task)
# Frees the references
task = ()
# ACK
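            # The device replies with two bytes; assemble them big-endian into
            # a single integer (or None if nothing was received).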
response = None
r = self.socket.recv()
            if r is not None:
                response = r << 8
            r = self.socket.recv()
            if r is not None:
                response = response | r if response is not None else r
            self.inQ.put(response)
            if response is not None:
                print('resp = ' + hex(response))
            else:
                print('resp = None')
# Mark as done and wait another
self.outQ.task_done()
    def _sendData(self, task):
        # Send the appropriate bytes
# Token
token = task[0] & 0xff
if token & const.DATA:
token |= const.PRECODED
self.socket.send(token)
# Special
if task[0] == const.PING|const.GET:
pass
elif task[0] == const.STORE|const.SET:
pass
elif task[0] == const.CLEAN|const.SET:
pass
# Setters
elif task[0] == const.HEIGHT|const.SET:
            self.height = task[1]
self.socket.send(task[1] >> 8)
self.socket.send(task[1])
elif task[0] == const.WIDTH|const.SET:
self.socket.send(task[1] >> 8)
self.socket.send(task[1])
elif task[0] == const.DEPTH|const.SET:
self.depth = task[1]
self.socket.send(task[1])
elif task[0] == const.TOTAL_WIDTH|const.SET:
self.socket.send(task[1] >> 8)
self.socket.send(task[1])
elif task[0] == const.SPEED|const.SET:
self.socket.send(task[1] >> 8)
self.socket.send(task[1])
elif task[0] == const.DIM|const.SET:
self.socket.send(task[1])
# Getters
elif task[0] == const.FPS|const.GET:
pass
elif task[0] == const.HEIGHT|const.GET:
pass
elif task[0] == const.WIDTH|const.GET:
pass
elif task[0] == const.DEPTH|const.GET:
pass
elif task[0] == const.TOTAL_WIDTH|const.GET:
pass
elif task[0] == const.SPEED|const.GET:
pass
elif task[0] == const.DIM|const.GET:
pass
# Data
elif task[0] == const.BURST|const.DATA \
or task[0] == const.INTERLACED_BURST|const.DATA:
            # No burst task is pending in the queue any more
            self.burstInQueue.clear()
            # Copy the matrix into the internal buffer so it's not modified while encoding
self.buffer = np.copy(task[1].flatten())
# Encode the data
frame = self._arrangePixels(interlaced=False)
print('Data:')
print(frame[:24])
print('len:')
l = len(frame)
print(str(l)+' ('+str(l/12)+'*12)')
print()
# Send it
self.socket.send(frame)
elif task[0] == const.WRITE_COLUMN|const.DATA:
# Send column number
self.socket.send(task[1])
            # Copy the matrix into the internal buffer so it's not modified while encoding
self.buffer = np.copy(task[2].flatten())
# Encode the data
            frame = self._arrangePixels(length=1)
# Send it
self.socket.send(frame)
elif task[0] == const.WRITE_SECTION|const.DATA:
# Send first column number
self.socket.send(task[1])
# Send section length
self.socket.send(task[2])
            # Copy the matrix into the internal buffer so it's not modified while encoding
self.buffer = np.copy(task[3].flatten())
# Encode the data
            frame = self._arrangePixels(length=task[2])
# Send it
self.socket.send(frame)
self.lastBlitTime = time()
    def _arrangePixels(self, length=0, interlaced=False):
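        # Pack the RGB buffer down to `depth` bits per colour channel using the
        # external encoder module; the packed output is len(buffer) * depth / 8
        # bytes, returned as a plain list for the socket layer.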
respLen = int(len(self.buffer)*self.depth/8)
resp = np.empty((respLen),dtype=np.uint8)
encoder.encodeRGB3d(self.buffer,resp,self.depth,self.height)
return resp.tolist()
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from parlai.core.opt import Opt
from parlai.tasks.empathetic_dialogues.agents import (
EmotionClassificationSituationTeacher,
EmpatheticDialoguesTeacher,
)
from parlai.utils import testing as testing_utils
EPISODE_COUNTS = {
'train_experiencer_only': 19531,
'train_both_sides': 39057,
'valid': 2769,
'test': 2547,
}
EXAMPLE_COUNTS = {
'train_experiencer_only': 40254,
'train_both_sides': 64636,
'valid': 5738,
'test': 5259,
}
class TestEDTeacher(unittest.TestCase):
"""
Basic tests to count the number of examples/episodes and to check a few utterances.
# Counting num episodes (from the original internal copy of the data)
cat /checkpoint/parlai/tasks/empathetic_dialogues/train.csv | grep -E '^hit:[0-9]+_conv:[0-9]+,2' | wc # 19531
cat /checkpoint/parlai/tasks/empathetic_dialogues/train.csv | grep -E '^hit:[0-9]+_conv:[0-9]+,(2|3)' | wc # 39057
cat /checkpoint/parlai/tasks/empathetic_dialogues/valid_random_cands.csv | grep -E '^hit:[0-9]+_conv:[0-9]+,2' | wc # 2769
cat /checkpoint/parlai/tasks/empathetic_dialogues/test_random_cands.csv | grep -E '^hit:[0-9]+_conv:[0-9]+,2' | wc # 2547
# We count the number of lines with turn_idx=2 because this means that we have at
# least one full utterance in the conversation. For train_experiencer_only==False,
# we also include turn_idx=3 to count the Listener-based conversations in the same
# manner.
# Count num examples (from the original internal copy of the data)
grep -E 'hit:[0-9]+_conv:[0-9]+,(2|4|6|8|10|12),' /checkpoint/parlai/tasks/empathetic_dialogues/train.csv | wc # 40254
grep -E 'hit:[0-9]+_conv:[0-9]+,(2|3|4|5|6|7|8|9|10|11|12),' /checkpoint/parlai/tasks/empathetic_dialogues/train.csv | wc # 64636 (--train-experiencer-only False)
grep -E 'hit:[0-9]+_conv:[0-9]+,(2|4|6|8|10|12),' /checkpoint/parlai/tasks/empathetic_dialogues/valid_random_cands.csv | wc # 5738
grep -E 'hit:[0-9]+_conv:[0-9]+,(2|4|6|8|10|12),' /checkpoint/parlai/tasks/empathetic_dialogues/test_random_cands.csv | wc # 5259
"""
def test_counts(self):
with testing_utils.tempdir() as tmpdir:
data_path = tmpdir
# Check EmpatheticDialoguesTeacher, with multiple examples per episode
opts_episodes_and_examples = [
(
{'datatype': 'train'},
EPISODE_COUNTS['train_both_sides'],
EXAMPLE_COUNTS['train_both_sides'],
), # Test the default mode
(
{'datatype': 'train', 'train_experiencer_only': True},
EPISODE_COUNTS['train_experiencer_only'],
EXAMPLE_COUNTS['train_experiencer_only'],
),
(
{'datatype': 'train', 'train_experiencer_only': False},
EPISODE_COUNTS['train_both_sides'],
EXAMPLE_COUNTS['train_both_sides'],
),
(
{'datatype': 'valid'},
EPISODE_COUNTS['valid'],
EXAMPLE_COUNTS['valid'],
),
({'datatype': 'test'}, EPISODE_COUNTS['test'], EXAMPLE_COUNTS['test']),
]
for teacher_class in [EmpatheticDialoguesTeacher]:
for opt, num_episodes, num_examples in opts_episodes_and_examples:
full_opt = Opt({**opt, 'datapath': data_path})
teacher = teacher_class(full_opt)
self.assertEqual(teacher.num_episodes(), num_episodes)
self.assertEqual(teacher.num_examples(), num_examples)
# Check EmotionClassificationSituationTeacher, with one example per episode
train_episode_count = EPISODE_COUNTS['train_experiencer_only']
# For the situation classifier, we only want to have one episode per train
# conversation
opts_episodes = [
({'datatype': 'train'}, train_episode_count), # Test the default mode
(
{'datatype': 'train', 'train_experiencer_only': True},
train_episode_count,
),
(
{'datatype': 'train', 'train_experiencer_only': False},
train_episode_count,
),
({'datatype': 'valid'}, EPISODE_COUNTS['valid']),
({'datatype': 'test'}, EPISODE_COUNTS['test']),
]
for teacher_class in [EmotionClassificationSituationTeacher]:
for opt, num_episodes in opts_episodes:
full_opt = Opt({**opt, 'datapath': data_path})
teacher = teacher_class(full_opt)
self.assertEqual(teacher.num_episodes(), num_episodes)
self.assertEqual(teacher.num_examples(), num_episodes)
def test_check_examples(self):
with testing_utils.tempdir() as tmpdir:
data_path = tmpdir
# Check EmpatheticDialoguesTeacher
opts_and_examples = [
(
{'datatype': 'train', 'train_experiencer_only': True},
{
'situation': ' i used to scare for darkness',
'emotion': 'afraid',
'text': 'dont you feel so.. its a wonder ',
'labels': [
'I do actually hit blank walls a lot of times but i get by'
],
'prepend_ctx': None,
'prepend_cand': None,
'deepmoji_ctx': None,
'deepmoji_cand': None,
'episode_done': False,
'label_candidates': [],
},
),
(
{'datatype': 'train', 'train_experiencer_only': False},
{
'situation': 'I remember going to the fireworks with my best friend. There was a lot of people, but it only felt like us in the world.',
'emotion': 'sentimental',
'text': 'Where has she gone?',
'labels': ['We no longer talk.'],
'prepend_ctx': None,
'prepend_cand': None,
'deepmoji_ctx': None,
'deepmoji_cand': None,
'episode_done': True,
'label_candidates': [],
},
),
(
{'datatype': 'valid'},
{
'situation': 'I was walking through my hallway a few week ago, and my son was hiding under the table and grabbed my ankle. I thought i was got. ',
'emotion': 'surprised',
'text': 'I may have let out a scream that will have him question my manhood for the rest of our lives, lol. ',
'labels': ['I would probably scream also.'],
'prepend_ctx': None,
'prepend_cand': None,
'deepmoji_ctx': None,
'deepmoji_cand': None,
'episode_done': True,
'label_candidates': [
"That really does make it special. I'm glad you have that. ",
"It must've have been. Glad they are okay now.",
"Well sometimes companies make mistakes. I doubt it's anything you did.",
"Oh no, I'm so so sorry. I've always had at least one pet throughout my life, and they're truly part of the family.",
'Wow. That must suck. Do you like the band incubus? I missed them a couple of times but I saw them this year',
"I can't play those kinds of games. Too spooky for me.",
'I think your boss should give you more recognition in that case!',
"That's always a good thing. It means you should get on great with your neighbors.",
"Yeah, I had my Commodore 64 and Amiga in the late 80's. Still, the games were great when they worked!",
"That's ok, you did the right thing. It probably happens to lots of people.",
"That's good. Now you don't have to worry about it.",
'Hopefully one day you will be willing to explore a relationship in a serious way.',
"I'm sorry, things will get better.",
'Oh, okay. Maybe you should ask your teacher for some extra help or find a study buddy. i hope you do better next time.',
'Why? What did she do?',
'I do enjoy the zoo and the animals. I think they could be just as good.',
'Well at least you managed to save him!',
'That sucks, how much is it?',
'Yeah, that is a hard one to deal with. Maybe you should give it back so you will not feel bad about yourself.',
'HAve you been practicing? Do you have note cards?',
"That's good news at least. I hope you are feeling better now. And don't be hard on yourself, accidents happen.",
'Oops. I hate when that happens. Did they say anything to you?',
'no its not',
'Yes, my friends are coming with me. :)',
"Oh my gosh! I'm sorry! haha Thats funny. Atleast you have them a story to always remember.;)",
'I am so happy for you! All of your hard work paid off!',
'Wow, thats a nice car',
"Does it make you feel like you're living in an alternate reality?",
'glade all was well',
'ah, crisis averted! that could have been a lot worse',
"Maybe if we weren't so attached to being creatures of comfort. Some things we just can't let go of, wouldn't exist without some poor shmuck having to do the dirty work. I guess we're all that shmuck to someone, someway or another.",
"That's awesome! You're going to be rolling in the dough with those skills",
"Don't worry, from what you said it doesn't sound like you almost ruined it. It wasn't something on purpose at least.",
'Have you tried yoga? It can help in the meanwhile till you get a proper vacation.',
"I wish my insurance would give me something like that! It's good to go anyways.",
'I bet you are pretty anxious and excited at the same time.',
'Do you honk at them?',
"That's a bad supervisor. Did you call him/her out on it?",
"Geniuses don't do chores my friend.",
'Which country? that sounds fun, are you guys doing anything fun there?',
'oh that is so exciting!!! good for you man!',
'Wow! Any way they can get out? Did they call someone?',
'I love that nostalgic feeling. ',
'Congratulations. You have done great!',
'hahaha I definitely admire your courage to have done that.',
'wait til she leaves and continue',
'I do too. I am so sorry you are going through this',
'That is awesome. Congratulations. Im sure you earned every penny.',
'I want some of whatever you had for breakfast. You seem very happy.',
'Oh wow! I am so sorry that happened to you.',
'Well, hopefully there will be nothing but great things for him in his future.',
'Oh that was so nice of them! I bet you were relieved!',
'how was it ?',
"Nice! Why do you like it more than other places you've lived?",
'It must be difficult, do you think she will come back ever?',
"That's so messed up! Why was he doing that?",
'Did you try therapy at all or counseling?',
'Did you reconnect and promise to keep in touch?',
'I am so sorry for you. Perhaps you can hang with her after your workdays?',
"That's good that you found happiness. That's what were all in the pursuit of right?",
'I hope these feelings last for you!',
'you have eyes too',
"Wow, that's rude! He won't last long...",
"Hopefully the person learned what they had to do so they don't hold the line up in the future.",
'Oh no that must have been a terrible experience, I hope no one got hurt from the shattered glass.',
"Congrats!, I'm sure you must be very happy!",
"That's good to know! You all have a lot to be thankful for!",
'It depends, if you love her, you could try to work it out. Or you could cheat on her too',
"I'm sorry to hear that, I'm pretty terrified of the dentist myself. Ask for gas! Good luck, I'm sure everything will be just fine.",
'That makes sense, you are a good older sibling!',
'They say dogs are mans best friend. ',
'I would probably scream also.',
'Well I hope he gets to felling better.',
"If I had a bag of M&M's right now I would eat them for sure!",
'Yep. Happy and healthy is a blessing',
'Wow was it a scam or was it legit?',
'that is good to hear, it was a motivation to succeed, a great character building ',
'Its not time to get over it, you arent doing any wrong its okay to "feel" things. I hope people around you give you a lot of love! ',
'Awww. Did you keep it?',
"Oh I see.. Well that's a pretty positive talent then, huh? Maybe you should encourage him to keep doing it. Maybe he misses it. You could get him a present for his birthday or Christmas that was related to drawing tools/pencils and all that.",
"You learn a lot about someone when you move in with them, so if you feel comfortable in your relationship I think that's actually rather prudent.",
'That is terrible. How long have you had this pet?',
'Oh that sucks...did she explain herself yet',
"8 Miles!? That's very impressive. I bet I could barely make it a mile!",
'That stuff is pretty expensive. Maybe you can sell it on eBay or something.',
'Its horrible to have to got through things like thaty',
'Oh god.. so sorry to hear that.. May i ask how did Tom pass?',
'Like a paranormal type fear or a human with intent to harm type fear?',
'I bet you cant wait. WHere are going for your vacation?',
'Aw, that sucks. Did you give her a proper burial?',
'Awesome! What are you going to see?',
'What kind of food does it serve? Sounds wonderful!',
"Oh no! What's wrong with your dad?",
'oh god yes i know what you mean, any ideas what you wanna do ?',
"Hopefully you'll able to get it all sorted out soon. I'm sure when it's done it'll be a beautiful house.",
'That would be bad you should double check before you leave',
'I hope he continues to do well.',
"You can only do so much. Next time I'd just let him drink on his own.",
'I am sure you will meet them',
'Wow thats nice. What do you drive?',
],
},
),
(
{'datatype': 'test'},
{
'situation': 'My mother stopped by my house one day and said she saw 3 dogs on the road, down from our house. They were starving, with ribs showing, and it was a mother dog and her two small puppies. Of course, my daughter wanted to bring them to our house, so we could feed and help them. We did, and my heart went out to them, as they were so sweet, but really were in a bad shape.',
'emotion': 'caring',
'text': "Oh my goodness, that's very scary! I hope you are okay now and the drunk driver was punished for his actions?",
'labels': ['Yeah he was punished hes in jail still'],
'prepend_ctx': None,
'prepend_cand': None,
'deepmoji_ctx': None,
'deepmoji_cand': None,
'episode_done': True,
'label_candidates': [
"Are you really able to work from home? Finding a gig is so difficult, I'm glad that it is working for you.",
"Oh no. That's quite unfortunate for the deer. Did you just drive past it?",
'Wow, you must have felt jealous',
'I can only imagine! How is he now?',
'Oh goodness, what happened for the past 3 weeks?',
'LOL i hate that',
'I love a warm fire outside while camping! Sounds like a great time.',
'Yeah he was punished hes in jail still',
'Was he upset?',
"Wow that's awesome! Are you on a team?",
'Oh man that is just crazy! Feel bad for the person who has to clean it.',
'im sorry, thats awful. its a shame his parents arent being more supportive',
'I bet that was scary. Did he surprise you with something?',
'That sounds pretty stressful. Are you moving soon?',
"Well, if I were you, I'd keep it up, whether or not my spouse laughed at me, or a new girlfriend/boyfriend, whatever. It's not childish to me. Life is stressful enough. Let us snuggle what we want.",
"That's hilarious! Is he usually down for a good prank?",
'Oh I love seeing kids achieve things! Adorable. Good for her! :) ',
'that makes two of us! i am terrified of all snakes',
'that is dangerous, glad that both of your are okay',
"That's good to hear. I hope I meet someone that will do that for me.",
"Well that's good.",
'We need more people like you in the world. Theres always someone out there who needs a helping hand and could use a friend.',
'How ever so exciting! is this your first cruise?',
'Do you feel any less nervous? Job interviews are always nerve-wracking ',
'Maybe you could try to better that?',
"That's what matters most, that you had a good time and made memories!",
'Oh man! I hear you. I rescue animals and it is VERY hard to potty train them!',
'Hopefully they will give him a better shift.',
"That's a big step. I hope it works out for you.",
"Hiking is probably a tough environment to meet people! LA is real nice, but I hear the people there aren't/",
'I hope things turn out better for you. Keep fighting.',
"please don't lol i'm a man, I appreciate what you women go through when you're pregnant or on your period but i'm okay with not knowing details",
'I wish refrigerators would have a warranty that replaced food when they went bad. ',
'Seeing old friends that you have not contacted in so long is a nice feeling. ',
'Cool. Will you leave it there forever?',
'Oh wow. How far away did you move??',
'So inconsiderate! It reminds me of my neighbours doing building work one morning at 6AM!',
'Oh no, did they do something embarrasing?',
'That is awesome! Is there a particular reason you are so happy?',
'Did you buy all the essential items the dog will need? ',
'Fantastic, now do you have a job lined up?',
'Better luck next time! I love to scratch!',
'Thats neat. Do you guys make a lot of money?',
'I would be furious. What did you do?',
"Well hopefully you're able to familiarize yourself quickly. Good luck!",
'Oh thats good for your friend, but it sounds like you really would like to live there! I can imagine feeling jealous',
"That's unfortunate. What are you doing now?",
'Oh no. I rent also so I know your pain. My last landlord was awful. How did your landlord react?',
'Im sorry to hear that, how long did you have him?',
'Lovely. What did you do together?',
'Have you thought about getting another dog? ',
'Oh yeah? Do you still look awesome like you did back then?',
'Do you dress up when the new movies come out also?',
"That's a shame. I hate it when a place doesn't live up to the hype.",
"Sometimes life isn't very fair. I like to think of it as motivation to get a better job.",
'Well at least you have a plan. Are you planning to start the renovation soon?',
"Kids pick those things up quickly. And it'll help with her hand-eye coordination, reading - all sorts of things! ",
'did you enjoy yourself ',
'Haha, how did she feel when she found out?',
'that would really help if it was a permanent solution',
"Wow that must have been frustrating. I hope it didn't cost too much.",
'How nice of her. You must have been so happy to see her.',
"I know it's hard, but practice makes perfect! Keep trying and I am sure you will get it!",
'Do they live in a different state than you?',
"It reallyi s the best way to do things. That way even if you forget something you've got time to remember and remedy the situation",
'Wow, must have been rather frightening. Glad you are ok!',
'That was really nice! What a wonderful surprise! This act of kindness helps to restore my faith in humanity.',
"It's located in a small farming town in Vermont. I went on a tour of their factory once and it was interesting to see the cheese being made.",
"Poor guy was just nervous, I'm sure the more you take him the more he will venture away from you and have some fun!",
'How did he scare you?',
"Isn't that what sisters are for? What were you guys upset about?",
'That is a long time to be in the car for sure.',
"I'm glad to hear... Weddings can be stressful",
"That sounds amazing! How'd you guys meet?",
"Getting out of town and way from all of everday life's struggles always sounds like a great time. Did you leave you cell phone at home while you weer away to 'really' get away from everything for a minute?",
'Man that is scary! Granted i like to hear things about that. ',
'Yikes! Was anything damaged? ',
'Ouch, I would try and wear something on your neck next time you go in there.',
'awesome! was it hold em?',
'Not me! haha I love them all!',
"Oh that's nice, I love doing that. Did the cat seem happy?",
"Yeah, I can imagine. At least it's only one week!",
'Ew, I hate spiders. We are in the process of getting them out of our garage.',
"That's great news. I don't know what I would do if my mom passed.",
'Is that like the child equivalent of under the bed?',
"That's really fantastic! I'm glad to hear you turned your life around. ",
'What kind of work do you do?',
'Ah ok I undestand.',
'Very sad to hear. You have a good heart and are very caring, that is something to atleast be proud of!',
'Man that sounds really stressful...',
'You are so strong! Please thank your husband for his service and thank you for being his support, no matter the miles between you. Take care of yourself and get out with friends when you can!',
'I see. Is it your favorite food now? :p',
'YAY! good job! He/she is going to be beautiful',
'Nothing went wrong, we just have different lives in different places. I go visit every now and then.',
"A spelling bee - what fun! I'm sure you will win - I bet you've worked hard toward your goal.",
'You should install security cameras outside your house.',
'Border collie. She was great!',
'Oh dear me.. So sorry to hear that! what did you do?',
'Praise God man! He really is amazing and we should always be grateful for we have ',
'Oh no.. Did he pass away?',
],
},
),
]
for opt, example in opts_and_examples:
full_opt = Opt({**opt, 'datapath': data_path})
teacher = EmpatheticDialoguesTeacher(full_opt)
self.assertEqual(teacher.get(episode_idx=1, entry_idx=1), example)
# Check EmotionClassificationSituationTeacher
opts_and_examples = [
(
{'datatype': 'train', 'train_experiencer_only': True},
{
'text': ' i used to scare for darkness',
'labels': ['afraid'],
'episode_done': True,
},
),
(
{'datatype': 'train', 'train_experiencer_only': False},
{
'text': ' i used to scare for darkness',
'labels': ['afraid'],
'episode_done': True,
},
),
(
{'datatype': 'valid'},
{
'text': 'I was walking through my hallway a few week ago, and my son was hiding under the table and grabbed my ankle. I thought i was got. ',
'labels': ['surprised'],
'episode_done': True,
},
),
(
{'datatype': 'test'},
{
'text': "My mother stopped by my house one day and said she saw 3 dogs on the road, down from our house. They were starving, with ribs showing, and it was a mother dog and her two small puppies. Of course, my daughter wanted to bring them to our house, so we could feed and help them. We did, and my heart went out to them, as they were so sweet, but really were in a bad shape.",
'labels': ['caring'],
'episode_done': True,
},
),
]
for opt, example in opts_and_examples:
full_opt = Opt({**opt, 'datapath': data_path})
teacher = EmotionClassificationSituationTeacher(full_opt)
self.assertEqual(teacher.get(episode_idx=1), example)
if __name__ == '__main__':
unittest.main()
|
|
from vanilla import *
from defconAppKit.windows.baseWindow import BaseWindowController
from mojo.UI import MultiLineView, OpenWindow
from mojo.events import addObserver, removeObserver
from mojo.roboFont import AllFonts, CurrentFont, RGlyph
from mojo.drawingTools import *
from defconAppKit.tools.textSplitter import splitText
from lib.UI.integerEditText import NumberEditText
def listFontNames(fontList):
return [fontName(font) for font in fontList]
def fontName(font):
familyName = font.info.familyName
styleName = font.info.styleName
if familyName is None: font.info.familyName = familyName = 'Unnamed Font'
if styleName is None: font.info.styleName = styleName = 'Unnamed style'
return ' > '.join([familyName, styleName])
class FontList(List):
def __init__(self, posSize, fontList, callback):
fontNames = listFontNames(fontList)
super(FontList, self).__init__(posSize, fontNames, allowsMultipleSelection=False,
allowsEmptySelection=False, enableDelete=False, selectionCallback=self.updateSelectedFonts)
self.fonts = fontList
self.selection = None
self.callback = callback
def update(self, fontList=None):
        if fontList is None:
            self.fonts = AllFonts()
        else:
            self.fonts = fontList
self.set(listFontNames(self.fonts))
def updateSelectedFonts(self, info):
self.selection = [self.fonts[i] for i in info.getSelection()]
self.callback(self.selection[0])
def selectedFonts(self):
return self.selection
def select(self, thisFont):
for i, font in enumerate(self.fonts):
if thisFont == font:
self.setSelection([i])
class UnderlineStrikethroughPreview(BaseWindowController):
def __init__(self):
self.fonts = AllFonts()
self.font = CurrentFont()
self.testString = "Hlaetgys"
self.underlineThickness = {}
self.underlinePosition = {}
self.strikeThickness = {}
self.strikePosition = {}
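        # Cache the values per font, keyed by font.path; edits only touch these
        # dicts until one of the Apply buttons writes them back to the fonts.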
for font in self.fonts:
self.underlineThickness[font.path] = font.info.postscriptUnderlineThickness
self.underlinePosition[font.path] = font.info.postscriptUnderlinePosition
self.strikeThickness[font.path] = font.info.openTypeOS2StrikeoutSize
self.strikePosition[font.path] = font.info.openTypeOS2StrikeoutPosition
# create a window
self.w = Window((900, 450), "Underline and Strikethrough", minSize=(775, 350))
# add the preview to the window
self.w.preview = MultiLineView((270, 10, -10, -175), pointSize=100, hasVerticalScroller=False)
# labels
self.w.textStrikethroughTitle = TextBox((275, -165, -10, 17), "Strikethrough")
self.w.textStrikeThickness = TextBox((278, -115, -10, 17), "Thickness", sizeStyle='small')
self.w.textStrikePos = TextBox((351, -115, -10, 17), "Position", sizeStyle='small')
self.w.textUnderlineTitle = TextBox((470, -165, -10, 17), "Underline")
self.w.textUnderThickness = TextBox((473, -115, -10, 17), "Thickness", sizeStyle='small')
self.w.textUnderPos = TextBox((546, -115, -10, 17), "Position", sizeStyle='small')
self.w.textTestText = TextBox((278, -47, -10, 17), "Testing text", sizeStyle='small')
# data
# NumberEditText defaults: allowFloat=True, allowNegative=True, allowEmpty=True, minimum=None, maximum=None, decimals=2
self.w.strike = NumberEditText((277, -140, 70, 22), callback=self.strikeCallback, allowFloat=False, allowNegative=False)
self.w.strikePos = NumberEditText((350, -140, 70, 22), callback=self.strikePosCallback, allowFloat=False)
self.w.under = NumberEditText((472, -140, 70, 22), callback=self.underCallback, allowFloat=False, allowNegative=False)
self.w.underPos = NumberEditText((545, -140, 70, 22), callback=self.underPosCallback, allowFloat=False)
self.w.testText = EditText((277, -72, 143, 22), text=self.testString, callback=self.testTextCallback)
# add font list to window
self.w.fontList = FontList((10, 10, 250, -10), self.fonts, self.updateFont)
# apply
self.w.set = Button((645, -139, 120, 20), "Apply to current", callback=self.applySingleCallback)
self.w.applyAll = Button((645, -109, 120, 20), "Apply to all", callback=self.applyAllCallback)
# set UI
self.setUI()
# subscribe to drawing callback in a multi line view
addObserver(self, "drawLines", "spaceCenterDraw")
# setup base behavior (from the defcon BaseWindowController)
self.setUpBaseWindowBehavior()
# open the window
self.w.open()
def setUI(self):
self.w.strike.set(self.strikeThickness[self.font.path])
self.w.strikePos.set(self.strikePosition[self.font.path])
self.w.under.set(self.underlineThickness[self.font.path])
self.w.underPos.set(self.underlinePosition[self.font.path])
marginGlyph = RGlyph()
        marginGlyph.width = self.font['space'].width if 'space' in self.font else 300
self.w.preview.setFont(self.font)
self.testGlyphs = []
self.testGlyphs.append(marginGlyph)
charmap = self.font.getCharacterMapping()
testGlyphNames = splitText(self.testString, charmap)
for gn in testGlyphNames:
if gn in self.font:
self.testGlyphs.append(self.font[gn])
self.testGlyphs.append(marginGlyph)
self.w.preview.set(self.testGlyphs)
def testTextCallback(self, sender):
self.testString = sender.get()
self.setUI()
def updateFont(self, font):
self.font = font
self.setUI()
def strikeCallback(self, sender):
value = sender.get()
if value != '-':
self.strikeThickness[self.font.path] = value
self.updateView()
def applyAllCallback(self, sender):
uT = self.underlineThickness
uP = self.underlinePosition
sT = self.strikeThickness
sP = self.strikePosition
cf = self.font
for font in self.fonts:
font.info.postscriptUnderlineThickness = uT[cf.path]
uT[font.path] = uT[cf.path]
font.info.postscriptUnderlinePosition = uP[cf.path]
uP[font.path] = uP[cf.path]
font.info.openTypeOS2StrikeoutSize = sT[cf.path]
sT[font.path] = sT[cf.path]
font.info.openTypeOS2StrikeoutPosition = sP[cf.path]
sP[font.path] = sP[cf.path]
def applySingleCallback(self, sender):
font = self.font
uT = self.underlineThickness
uP = self.underlinePosition
sT = self.strikeThickness
sP = self.strikePosition
font.info.postscriptUnderlineThickness = uT[font.path]
font.info.postscriptUnderlinePosition = uP[font.path]
font.info.openTypeOS2StrikeoutSize = sT[font.path]
font.info.openTypeOS2StrikeoutPosition = sP[font.path]
def strikePosCallback(self, sender):
value = sender.get()
if value != '-':
self.strikePosition[self.font.path] = value
self.updateView()
def underCallback(self, sender):
value = sender.get()
if value != '-':
self.underlineThickness[self.font.path] = value
self.updateView()
def underPosCallback(self, sender):
value = sender.get()
if value != '-':
self.underlinePosition[self.font.path] = value
self.updateView()
def updateView(self):
self.w.preview.contentView().refresh()
def windowCloseCallback(self, sender):
super(UnderlineStrikethroughPreview, self).windowCloseCallback(sender)
removeObserver(self, "spaceCenterDraw")
def drawLines(self, notification):
glyph = notification["glyph"]
if glyph:
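            # Draw the underline and strikethrough as filled rectangles that
            # overhang the glyph width slightly, so consecutive glyphs in the
            # preview form a continuous line.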
fill(0)
if self.underlinePosition[self.font.path] is not None and self.underlineThickness[self.font.path] is not None:
underlineY = self.underlinePosition[self.font.path] - self.underlineThickness[self.font.path] / 2
rect(-10, underlineY, glyph.width+20, self.underlineThickness[self.font.path])
if self.strikePosition[self.font.path] is not None and self.strikeThickness[self.font.path] is not None:
strikeY = self.strikePosition[self.font.path] - self.strikeThickness[self.font.path]
rect(-10, strikeY, glyph.width+20, self.strikeThickness[self.font.path])
OpenWindow(UnderlineStrikethroughPreview)
|
|
from contextlib import contextmanager
import itertools
from docutils import parsers, nodes
from CommonMark import DocParser, HTMLRenderer
from warnings import warn
__all__ = ['CommonMarkParser']
def flatten(iterator):
return itertools.chain.from_iterable(iterator)
class _SectionHandler(object):
def __init__(self, document):
self._level_to_elem = {0: document}
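        # Maps a heading level to the most recent section node seen at that
        # level (level 0 is the document root), so a new section can be
        # attached under the nearest enclosing shallower heading.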
def _parent_elem(self, child_level):
parent_level = max(level for level in self._level_to_elem
if child_level > level)
return self._level_to_elem[parent_level]
def _prune_levels(self, limit_level):
self._level_to_elem = dict((level, elem)
for level, elem in self._level_to_elem.items()
if level <= limit_level)
def add_new_section(self, section, level):
parent = self._parent_elem(level)
parent.append(section)
self._level_to_elem[level] = section
self._prune_levels(level)
class CommonMarkParser(parsers.Parser, object):
supported = ('md', 'markdown')
def convert_blocks(self, blocks):
for block in blocks:
self.convert_block(block)
def convert_block(self, block):
if (block.t == "Document"):
self.convert_blocks(block.children)
elif (block.t == "ATXHeader") or (block.t == "SetextHeader"):
self.section(block)
elif (block.t == "Paragraph"):
self.paragraph(block)
elif (block.t == "BlockQuote"):
self.blockquote(block)
elif (block.t == "ListItem"):
self.list_item(block)
elif (block.t == "List"):
self.list_block(block)
elif (block.t == "IndentedCode"):
self.verbatim(block.string_content)
elif (block.t == "FencedCode"):
# FIXME: add pygment support as done in code_role in rst/roles.py
self.verbatim(block.string_content)
elif (block.t == "ReferenceDef"):
self.reference(block)
elif (block.t == "HorizontalRule"):
self.horizontal_rule()
elif (block.t == "HtmlBlock"):
self.html_block(block)
else:
warn("Unsupported block type: " + block.t)
def parse(self, inputstring, document):
self.setup_parse(inputstring, document)
self.document = document
self.current_node = document
self.section_handler = _SectionHandler(document)
parser = DocParser()
ast = parser.parse(inputstring + '\n')
self.convert_block(ast)
self.finish_parse()
@contextmanager
def _temp_current_node(self, current_node):
saved_node = self.current_node
self.current_node = current_node
yield
self.current_node = saved_node
# Blocks
def section(self, block):
new_section = nodes.section()
new_section.line = block.start_line
new_section['level'] = block.level
title_node = nodes.title()
title_node.line = block.start_line
append_inlines(title_node, block.inline_content)
new_section.append(title_node)
name = nodes.fully_normalize_name(title_node.astext())
new_section['names'].append(name)
self.current_node.document.note_implicit_target(new_section, new_section)
new_section['ids'].append(nodes.make_id(name))
self.section_handler.add_new_section(new_section, block.level)
self.current_node = new_section
def verbatim(self, text):
verbatim_node = nodes.literal_block()
text = ''.join(flatten(text))
if text.endswith('\n'):
text = text[:-1]
verbatim_node.append(nodes.Text(text))
self.current_node.append(verbatim_node)
def paragraph(self, block):
p = nodes.paragraph()
p.line = block.start_line
append_inlines(p, block.inline_content)
self.current_node.append(p)
def blockquote(self, block):
q = nodes.block_quote()
q.line = block.start_line
with self._temp_current_node(q):
self.convert_blocks(block.children)
self.current_node.append(q)
def list_item(self, block):
node = nodes.list_item()
node.line = block.start_line
with self._temp_current_node(node):
self.convert_blocks(block.children)
self.current_node.append(node)
def list_block(self, block):
list_node = None
if (block.list_data['type'] == "Bullet"):
list_node = nodes.bullet_list()
else:
list_node = nodes.enumerated_list()
list_node.line = block.start_line
with self._temp_current_node(list_node):
self.convert_blocks(block.children)
self.current_node.append(list_node)
def html_block(self, block):
raw_node = nodes.raw('', block.string_content, format='html')
raw_node.line = block.start_line
self.current_node.append(raw_node)
def horizontal_rule(self):
transition_node = nodes.transition()
self.current_node.append(transition_node)
def reference(self, block):
target_node = nodes.target()
target_node.line = block.start_line
target_node['names'].append(make_refname(block.label))
target_node['refuri'] = block.destination
if block.title:
target_node['title'] = block.title
self.current_node.append(target_node)
def make_refname(label):
return text_only(label).lower()
def text_only(nodes):
return "".join(s.c if s.t == "Str" else text_only(s.children)
for s in nodes)
# Inlines
def emph(inlines):
emph_node = nodes.emphasis()
append_inlines(emph_node, inlines)
return emph_node
def strong(inlines):
strong_node = nodes.strong()
append_inlines(strong_node, inlines)
return strong_node
def inline_code(inline):
literal_node = nodes.literal()
literal_node.append(nodes.Text(inline.c))
return literal_node
def inline_html(inline):
literal_node = nodes.raw('', inline.c, format='html')
return literal_node
def inline_entity(inline):
val = HTMLRenderer().renderInline(inline)
entity_node = nodes.paragraph('', val, format='html')
return entity_node
def reference(block):
ref_node = nodes.reference()
label = make_refname(block.label)
ref_node['name'] = label
if block.destination is not None:
ref_node['refuri'] = block.destination
else:
ref_node['refname'] = label
# self.document.note_refname(ref_node)
if block.title:
ref_node['title'] = block.title
append_inlines(ref_node, block.label)
return ref_node
def image(block):
img_node = nodes.image()
img_node['uri'] = block.destination
if block.title:
img_node['title'] = block.title
img_node['alt'] = text_only(block.label)
return img_node
def parse_inline(parent_node, inline):
node = None
if (inline.t == "Str"):
node = nodes.Text(inline.c)
elif (inline.t == "Softbreak"):
node = nodes.Text('\n')
elif inline.t == "Emph":
node = emph(inline.c)
elif inline.t == "Strong":
node = strong(inline.c)
elif inline.t == "Link":
node = reference(inline)
elif inline.t == "Image":
node = image(inline)
elif inline.t == "Code":
node = inline_code(inline)
elif inline.t == "Html":
node = inline_html(inline)
elif (inline.t == "Entity"):
node = inline_entity(inline)
else:
warn("Unsupported inline type " + inline.t)
return
node.line = inline.start_line
parent_node.append(node)
def append_inlines(parent_node, inlines):
    for inline in inlines:
        parse_inline(parent_node, inline)
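# Hedged usage sketch (not part of the original module): feeding a small
# Markdown string through the parser with the standard docutils plumbing.
# new_document/OptionParser come from docutils, not from this file, and the
# snippet assumes the same old CommonMark API imported above is installed.
if __name__ == '__main__':
    from docutils.frontend import OptionParser
    from docutils.utils import new_document
    settings = OptionParser(components=(CommonMarkParser,)).get_default_values()
    doc = new_document('<markdown>', settings)
    CommonMarkParser().parse('# Title\n\nSome *emphasis* and `code`.\n', doc)
    print(doc.pformat())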
|
|
# Copyright 2014 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class uVisionDefinitions():
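    # Default settings mirroring the option groups of a uVision project file
    # (.uvproj). The zero / empty-list values are placeholders that a project
    # exporter is expected to override; the comments describe the uVision
    # dialog option each key corresponds to.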
uvision_settings = {
# C/C++ settings
'Cads': {
            'interw': 0, # ARM/Thumb interworking
'Optim': [0], # Optimization level
'oTime': 0, # Optimize for time
'SplitLS': 0, # Split load and store multiple
'OneElfS': 0, # One elf section per function
'Strict': 0, # Strict ANSI C
'EnumInt': 0, # Enum container always int
'PlainCh': 0, # Plain char is signed
'Ropi': 0, # Read-only position independent code
'Rwpi': 0, # Read-write position independent code
'wLevel': 0, # Warnings level
'uThumb': 0, # Thumb mode
'uSurpInc': 0, # No auto includes
'uC99': 0, # C99 mode
'MiscControls': [], # Misc controls
},
# Linker settings
'LDads': {
'umfTarg': 0, # Use Memory from Target dialog window
'Ropi': 0, # Make RO section position independent
'Rwpi': 0, # Make RW section position independent
'noStLib': 0, # Dont search Standard libraries
'RepFail': 0, # Report might fail conditions as errors
'useFile': 0,
'TextAddressRange': 0, # RO address range
'DataAddressRange': 0, # RW address range
'IncludeLibs': 0,
'IncludeLibsPath': 0,
'Misc': [], # Misc controls
'LinkerInputFile': 0, # Scatter file
'DisabledWarnings': [], # Disable warnings
},
# Assembly settings
'Aads': {
            'interw': 0, # ARM/Thumb interworking
'Ropi': 0, # RO position independent
'Rwpi': 0, # RW position independent
'thumb': 0, # Thumb mode
'SplitLS': 0, # Split load and store multiple
'SwStkChk': 0,
'NoWarn': 0, # No warnings
'uSurpInc': 0, # No auto includes
'MiscControls': [], # Misc controls
'Define': [], # Define
'Undefine': 0, # Undefine
'IncludePath': [], # Include paths
},
# User settings
'TargetOption': {
'CreateExecutable': 0, # Create executable
'CreateLib': 0, # Create library
'CreateHexFile': 0, # Create hex file
'DebugInformation': 0, # Debug information
'BrowseInformation': 0, # Browse information
'CreateBatchFile': 0, # Create batch file
'BeforeCompile': { # Run user program before compilation
'RunUserProg1': 0, # Run #1
'UserProg1Name': 0, # Program #1 name
'RunUserProg2': 0, # Run #2
'UserProg2Name': 0, # Program #2 name
'UserProg1Dos16Mode': 0, # Dos16 mode for #1
'UserProg2Dos16Mode': 0, # Dos16 mode for #2
},
'BeforeMake': { # User programs before build
'RunUserProg1': 0, # Run #1
'UserProg1Name': 0, # Program #1 name
'RunUserProg2': 0, # Run #2
'UserProg2Name': 0, # Program #2 name
'UserProg1Dos16Mode': 0, # Dos16 mode for #1
'UserProg2Dos16Mode': 0, # Dos16 mode for #2
},
'AfterMake': {
                'RunUserProg1': 0, # Run #1
'UserProg1Name': 0, # Program #1 name
'RunUserProg2': 0, # Run #2
'UserProg2Name': 0, # Program #2 name
'UserProg1Dos16Mode': 0, # Dos16 mode for #1
'UserProg2Dos16Mode': 0, # Dos16 mode for #2
}
},
# Target settings
'ArmAdsMisc': {
'useUlib': 0, # use MicroLIB
'NoZi1': 0, #
'NoZi2': 0,
'NoZi3': 0,
'NoZi4': 0,
'NoZi5': 0,
'OCR_RVCT1': {
'Type': 0,
'StartAddress': 0,
'Size': 0,
},
'OCR_RVCT2': {
'Type': 0,
'StartAddress': 0,
'Size': 0,
},
'OCR_RVCT3': {
'Type': 0,
'StartAddress': 0,
'Size': 0,
},
'OCR_RVCT4': {
'Type': 0,
'StartAddress': 0,
'Size': 0,
},
'OCR_RVCT5': {
'Type': 0,
'StartAddress': 0,
'Size': 0,
},
'OCR_RVCT6': {
'Type': 0,
'StartAddress': 0,
'Size': 0,
},
'OCR_RVCT7': {
'Type': 0,
'StartAddress': 0,
'Size': 0,
},
'OCR_RVCT8': {
'Type': 0,
'StartAddress': 0,
'Size': 0,
},
'OCR_RVCT9': {
'Type': 0,
'StartAddress': 0,
'Size': 0,
},
'OCR_RVCT10': {
'Type': 0,
'StartAddress': 0,
'Size': 0,
}
},
'CommonProperty': {
'UseCPPCompile': 0, # Use CPP compiler for C files
'RVCTCodeConst': 0,
'RVCTZI': 0,
'RVCTOtherData': 0,
'ModuleSelection': 0,
'IncludeInBuild': 0,
'AlwaysBuild': 0,
'GenerateAssemblyFile': 0,
'AssembleAssemblyFile': 0,
'PublicsOnly': 0,
'StopOnExitCode': 0,
'CustomArgument': 0,
'IncludeLibraryModules': 0,
},
'DebugOption': {
'Simulator': {
'UseSimulator': 0,
'LoadApplicationAtStartup': 1,
'RunToMain': 1,
'RestoreBreakpoints': 1,
'RestoreWatchpoints': 1,
'RestoreMemoryDisplay': 1,
'RestoreFunctions': 1,
'RestoreToolbox': 1,
'LimitSpeedToRealTime': 0,
},
'Target': {
'UseTarget': 1,
'LoadApplicationAtStartup': 1,
'RunToMain': 1,
'RestoreBreakpoints': 1,
'RestoreWatchpoints': 1,
'RestoreMemoryDisplay': 1,
'RestoreFunctions': 1,
'RestoreToolbox': 1,
                'RestoreTracepoints': 1,
},
'RunDebugAfterBuild': 0,
'TargetSelection': 0,
},
'Utilities': {
'Flash1': {
'UseTargetDll': 0,
'UseExternalTool': 0,
'RunIndependent': 0,
'UpdateFlashBeforeDebugging': 0,
'Capability': 0,
'DriverSelection': 0,
},
'bUseTDR': 1,
        'Flash2': 'BIN\\CMSIS_AGDI.dll',
'Flash3': 0,
'Flash4': 0,
'pFcarmOut': 0,
'pFcarmGrp': 0,
'pFcArmRoot': 0,
'FcArmLst': 0,
}
}
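# Illustrative sketch, not part of the template above: how a build script might
# tweak a few of these fields before emitting a uVision project. The variable
# name "project_settings" and the scatter-file name are hypothetical.
#
#     project_settings['ArmAdsMisc']['useUlib'] = 1               # link against MicroLIB
#     project_settings['TargetOption']['DebugInformation'] = 1    # keep debug info
#     project_settings['LDads']['LinkerInputFile'] = 'board.sct'  # scatter file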
|
|
"""Provide the functionality to group entities."""
from __future__ import annotations
from abc import abstractmethod
import asyncio
from collections.abc import Iterable
from contextvars import ContextVar
import logging
from typing import Any, List, cast
import voluptuous as vol
from homeassistant import core as ha
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_NAME,
CONF_ENTITIES,
CONF_ICON,
CONF_NAME,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
EVENT_HOMEASSISTANT_START,
SERVICE_RELOAD,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import CoreState, HomeAssistant, callback, split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.integration_platform import (
async_process_integration_platforms,
)
from homeassistant.helpers.reload import async_reload_integration_platforms
from homeassistant.loader import bind_hass
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
DOMAIN = "group"
GROUP_ORDER = "group_order"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CONF_ALL = "all"
ATTR_ADD_ENTITIES = "add_entities"
ATTR_AUTO = "auto"
ATTR_ENTITIES = "entities"
ATTR_OBJECT_ID = "object_id"
ATTR_ORDER = "order"
ATTR_ALL = "all"
SERVICE_SET = "set"
SERVICE_REMOVE = "remove"
PLATFORMS = ["light", "cover", "notify"]
REG_KEY = f"{DOMAIN}_registry"
_LOGGER = logging.getLogger(__name__)
current_domain: ContextVar[str] = ContextVar("current_domain")
def _conf_preprocess(value):
"""Preprocess alternative configuration formats."""
if not isinstance(value, dict):
value = {CONF_ENTITIES: value}
return value
GROUP_SCHEMA = vol.All(
vol.Schema(
{
vol.Optional(CONF_ENTITIES): vol.Any(cv.entity_ids, None),
CONF_NAME: cv.string,
CONF_ICON: cv.icon,
CONF_ALL: cv.boolean,
}
)
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({cv.match_all: vol.All(_conf_preprocess, GROUP_SCHEMA)})},
extra=vol.ALLOW_EXTRA,
)
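# Illustrative example, not executed by the integration: shapes accepted by
# CONFIG_SCHEMA. A bare list of entity ids is wrapped into {CONF_ENTITIES: [...]}
# by _conf_preprocess before GROUP_SCHEMA is applied. The group names and
# entity ids below are hypothetical.
def _example_validate_config():
    return CONFIG_SCHEMA(
        {
            "group": {
                # full mapping form
                "living_room": {
                    "name": "Living Room",
                    "entities": ["light.sofa", "light.tv_backlight"],
                },
                # shorthand form: just a list of entity ids
                "upstairs": ["light.bedroom", "light.hallway"],
            }
        }
    )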
class GroupIntegrationRegistry:
"""Class to hold a registry of integrations."""
on_off_mapping: dict[str, str] = {STATE_ON: STATE_OFF}
off_on_mapping: dict[str, str] = {STATE_OFF: STATE_ON}
on_states_by_domain: dict[str, set] = {}
exclude_domains: set = set()
def exclude_domain(self) -> None:
"""Exclude the current domain."""
self.exclude_domains.add(current_domain.get())
def on_off_states(self, on_states: set, off_state: str) -> None:
"""Register on and off states for the current domain."""
for on_state in on_states:
if on_state not in self.on_off_mapping:
self.on_off_mapping[on_state] = off_state
if len(on_states) == 1 and off_state not in self.off_on_mapping:
self.off_on_mapping[off_state] = list(on_states)[0]
self.on_states_by_domain[current_domain.get()] = set(on_states)
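# Illustrative sketch of how integrations feed GroupIntegrationRegistry:
# _process_group_platform below hands the registry to each platform's
# async_describe_on_off_states hook, which registers its states roughly like
# this (the light-domain states shown are an example, not taken from this file):
#
#     @callback
#     def async_describe_on_off_states(hass, registry):
#         """Describe group on/off states for the light domain."""
#         registry.on_off_states({STATE_ON}, STATE_OFF)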
@bind_hass
def is_on(hass, entity_id):
"""Test if the group state is in its ON-state."""
if REG_KEY not in hass.data:
# Integration not setup yet, it cannot be on
return False
state = hass.states.get(entity_id)
if state is not None:
return state.state in hass.data[REG_KEY].on_off_mapping
return False
@bind_hass
def expand_entity_ids(hass: HomeAssistant, entity_ids: Iterable[Any]) -> list[str]:
"""Return entity_ids with group entity ids replaced by their members.
Async friendly.
"""
found_ids: list[str] = []
for entity_id in entity_ids:
if not isinstance(entity_id, str) or entity_id in (
ENTITY_MATCH_NONE,
ENTITY_MATCH_ALL,
):
continue
entity_id = entity_id.lower()
try:
# If entity_id points at a group, expand it
domain, _ = ha.split_entity_id(entity_id)
if domain == DOMAIN:
child_entities = get_entity_ids(hass, entity_id)
if entity_id in child_entities:
child_entities = list(child_entities)
child_entities.remove(entity_id)
found_ids.extend(
ent_id
for ent_id in expand_entity_ids(hass, child_entities)
if ent_id not in found_ids
)
else:
if entity_id not in found_ids:
found_ids.append(entity_id)
except AttributeError:
# Raised by split_entity_id if entity_id is not a string
pass
return found_ids
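# For example, expanding ["group.downstairs"] where that group tracks
# light.sofa and group.kitchen (which in turn tracks light.counter) would
# yield ["light.sofa", "light.counter"]; the entity ids are hypothetical.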
@bind_hass
def get_entity_ids(
hass: HomeAssistant, entity_id: str, domain_filter: str | None = None
) -> list[str]:
"""Get members of this group.
Async friendly.
"""
group = hass.states.get(entity_id)
if not group or ATTR_ENTITY_ID not in group.attributes:
return []
entity_ids = group.attributes[ATTR_ENTITY_ID]
if not domain_filter:
return cast(List[str], entity_ids)
domain_filter = f"{domain_filter.lower()}."
return [ent_id for ent_id in entity_ids if ent_id.startswith(domain_filter)]
@bind_hass
def groups_with_entity(hass: HomeAssistant, entity_id: str) -> list[str]:
"""Get all groups that contain this entity.
Async friendly.
"""
if DOMAIN not in hass.data:
return []
groups = []
for group in hass.data[DOMAIN].entities:
if entity_id in group.tracking:
groups.append(group.entity_id)
return groups
async def async_setup(hass, config):
"""Set up all groups found defined in the configuration."""
component = hass.data.get(DOMAIN)
if component is None:
component = hass.data[DOMAIN] = EntityComponent(_LOGGER, DOMAIN, hass)
hass.data[REG_KEY] = GroupIntegrationRegistry()
await async_process_integration_platforms(hass, DOMAIN, _process_group_platform)
await _async_process_config(hass, config, component)
async def reload_service_handler(service):
"""Remove all user-defined groups and load new ones from config."""
auto = list(filter(lambda e: not e.user_defined, component.entities))
conf = await component.async_prepare_reload()
if conf is None:
return
await _async_process_config(hass, conf, component)
await component.async_add_entities(auto)
await async_reload_integration_platforms(hass, DOMAIN, PLATFORMS)
hass.services.async_register(
DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=vol.Schema({})
)
service_lock = asyncio.Lock()
async def locked_service_handler(service):
"""Handle a service with an async lock."""
async with service_lock:
await groups_service_handler(service)
async def groups_service_handler(service):
"""Handle dynamic group service functions."""
object_id = service.data[ATTR_OBJECT_ID]
entity_id = f"{DOMAIN}.{object_id}"
group = component.get_entity(entity_id)
# new group
if service.service == SERVICE_SET and group is None:
entity_ids = (
service.data.get(ATTR_ENTITIES)
or service.data.get(ATTR_ADD_ENTITIES)
or None
)
extra_arg = {
attr: service.data[attr]
for attr in (ATTR_ICON,)
if service.data.get(attr) is not None
}
await Group.async_create_group(
hass,
service.data.get(ATTR_NAME, object_id),
object_id=object_id,
entity_ids=entity_ids,
user_defined=False,
mode=service.data.get(ATTR_ALL),
**extra_arg,
)
return
if group is None:
_LOGGER.warning("%s:Group '%s' doesn't exist!", service.service, object_id)
return
# update group
if service.service == SERVICE_SET:
need_update = False
if ATTR_ADD_ENTITIES in service.data:
delta = service.data[ATTR_ADD_ENTITIES]
entity_ids = set(group.tracking) | set(delta)
await group.async_update_tracked_entity_ids(entity_ids)
if ATTR_ENTITIES in service.data:
entity_ids = service.data[ATTR_ENTITIES]
await group.async_update_tracked_entity_ids(entity_ids)
if ATTR_NAME in service.data:
group.name = service.data[ATTR_NAME]
need_update = True
if ATTR_ICON in service.data:
group.icon = service.data[ATTR_ICON]
need_update = True
if ATTR_ALL in service.data:
group.mode = all if service.data[ATTR_ALL] else any
need_update = True
if need_update:
group.async_write_ha_state()
return
# remove group
if service.service == SERVICE_REMOVE:
await component.async_remove_entity(entity_id)
hass.services.async_register(
DOMAIN,
SERVICE_SET,
locked_service_handler,
schema=vol.All(
vol.Schema(
{
vol.Required(ATTR_OBJECT_ID): cv.slug,
vol.Optional(ATTR_NAME): cv.string,
vol.Optional(ATTR_ICON): cv.string,
vol.Optional(ATTR_ALL): cv.boolean,
vol.Exclusive(ATTR_ENTITIES, "entities"): cv.entity_ids,
vol.Exclusive(ATTR_ADD_ENTITIES, "entities"): cv.entity_ids,
}
)
),
)
hass.services.async_register(
DOMAIN,
SERVICE_REMOVE,
groups_service_handler,
schema=vol.Schema({vol.Required(ATTR_OBJECT_ID): cv.slug}),
)
return True
async def _process_group_platform(hass, domain, platform):
"""Process a group platform."""
current_domain.set(domain)
platform.async_describe_on_off_states(hass, hass.data[REG_KEY])
async def _async_process_config(hass, config, component):
"""Process group configuration."""
hass.data.setdefault(GROUP_ORDER, 0)
tasks = []
for object_id, conf in config.get(DOMAIN, {}).items():
name = conf.get(CONF_NAME, object_id)
entity_ids = conf.get(CONF_ENTITIES) or []
icon = conf.get(CONF_ICON)
mode = conf.get(CONF_ALL)
# We keep track of the order when we are creating the tasks
# in the same way that async_create_group does to make
# sure we use the same ordering system. This overcomes
# the problem with concurrently creating the groups
tasks.append(
Group.async_create_group(
hass,
name,
entity_ids,
icon=icon,
object_id=object_id,
mode=mode,
order=hass.data[GROUP_ORDER],
)
)
# Keep track of the group order without iterating
# every state in the state machine every time
# we setup a new group
hass.data[GROUP_ORDER] += 1
await asyncio.gather(*tasks)
class GroupEntity(Entity):
"""Representation of a Group of entities."""
@property
def should_poll(self) -> bool:
"""Disable polling for group."""
return False
async def async_added_to_hass(self) -> None:
"""Register listeners."""
async def _update_at_start(_):
await self.async_update()
self.async_write_ha_state()
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _update_at_start)
async def async_defer_or_update_ha_state(self) -> None:
"""Only update once at start."""
if self.hass.state != CoreState.running:
return
await self.async_update()
self.async_write_ha_state()
@abstractmethod
async def async_update(self) -> None:
"""Abstract method to update the entity."""
class Group(Entity):
"""Track a group of entity ids."""
def __init__(
self,
hass,
name,
order=None,
icon=None,
user_defined=True,
entity_ids=None,
mode=None,
):
"""Initialize a group.
        This object has a factory function for creation.
"""
self.hass = hass
self._name = name
self._state = None
self._icon = icon
self._set_tracked(entity_ids)
self._on_off = None
self._assumed = None
self._on_states = None
self.user_defined = user_defined
self.mode = any
if mode:
self.mode = all
self._order = order
self._assumed_state = False
self._async_unsub_state_changed = None
@staticmethod
def create_group(
hass,
name,
entity_ids=None,
user_defined=True,
icon=None,
object_id=None,
mode=None,
order=None,
):
"""Initialize a group."""
return asyncio.run_coroutine_threadsafe(
Group.async_create_group(
hass, name, entity_ids, user_defined, icon, object_id, mode, order
),
hass.loop,
).result()
@staticmethod
async def async_create_group(
hass,
name,
entity_ids=None,
user_defined=True,
icon=None,
object_id=None,
mode=None,
order=None,
):
"""Initialize a group.
This method must be run in the event loop.
"""
if order is None:
hass.data.setdefault(GROUP_ORDER, 0)
order = hass.data[GROUP_ORDER]
# Keep track of the group order without iterating
# every state in the state machine every time
# we setup a new group
hass.data[GROUP_ORDER] += 1
group = Group(
hass,
name,
order=order,
icon=icon,
user_defined=user_defined,
entity_ids=entity_ids,
mode=mode,
)
group.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, object_id or name, hass=hass
)
# If called before the platform async_setup is called (test cases)
component = hass.data.get(DOMAIN)
if component is None:
component = hass.data[DOMAIN] = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([group])
return group
@property
def should_poll(self):
"""No need to poll because groups will update themselves."""
return False
@property
def name(self):
"""Return the name of the group."""
return self._name
@name.setter
def name(self, value):
"""Set Group name."""
self._name = value
@property
def state(self):
"""Return the state of the group."""
return self._state
@property
def icon(self):
"""Return the icon of the group."""
return self._icon
@icon.setter
def icon(self, value):
"""Set Icon for group."""
self._icon = value
@property
def extra_state_attributes(self):
"""Return the state attributes for the group."""
data = {ATTR_ENTITY_ID: self.tracking, ATTR_ORDER: self._order}
if not self.user_defined:
data[ATTR_AUTO] = True
return data
@property
def assumed_state(self):
"""Test if any member has an assumed state."""
return self._assumed_state
def update_tracked_entity_ids(self, entity_ids):
"""Update the member entity IDs."""
asyncio.run_coroutine_threadsafe(
self.async_update_tracked_entity_ids(entity_ids), self.hass.loop
).result()
async def async_update_tracked_entity_ids(self, entity_ids):
"""Update the member entity IDs.
This method must be run in the event loop.
"""
self._async_stop()
self._set_tracked(entity_ids)
self._reset_tracked_state()
self._async_start()
def _set_tracked(self, entity_ids):
"""Tuple of entities to be tracked."""
# tracking are the entities we want to track
# trackable are the entities we actually watch
if not entity_ids:
self.tracking = ()
self.trackable = ()
return
excluded_domains = self.hass.data[REG_KEY].exclude_domains
tracking = []
trackable = []
for ent_id in entity_ids:
ent_id_lower = ent_id.lower()
domain = split_entity_id(ent_id_lower)[0]
tracking.append(ent_id_lower)
if domain not in excluded_domains:
trackable.append(ent_id_lower)
self.trackable = tuple(trackable)
self.tracking = tuple(tracking)
@callback
def _async_start(self, *_):
"""Start tracking members and write state."""
self._reset_tracked_state()
self._async_start_tracking()
self.async_write_ha_state()
@callback
def _async_start_tracking(self):
"""Start tracking members.
This method must be run in the event loop.
"""
if self.trackable and self._async_unsub_state_changed is None:
self._async_unsub_state_changed = async_track_state_change_event(
self.hass, self.trackable, self._async_state_changed_listener
)
self._async_update_group_state()
@callback
def _async_stop(self):
"""Unregister the group from Home Assistant.
This method must be run in the event loop.
"""
if self._async_unsub_state_changed:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None
async def async_update(self):
"""Query all members and determine current group state."""
self._state = None
self._async_update_group_state()
async def async_added_to_hass(self):
"""Handle addition to Home Assistant."""
if self.hass.state != CoreState.running:
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, self._async_start
)
return
if self.tracking:
self._reset_tracked_state()
self._async_start_tracking()
async def async_will_remove_from_hass(self):
"""Handle removal from Home Assistant."""
self._async_stop()
async def _async_state_changed_listener(self, event):
"""Respond to a member state changing.
This method must be run in the event loop.
"""
# removed
if self._async_unsub_state_changed is None:
return
self.async_set_context(event.context)
new_state = event.data.get("new_state")
if new_state is None:
# The state was removed from the state machine
self._reset_tracked_state()
self._async_update_group_state(new_state)
self.async_write_ha_state()
def _reset_tracked_state(self):
"""Reset tracked state."""
self._on_off = {}
self._assumed = {}
self._on_states = set()
for entity_id in self.trackable:
state = self.hass.states.get(entity_id)
if state is not None:
self._see_state(state)
def _see_state(self, new_state):
"""Keep track of the the state."""
entity_id = new_state.entity_id
domain = new_state.domain
state = new_state.state
registry = self.hass.data[REG_KEY]
self._assumed[entity_id] = new_state.attributes.get(ATTR_ASSUMED_STATE)
if domain not in registry.on_states_by_domain:
# Handle the group of a group case
if state in registry.on_off_mapping:
self._on_states.add(state)
elif state in registry.off_on_mapping:
self._on_states.add(registry.off_on_mapping[state])
self._on_off[entity_id] = state in registry.on_off_mapping
else:
entity_on_state = registry.on_states_by_domain[domain]
if domain in self.hass.data[REG_KEY].on_states_by_domain:
self._on_states.update(entity_on_state)
self._on_off[entity_id] = state in entity_on_state
@callback
def _async_update_group_state(self, tr_state=None):
"""Update group state.
Optionally you can provide the only state changed since last update
allowing this method to take shortcuts.
This method must be run in the event loop.
"""
# To store current states of group entities. Might not be needed.
if tr_state:
self._see_state(tr_state)
if not self._on_off:
return
if (
tr_state is None
or self._assumed_state
and not tr_state.attributes.get(ATTR_ASSUMED_STATE)
):
self._assumed_state = self.mode(self._assumed.values())
elif tr_state.attributes.get(ATTR_ASSUMED_STATE):
self._assumed_state = True
num_on_states = len(self._on_states)
        # If all the entity domains we are tracking share the same "on" state,
        # we use that state and map it to "off" via
        # hass.data[REG_KEY].on_off_mapping.
if num_on_states == 1:
on_state = list(self._on_states)[0]
# If we do not have an on state for any domains
# we use None (which will be STATE_UNKNOWN)
elif num_on_states == 0:
self._state = None
return
# If the entity domains have more than one
# on state, we use STATE_ON/STATE_OFF
else:
on_state = STATE_ON
group_is_on = self.mode(self._on_off.values())
if group_is_on:
self._state = on_state
else:
self._state = self.hass.data[REG_KEY].on_off_mapping[on_state]
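# Illustrative usage sketch, not part of the integration: creating a group from
# custom code with the factory above. "hass" is assumed to be a running
# HomeAssistant instance and the entity ids are hypothetical.
async def _example_create_kitchen_group(hass: HomeAssistant) -> str:
    group = await Group.async_create_group(
        hass,
        "Kitchen",
        entity_ids=["light.kitchen_ceiling", "switch.kitchen_kettle"],
        object_id="kitchen",
        mode=False,  # falsy -> any member on turns the group on; True requires all
    )
    return group.entity_id  # e.g. "group.kitchen"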
|
|
#!/usr/bin/python
import sys
if len (sys.argv) != 4:
print >>sys.stderr, "usage: ./gen-indic-table.py IndicSyllabicCategory.txt IndicMatraCategory.txt Blocks.txt"
sys.exit (1)
BLACKLISTED_BLOCKS = ["Thai", "Lao", "Tibetan"]
files = [file (x) for x in sys.argv[1:]]
headers = [[f.readline () for i in range (2)] for f in files]
data = [{} for f in files]
values = [{} for f in files]
for i, f in enumerate (files):
for line in f:
j = line.find ('#')
if j >= 0:
line = line[:j]
fields = [x.strip () for x in line.split (';')]
if len (fields) == 1:
continue
uu = fields[0].split ('..')
start = int (uu[0], 16)
if len (uu) == 1:
end = start
else:
end = int (uu[1], 16)
t = fields[1]
for u in range (start, end + 1):
data[i][u] = t
values[i][t] = values[i].get (t, 0) + end - start + 1
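# At this point a line such as "0915..0939; Consonant" (the usual UCD range
# format, assumed here) has set data[i][0x0915] .. data[i][0x0939] to
# 'Consonant' and increased values[i]['Consonant'] by the size of the range.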
# Merge data into one dict:
defaults = ('Other', 'Not_Applicable', 'No_Block')
for i,v in enumerate (defaults):
values[i][v] = values[i].get (v, 0) + 1
combined = {}
for i,d in enumerate (data):
for u,v in d.items ():
if i == 2 and not u in combined:
continue
if not u in combined:
combined[u] = list (defaults)
combined[u][i] = v
combined = {k:v for k,v in combined.items() if v[2] not in BLACKLISTED_BLOCKS}
data = combined
del combined
num = len (data)
for u in [0x17CD, 0x17CE, 0x17CF, 0x17D0, 0x17D3]:
if data[u][0] == 'Other':
data[u][0] = "Vowel_Dependent"
# Move the outliers NO-BREAK SPACE and DOTTED CIRCLE out
singles = {}
for u in [0x00A0, 0x25CC]:
singles[u] = data[u]
del data[u]
print "/* == Start of generated table == */"
print "/*"
print " * The following table is generated by running:"
print " *"
print " * ./gen-indic-table.py IndicSyllabicCategory.txt IndicMatraCategory.txt Blocks.txt"
print " *"
print " * on files with these headers:"
print " *"
for h in headers:
for l in h:
print " * %s" % (l.strip())
print " */"
print
print '#include "hb-ot-shape-complex-indic-private.hh"'
print
# Shorten values
short = [{
"Bindu": 'Bi',
"Cantillation_Mark": 'Ca',
"Joiner": 'ZWJ',
"Non_Joiner": 'ZWNJ',
"Number": 'Nd',
"Visarga": 'Vs',
"Vowel": 'Vo',
"Vowel_Dependent": 'M',
"Consonant_Prefixed": 'CPrf',
"Other": 'x',
},{
"Not_Applicable": 'x',
}]
all_shorts = [{},{}]
# Add some of the values, to make them more readable, and to avoid duplicates
for i in range (2):
for v,s in short[i].items ():
all_shorts[i][s] = v
what = ["INDIC_SYLLABIC_CATEGORY", "INDIC_MATRA_CATEGORY"]
what_short = ["ISC", "IMC"]
for i in range (2):
print
vv = values[i].keys ()
vv.sort ()
for v in vv:
v_no_and = v.replace ('_And_', '_')
if v in short[i]:
s = short[i][v]
else:
s = ''.join ([c for c in v_no_and if ord ('A') <= ord (c) <= ord ('Z')])
if s in all_shorts[i]:
raise Exception ("Duplicate short value alias", v, all_shorts[i][s])
all_shorts[i][s] = v
short[i][v] = s
print "#define %s_%s %s_%s %s/* %3d chars; %s */" % \
(what_short[i], s, what[i], v.upper (), \
' '* ((48-1 - len (what[i]) - 1 - len (v)) / 8), \
values[i][v], v)
print
print "#define _(S,M) INDIC_COMBINE_CATEGORIES (ISC_##S, IMC_##M)"
print
print
total = 0
used = 0
last_block = None
def print_block (block, start, end, data):
global total, used, last_block
if block and block != last_block:
print
print
print " /* %s */" % block
num = 0
assert start % 8 == 0
assert (end+1) % 8 == 0
for u in range (start, end+1):
if u % 8 == 0:
print
print " /* %04X */" % u,
if u in data:
num += 1
d = data.get (u, defaults)
sys.stdout.write ("%9s" % ("_(%s,%s)," % (short[0][d[0]], short[1][d[1]])))
total += end - start + 1
used += num
if block:
last_block = block
uu = data.keys ()
uu.sort ()
last = -100000
num = 0
offset = 0
starts = []
ends = []
print "static const INDIC_TABLE_ELEMENT_TYPE indic_table[] = {"
for u in uu:
if u <= last:
continue
block = data[u][2]
start = u//8*8
end = start+1
while end in uu and block == data[end][2]:
end += 1
end = (end-1)//8*8 + 7
if start != last + 1:
if start - last <= 1+16*3:
print_block (None, last+1, start-1, data)
last = start-1
else:
if last >= 0:
ends.append (last + 1)
offset += ends[-1] - starts[-1]
print
print
print "#define indic_offset_0x%04xu %d" % (start, offset)
starts.append (start)
print_block (block, start, end, data)
last = end
ends.append (last + 1)
offset += ends[-1] - starts[-1]
print
print
occupancy = used * 100. / total
page_bits = 12
print "}; /* Table items: %d; occupancy: %d%% */" % (offset, occupancy)
print
print "INDIC_TABLE_ELEMENT_TYPE"
print "hb_indic_get_categories (hb_codepoint_t u)"
print "{"
print " switch (u >> %d)" % page_bits
print " {"
pages = set([u>>page_bits for u in starts+ends+singles.keys()])
for p in sorted(pages):
print " case 0x%0Xu:" % p
for (start,end) in zip (starts, ends):
if p not in [start>>page_bits, end>>page_bits]: continue
offset = "indic_offset_0x%04xu" % start
print " if (hb_in_range (u, 0x%04Xu, 0x%04Xu)) return indic_table[u - 0x%04Xu + %s];" % (start, end-1, start, offset)
for u,d in singles.items ():
if p != u>>page_bits: continue
print " if (unlikely (u == 0x%04Xu)) return _(%s,%s);" % (u, short[0][d[0]], short[1][d[1]])
print " break;"
print ""
print " default:"
print " break;"
print " }"
print " return _(x,x);"
print "}"
print
print "#undef _"
for i in range (2):
print
vv = values[i].keys ()
vv.sort ()
for v in vv:
print "#undef %s_%s" % \
(what_short[i], short[i][v])
print
print "/* == End of generated table == */"
# Maintain at least 30% occupancy in the table
if occupancy < 30:
raise Exception ("Table too sparse, please investigate: ", occupancy)
|
|
"""Utilities to work with Jedi.
Translates pygls types back and forth with Jedi
"""
from inspect import Parameter
from typing import Dict, List, Optional
import docstring_to_markdown
import jedi.api.errors
import jedi.inference.references
import jedi.settings
from jedi import Project, Script
from jedi.api.classes import Completion, Name, ParamName, Signature
from pygls.lsp.types import (
CompletionItem,
CompletionItemKind,
Diagnostic,
DiagnosticSeverity,
DocumentSymbol,
InsertTextFormat,
Location,
MarkupContent,
MarkupKind,
Position,
Range,
SymbolInformation,
SymbolKind,
)
from pygls.workspace import Document
from .initialization_options import InitializationOptions
from .type_map import get_lsp_completion_type, get_lsp_symbol_type
def set_jedi_settings( # pylint: disable=invalid-name
initialization_options: InitializationOptions,
) -> None:
"""Sets jedi settings."""
jedi.settings.auto_import_modules = list(
set(
jedi.settings.auto_import_modules
+ initialization_options.jedi_settings.auto_import_modules
)
)
jedi.settings.case_insensitive_completion = (
initialization_options.jedi_settings.case_insensitive_completion
)
def script(project: Optional[Project], document: Document) -> Script:
"""Simplifies getting jedi Script."""
return Script(code=document.source, path=document.path, project=project)
def lsp_range(name: Name) -> Range:
"""Get LSP range from Jedi definition.
- jedi is 1-indexed for lines and 0-indexed for columns
- LSP is 0-indexed for lines and 0-indexed for columns
- Therefore, subtract 1 from Jedi's definition line
"""
return Range(
start=Position(line=name.line - 1, character=name.column),
end=Position(
line=name.line - 1,
character=name.column + len(name.name),
),
)
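# For example, a Name "foo" that Jedi reports at line 10, column 4 maps to
# Range(start=Position(line=9, character=4), end=Position(line=9, character=7)).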
def lsp_location(name: Name) -> Location:
"""Get LSP location from Jedi definition."""
return Location(uri=name.module_path.as_uri(), range=lsp_range(name))
def lsp_symbol_information(name: Name) -> SymbolInformation:
"""Get LSP SymbolInformation from Jedi definition."""
return SymbolInformation(
name=name.name,
kind=get_lsp_symbol_type(name.type),
location=lsp_location(name),
container_name=(
"None" if name is None else (name.full_name or name.name or "None")
),
)
def _document_symbol_range(name: Name) -> Range:
"""Get accurate full range of function.
Thanks <https://github.com/CXuesong> from
<https://github.com/palantir/python-language-server/pull/537/files> for the
inspiration!
Note: I add tons of extra space to make dictionary completions work. Jedi
cuts off the end sometimes before the final function statement. This may be
the cause of bugs at some point.
"""
start = name.get_definition_start_position()
end = name.get_definition_end_position()
if start is None or end is None:
return lsp_range(name)
(start_line, start_column) = start
(end_line, end_column) = end
return Range(
start=Position(line=start_line - 1, character=start_column),
end=Position(line=end_line - 1, character=end_column),
)
def lsp_document_symbols(names: List[Name]) -> List[DocumentSymbol]:
"""Get hierarchical symbols.
We do some cleaning here. Names from scopes that aren't directly
accessible with dot notation are removed from display. See comments
inline for cleaning steps.
"""
_name_lookup: Dict[Name, DocumentSymbol] = {}
results: List[DocumentSymbol] = []
for name in names:
symbol = DocumentSymbol(
name=name.name,
kind=get_lsp_symbol_type(name.type),
range=_document_symbol_range(name),
selection_range=lsp_range(name),
detail=name.description,
children=[],
)
parent = name.parent()
if parent.type == "module":
# add module-level variables to list
results.append(symbol)
if name.type == "class":
# if they're a class, they can also be a namespace
_name_lookup[name] = symbol
elif (
parent.type == "class"
and name.type == "function"
and name.name in {"__init__"}
):
# special case for __init__ method in class; names defined here
symbol.kind = SymbolKind.Method
parent_symbol = _name_lookup[parent]
assert parent_symbol.children is not None
parent_symbol.children.append(symbol)
_name_lookup[name] = symbol
elif parent not in _name_lookup:
# unqualified names are not included in the tree
continue
elif name.is_side_effect() and name.get_line_code().strip().startswith(
"self."
):
# handle attribute creation on __init__ method
symbol.kind = SymbolKind.Property
parent_symbol = _name_lookup[parent]
assert parent_symbol.children is not None
parent_symbol.children.append(symbol)
elif parent.type == "class":
# children are added for class scopes
if name.type == "function":
                # No way to identify @property decorated items. That said, as
                # far as code is concerned, @property-decorated items should be
                # considered "methods" since they do more than just assign a value.
symbol.kind = SymbolKind.Method
else:
symbol.kind = SymbolKind.Property
parent_symbol = _name_lookup[parent]
assert parent_symbol.children is not None
parent_symbol.children.append(symbol)
return results
def lsp_diagnostic(error: jedi.api.errors.SyntaxError) -> Diagnostic:
"""Get LSP Diagnostic from Jedi SyntaxError."""
return Diagnostic(
range=Range(
start=Position(line=error.line - 1, character=error.column),
end=Position(
line=error.until_line - 1, character=error.until_column
),
),
message=error.get_message(),
severity=DiagnosticSeverity.Error,
source="jedi",
)
def line_column(jedi_script: Script, position: Position) -> Dict[str, int]:
"""Translate pygls Position to Jedi's line/column.
Returns a dictionary because this return result should be unpacked as a
function argument to Jedi's functions.
Jedi is 1-indexed for lines and 0-indexed for columns. LSP is 0-indexed for
lines and 0-indexed for columns. Therefore, add 1 to LSP's request for the
line.
Note: as of version 3.15, LSP's treatment of "position" conflicts with
Jedi in some cases. According to the LSP docs:
Character offset on a line in a document (zero-based). Assuming that
the line is represented as a string, the `character` value represents
the gap between the `character` and `character + 1`.
If the character value is greater than the line length it defaults back
to the line length.
Sources:
https://microsoft.github.io/language-server-protocol/specification#position
https://github.com/palantir/python-language-server/pull/201/files
"""
lines = jedi_script._code_lines # pylint: disable=protected-access
line_length = len(lines[position.line])
return dict(
line=position.line + 1,
column=min(position.character, line_length - 1 if line_length else 0),
)
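# For example, Position(line=9, character=4) becomes {"line": 10, "column": 4}
# as long as line 10 is at least five characters long; a client column past the
# end of the line is clamped to the last character.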
def line_column_range(pygls_range: Range) -> Dict[str, int]:
"""Translate pygls range to Jedi's line/column/until_line/until_column.
Returns a dictionary because this return result should be unpacked as a
function argument to Jedi's functions.
Jedi is 1-indexed for lines and 0-indexed for columns. LSP is 0-indexed for
lines and 0-indexed for columns. Therefore, add 1 to LSP's request for the
line.
"""
return dict(
line=pygls_range.start.line + 1,
column=pygls_range.start.character,
until_line=pygls_range.end.line + 1,
until_column=pygls_range.end.character,
)
def compare_names(name1: Name, name2: Name) -> bool:
"""Check if one Name is equal to another.
This function, while trivial, is useful for documenting types
without needing to directly import anything from jedi into
`server.py`
"""
equal: bool = name1 == name2
return equal
def complete_sort_name(name: Completion) -> str:
"""Return sort name for a jedi completion.
Should be passed to the sortText field in CompletionItem. Strings sort a-z,
a comes first and z comes last.
Additionally, we'd like to keep the sort order to what Jedi has provided.
For this reason, we make sure the sort-text is just a letter and not the
name itself.
"""
if name.type == "param" and name.name.endswith("="):
return "a"
return "z"
def clean_completion_name(name: str, char_before_cursor: str) -> str:
"""Clean the completion name, stripping bad surroundings.
    1. Remove all surrounding " and '.
"""
if char_before_cursor in {"'", '"'}:
return name.lstrip(char_before_cursor)
return name
_POSITION_PARAMETERS = {
Parameter.POSITIONAL_ONLY,
Parameter.POSITIONAL_OR_KEYWORD,
}
_PARAM_NAME_IGNORE = {"/", "*"}
def get_snippet_signature(signature: Signature) -> str:
"""Return the snippet signature."""
params: List[ParamName] = signature.params
if not params:
return "()$0"
signature_list = []
count = 1
for param in params:
param_name = param.name
if param_name in _PARAM_NAME_IGNORE:
continue
if param.kind in _POSITION_PARAMETERS:
param_str = param.to_string()
if "=" in param_str: # hacky default argument check
break
result = "${" + f"{count}:{param_name}" + "}"
signature_list.append(result)
count += 1
continue
if not signature_list:
return "($0)"
return "(" + ", ".join(signature_list) + ")$0"
def is_import(script_: Script, line: int, column: int) -> bool:
"""Check whether a position is a Jedi import.
`line` and `column` are Jedi lines and columns
    NOTE: this function is a bit of a hack and should be revisited with each
    Jedi release. Additionally, it doesn't really work for manually-triggered
    completions without any text, which may cause issues for users who trigger
    completions manually.
"""
# pylint: disable=protected-access
tree_name = script_._module_node.get_name_of_position((line, column))
if tree_name is None:
return False
name = script_._get_module_context().create_name(tree_name)
if name is None:
return False
name_is_import: bool = name.is_import()
return name_is_import
_LSP_TYPE_FOR_SNIPPET = {
CompletionItemKind.Class,
CompletionItemKind.Function,
}
_MOST_RECENT_COMPLETIONS: Dict[str, Completion] = {}
def clear_completions_cache() -> None:
"""Clears the cache of completions used for completionItem/resolve."""
_MOST_RECENT_COMPLETIONS.clear()
def lsp_completion_item(
completion: Completion,
char_before_cursor: str,
enable_snippets: bool,
resolve_eagerly: bool,
markup_kind: MarkupKind,
) -> CompletionItem:
"""Using a Jedi completion, obtain a jedi completion item."""
completion_name = completion.name
name_clean = clean_completion_name(completion_name, char_before_cursor)
lsp_type = get_lsp_completion_type(completion.type)
completion_item = CompletionItem(
label=completion_name,
filter_text=completion_name,
kind=lsp_type,
sort_text=complete_sort_name(completion),
insert_text=name_clean,
insert_text_format=InsertTextFormat.PlainText,
)
_MOST_RECENT_COMPLETIONS[completion_name] = completion
if resolve_eagerly:
completion_item = lsp_completion_item_resolve(
completion_item, markup_kind=markup_kind
)
if not enable_snippets:
return completion_item
if lsp_type not in _LSP_TYPE_FOR_SNIPPET:
return completion_item
signatures = completion.get_signatures()
if not signatures:
return completion_item
try:
snippet_signature = get_snippet_signature(signatures[0])
except Exception: # pylint: disable=broad-except
return completion_item
new_text = completion_name + snippet_signature
completion_item.insert_text = new_text
completion_item.insert_text_format = InsertTextFormat.Snippet
return completion_item
def convert_docstring(docstring: str, markup_kind: MarkupKind) -> str:
"""Take a docstring and convert it to markup kind if possible.
Currently only supports markdown conversion; MarkupKind can only be
plaintext or markdown as of LSP 3.16.
NOTE: Since docstring_to_markdown is a new library, I add broad exception
handling in case docstring_to_markdown.convert produces unexpected
behavior.
"""
if markup_kind == MarkupKind.Markdown:
try:
return docstring_to_markdown.convert(docstring)
except docstring_to_markdown.UnknownFormatError:
return (
"```\n" + str(docstring) + "\n```\n"
if docstring
else docstring
)
except Exception as error: # pylint: disable=broad-except
return (
docstring
+ "\n"
+ "jedi-language-server error: "
+ "Uncaught exception while converting docstring to markdown. "
+ "Please open issue at "
+ "https://github.com/pappasam/jedi-language-server/issues. "
+ f"Traceback:\n{error}"
)
return docstring
def lsp_completion_item_resolve(
item: CompletionItem,
markup_kind: MarkupKind,
) -> CompletionItem:
"""Resolve completion item using cached jedi completion data."""
completion = _MOST_RECENT_COMPLETIONS[item.label]
item.detail = completion.description
docstring = convert_docstring(completion.docstring(), markup_kind)
item.documentation = MarkupContent(kind=markup_kind, value=docstring)
return item
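# Illustrative sketch, not part of this module: combining a few helpers above
# to produce LSP document symbols for an open Document. Script.get_names() is
# Jedi API; the keyword arguments shown are an assumption about how a caller
# might use it.
def _example_document_symbols(document: Document) -> List[DocumentSymbol]:
    jedi_script = script(None, document)
    names = jedi_script.get_names(all_scopes=True, definitions=True)
    return lsp_document_symbols(names)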
|
|
#
# Tables describing slots in the CPython type object
# and associated know-how.
#
import Naming
import PyrexTypes
import StringEncoding
invisible = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__']
class Signature(object):
# Method slot signature descriptor.
#
# has_dummy_arg boolean
# has_generic_args boolean
# fixed_arg_format string
# ret_format string
# error_value string
#
# The formats are strings made up of the following
# characters:
#
# 'O' Python object
# 'T' Python object of the type of 'self'
# 'v' void
# 'p' void *
# 'P' void **
# 'i' int
# 'b' bint
# 'I' int *
# 'l' long
# 'f' float
# 'd' double
# 'h' Py_hash_t
# 'z' Py_ssize_t
# 'Z' Py_ssize_t *
# 's' char *
# 'S' char **
# 'r' int used only to signal exception
# 'B' Py_buffer *
# '-' dummy 'self' argument (not used)
# '*' rest of args passed as generic Python
# arg tuple and kw dict (must be last
# char in format string)
format_map = {
'O': PyrexTypes.py_object_type,
'v': PyrexTypes.c_void_type,
'p': PyrexTypes.c_void_ptr_type,
'P': PyrexTypes.c_void_ptr_ptr_type,
'i': PyrexTypes.c_int_type,
'b': PyrexTypes.c_bint_type,
'I': PyrexTypes.c_int_ptr_type,
'l': PyrexTypes.c_long_type,
'f': PyrexTypes.c_float_type,
'd': PyrexTypes.c_double_type,
'h': PyrexTypes.c_py_hash_t_type,
'z': PyrexTypes.c_py_ssize_t_type,
'Z': PyrexTypes.c_py_ssize_t_ptr_type,
's': PyrexTypes.c_char_ptr_type,
'S': PyrexTypes.c_char_ptr_ptr_type,
'r': PyrexTypes.c_returncode_type,
'B': PyrexTypes.c_py_buffer_ptr_type,
# 'T', '-' and '*' are handled otherwise
# and are not looked up in here
}
type_to_format_map = dict([(type_, format_)
for format_, type_ in format_map.iteritems()])
error_value_map = {
'O': "NULL",
'T': "NULL",
'i': "-1",
'b': "-1",
'l': "-1",
'r': "-1",
'h': "-1",
'z': "-1",
}
def __init__(self, arg_format, ret_format):
self.has_dummy_arg = 0
self.has_generic_args = 0
if arg_format[:1] == '-':
self.has_dummy_arg = 1
arg_format = arg_format[1:]
if arg_format[-1:] == '*':
self.has_generic_args = 1
arg_format = arg_format[:-1]
self.fixed_arg_format = arg_format
self.ret_format = ret_format
self.error_value = self.error_value_map.get(ret_format, None)
self.is_staticmethod = False
def num_fixed_args(self):
return len(self.fixed_arg_format)
def is_self_arg(self, i):
# argument is 'self' for methods or 'class' for classmethods
return self.fixed_arg_format[i] == 'T'
def returns_self_type(self):
# return type is same as 'self' argument type
return self.ret_format == 'T'
def fixed_arg_type(self, i):
return self.format_map[self.fixed_arg_format[i]]
def return_type(self):
return self.format_map[self.ret_format]
def format_from_type(self, arg_type):
if arg_type.is_pyobject:
arg_type = PyrexTypes.py_object_type
return self.type_to_format_map[arg_type]
def exception_value(self):
return self.error_value_map.get(self.ret_format)
def function_type(self, self_arg_override=None):
# Construct a C function type descriptor for this signature
args = []
for i in xrange(self.num_fixed_args()):
if self_arg_override is not None and self.is_self_arg(i):
assert isinstance(self_arg_override, PyrexTypes.CFuncTypeArg)
args.append(self_arg_override)
else:
arg_type = self.fixed_arg_type(i)
args.append(PyrexTypes.CFuncTypeArg("", arg_type, None))
if self_arg_override is not None and self.returns_self_type():
ret_type = self_arg_override.type
else:
ret_type = self.return_type()
exc_value = self.exception_value()
return PyrexTypes.CFuncType(ret_type, args, exception_value = exc_value)
def method_flags(self):
if self.ret_format == "O":
full_args = self.fixed_arg_format
if self.has_dummy_arg:
full_args = "O" + full_args
if full_args in ["O", "T"]:
if self.has_generic_args:
return [method_varargs, method_keywords]
else:
return [method_noargs]
elif full_args in ["OO", "TO"] and not self.has_generic_args:
return [method_onearg]
if self.is_staticmethod:
return [method_varargs, method_keywords]
return None
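# For example, Signature("TO", "O") (ibinaryfunc below) describes a slot that
# takes 'self' plus one PyObject* argument and returns a PyObject*, with NULL
# as its error value; Signature("T", "z") (lenfunc) returns a Py_ssize_t and
# signals errors with -1.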
class SlotDescriptor(object):
# Abstract base class for type slot descriptors.
#
# slot_name string Member name of the slot in the type object
# is_initialised_dynamically Is initialised by code in the module init function
# py3 Indicates presence of slot in Python 3
# py2 Indicates presence of slot in Python 2
    # ifdef Full #ifdef string that slot is wrapped in. Using this causes py3, py2 and flags to be ignored.
def __init__(self, slot_name, dynamic=0,
py3=True, py2=True, ifdef=None):
self.slot_name = slot_name
self.is_initialised_dynamically = dynamic
self.ifdef = ifdef
self.py3 = py3
self.py2 = py2
def preprocessor_guard_code(self):
ifdef = self.ifdef
py2 = self.py2
py3 = self.py3
guard = None
if ifdef:
guard = ("#if %s" % ifdef)
elif not py3 or py3 == '<RESERVED>':
guard = ("#if PY_MAJOR_VERSION < 3")
elif not py2:
guard = ("#if PY_MAJOR_VERSION >= 3")
return guard
def generate(self, scope, code):
if self.is_initialised_dynamically:
value = 0
else:
value = self.slot_code(scope)
preprocessor_guard = self.preprocessor_guard_code()
if preprocessor_guard:
code.putln(preprocessor_guard)
code.putln("%s, /*%s*/" % (value, self.slot_name))
if self.py3 == '<RESERVED>':
code.putln("#else")
code.putln("0, /*reserved*/")
if preprocessor_guard:
code.putln("#endif")
# Some C implementations have trouble statically
# initialising a global with a pointer to an extern
# function, so we initialise some of the type slots
# in the module init function instead.
def generate_dynamic_init_code(self, scope, code):
if self.is_initialised_dynamically:
value = self.slot_code(scope)
if value != "0":
code.putln("%s.%s = %s;" % (
scope.parent_type.typeobj_cname,
self.slot_name,
value
)
)
class FixedSlot(SlotDescriptor):
# Descriptor for a type slot with a fixed value.
#
# value string
def __init__(self, slot_name, value, py3=True, py2=True, ifdef=None):
SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
self.value = value
def slot_code(self, scope):
return self.value
class EmptySlot(FixedSlot):
# Descriptor for a type slot whose value is always 0.
def __init__(self, slot_name, py3=True, py2=True, ifdef=None):
FixedSlot.__init__(self, slot_name, "0", py3=py3, py2=py2, ifdef=ifdef)
class MethodSlot(SlotDescriptor):
# Type slot descriptor for a user-definable method.
#
# signature Signature
# method_name string The __xxx__ name of the method
# alternatives [string] Alternative list of __xxx__ names for the method
def __init__(self, signature, slot_name, method_name, fallback=None,
py3=True, py2=True, ifdef=None):
SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
self.signature = signature
self.slot_name = slot_name
self.method_name = method_name
self.alternatives = []
method_name_to_slot[method_name] = self
#
if fallback:
self.alternatives.append(fallback)
for alt in (self.py2, self.py3):
if isinstance(alt, (tuple, list)):
slot_name, method_name = alt
self.alternatives.append(method_name)
method_name_to_slot[method_name] = self
def slot_code(self, scope):
entry = scope.lookup_here(self.method_name)
if entry and entry.func_cname:
return entry.func_cname
for method_name in self.alternatives:
entry = scope.lookup_here(method_name)
if entry and entry.func_cname:
return entry.func_cname
return "0"
class InternalMethodSlot(SlotDescriptor):
# Type slot descriptor for a method which is always
# synthesized by Cython.
#
# slot_name string Member name of the slot in the type object
def __init__(self, slot_name, **kargs):
SlotDescriptor.__init__(self, slot_name, **kargs)
def slot_code(self, scope):
return scope.mangle_internal(self.slot_name)
class GCDependentSlot(InternalMethodSlot):
# Descriptor for a slot whose value depends on whether
# the type participates in GC.
def __init__(self, slot_name, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
def slot_code(self, scope):
if not scope.needs_gc():
return "0"
if not scope.has_pyobject_attrs:
# if the type does not have object attributes, it can
# delegate GC methods to its parent - iff the parent
# functions are defined in the same module
parent_type_scope = scope.parent_type.base_type.scope
if scope.parent_scope is parent_type_scope.parent_scope:
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return self.slot_code(parent_type_scope)
return InternalMethodSlot.slot_code(self, scope)
class ConstructorSlot(InternalMethodSlot):
# Descriptor for tp_new and tp_dealloc.
def __init__(self, slot_name, method, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.method = method
def slot_code(self, scope):
if scope.parent_type.base_type \
and not scope.has_pyobject_attrs \
and not scope.lookup_here(self.method):
# if the type does not have object attributes, it can
# delegate GC methods to its parent - iff the parent
# functions are defined in the same module
parent_type_scope = scope.parent_type.base_type.scope
if scope.parent_scope is parent_type_scope.parent_scope:
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return self.slot_code(parent_type_scope)
return InternalMethodSlot.slot_code(self, scope)
class SyntheticSlot(InternalMethodSlot):
# Type slot descriptor for a synthesized method which
# dispatches to one or more user-defined methods depending
# on its arguments. If none of the relevant methods are
# defined, the method will not be synthesized and an
# alternative default value will be placed in the type
# slot.
def __init__(self, slot_name, user_methods, default_value, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.user_methods = user_methods
self.default_value = default_value
def slot_code(self, scope):
if scope.defines_any(self.user_methods):
return InternalMethodSlot.slot_code(self, scope)
else:
return self.default_value
class TypeFlagsSlot(SlotDescriptor):
# Descriptor for the type flags slot.
def slot_code(self, scope):
value = "Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER"
if not scope.parent_type.is_final_type:
value += "|Py_TPFLAGS_BASETYPE"
if scope.needs_gc():
value += "|Py_TPFLAGS_HAVE_GC"
return value
class DocStringSlot(SlotDescriptor):
# Descriptor for the docstring slot.
def slot_code(self, scope):
if scope.doc is not None:
if scope.doc.is_unicode:
doc = scope.doc.utf8encode()
else:
doc = scope.doc.byteencode()
return '__Pyx_DOCSTR("%s")' % StringEncoding.escape_byte_string(doc)
else:
return "0"
class SuiteSlot(SlotDescriptor):
# Descriptor for a substructure of the type object.
#
# sub_slots [SlotDescriptor]
def __init__(self, sub_slots, slot_type, slot_name):
SlotDescriptor.__init__(self, slot_name)
self.sub_slots = sub_slots
self.slot_type = slot_type
substructures.append(self)
def substructure_cname(self, scope):
return "%s%s_%s" % (Naming.pyrex_prefix, self.slot_name, scope.class_name)
def slot_code(self, scope):
return "&%s" % self.substructure_cname(scope)
def generate_substructure(self, scope, code):
code.putln("")
code.putln(
"static %s %s = {" % (
self.slot_type,
self.substructure_cname(scope)))
for slot in self.sub_slots:
slot.generate(scope, code)
code.putln("};")
substructures = [] # List of all SuiteSlot instances
class MethodTableSlot(SlotDescriptor):
# Slot descriptor for the method table.
def slot_code(self, scope):
return scope.method_table_cname
class MemberTableSlot(SlotDescriptor):
# Slot descriptor for the table of Python-accessible attributes.
def slot_code(self, scope):
return "0"
class GetSetSlot(SlotDescriptor):
# Slot descriptor for the table of attribute get & set methods.
def slot_code(self, scope):
if scope.property_entries:
return scope.getset_table_cname
else:
return "0"
class BaseClassSlot(SlotDescriptor):
# Slot descriptor for the base class slot.
def __init__(self, name):
SlotDescriptor.__init__(self, name, dynamic = 1)
def generate_dynamic_init_code(self, scope, code):
base_type = scope.parent_type.base_type
if base_type:
code.putln("%s.%s = %s;" % (
scope.parent_type.typeobj_cname,
self.slot_name,
base_type.typeptr_cname))
# The following dictionary maps __xxx__ method names to slot descriptors.
method_name_to_slot = {}
## The following slots are (or could be) initialised with an
## extern function pointer.
#
#slots_initialised_from_extern = (
# "tp_free",
#)
#------------------------------------------------------------------------------------------
#
# Utility functions for accessing slot table data structures
#
#------------------------------------------------------------------------------------------
def get_special_method_signature(name):
# Given a method name, if it is a special method,
# return its signature, else return None.
slot = method_name_to_slot.get(name)
if slot:
return slot.signature
else:
return None
def get_property_accessor_signature(name):
# Return signature of accessor for an extension type
# property, else None.
return property_accessor_signatures.get(name)
def get_base_slot_function(scope, slot):
# Returns the function implementing this slot in the baseclass.
# This is useful for enabling the compiler to optimize calls
# that recursively climb the class hierarchy.
base_type = scope.parent_type.base_type
if scope.parent_scope is base_type.scope.parent_scope:
parent_slot = slot.slot_code(base_type.scope)
if parent_slot != '0':
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return parent_slot
return None
#------------------------------------------------------------------------------------------
#
# Signatures for generic Python functions and methods.
#
#------------------------------------------------------------------------------------------
pyfunction_signature = Signature("-*", "O")
pymethod_signature = Signature("T*", "O")
#------------------------------------------------------------------------------------------
#
# Signatures for simple Python functions.
#
#------------------------------------------------------------------------------------------
pyfunction_noargs = Signature("-", "O")
pyfunction_onearg = Signature("-O", "O")
#------------------------------------------------------------------------------------------
#
# Signatures for the various kinds of function that
# can appear in the type object and its substructures.
#
#------------------------------------------------------------------------------------------
unaryfunc = Signature("T", "O") # typedef PyObject * (*unaryfunc)(PyObject *);
binaryfunc = Signature("OO", "O") # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
ibinaryfunc = Signature("TO", "O") # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
ternaryfunc = Signature("OOO", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
iternaryfunc = Signature("TOO", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
callfunc = Signature("T*", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
inquiry = Signature("T", "i") # typedef int (*inquiry)(PyObject *);
lenfunc = Signature("T", "z") # typedef Py_ssize_t (*lenfunc)(PyObject *);
# typedef int (*coercion)(PyObject **, PyObject **);
intargfunc = Signature("Ti", "O") # typedef PyObject *(*intargfunc)(PyObject *, int);
ssizeargfunc = Signature("Tz", "O") # typedef PyObject *(*ssizeargfunc)(PyObject *, Py_ssize_t);
intintargfunc = Signature("Tii", "O") # typedef PyObject *(*intintargfunc)(PyObject *, int, int);
ssizessizeargfunc = Signature("Tzz", "O") # typedef PyObject *(*ssizessizeargfunc)(PyObject *, Py_ssize_t, Py_ssize_t);
intobjargproc = Signature("TiO", 'r') # typedef int(*intobjargproc)(PyObject *, int, PyObject *);
ssizeobjargproc = Signature("TzO", 'r') # typedef int(*ssizeobjargproc)(PyObject *, Py_ssize_t, PyObject *);
intintobjargproc = Signature("TiiO", 'r') # typedef int(*intintobjargproc)(PyObject *, int, int, PyObject *);
ssizessizeobjargproc = Signature("TzzO", 'r') # typedef int(*ssizessizeobjargproc)(PyObject *, Py_ssize_t, Py_ssize_t, PyObject *);
intintargproc = Signature("Tii", 'r')
ssizessizeargproc = Signature("Tzz", 'r')
objargfunc = Signature("TO", "O")
objobjargproc = Signature("TOO", 'r') # typedef int (*objobjargproc)(PyObject *, PyObject *, PyObject *);
readbufferproc = Signature("TzP", "z") # typedef Py_ssize_t (*readbufferproc)(PyObject *, Py_ssize_t, void **);
writebufferproc = Signature("TzP", "z") # typedef Py_ssize_t (*writebufferproc)(PyObject *, Py_ssize_t, void **);
segcountproc = Signature("TZ", "z") # typedef Py_ssize_t (*segcountproc)(PyObject *, Py_ssize_t *);
charbufferproc = Signature("TzS", "z") # typedef Py_ssize_t (*charbufferproc)(PyObject *, Py_ssize_t, char **);
objargproc = Signature("TO", 'r') # typedef int (*objobjproc)(PyObject *, PyObject *);
# typedef int (*visitproc)(PyObject *, void *);
# typedef int (*traverseproc)(PyObject *, visitproc, void *);
destructor = Signature("T", "v") # typedef void (*destructor)(PyObject *);
# printfunc = Signature("TFi", 'r') # typedef int (*printfunc)(PyObject *, FILE *, int);
# typedef PyObject *(*getattrfunc)(PyObject *, char *);
getattrofunc = Signature("TO", "O") # typedef PyObject *(*getattrofunc)(PyObject *, PyObject *);
# typedef int (*setattrfunc)(PyObject *, char *, PyObject *);
setattrofunc = Signature("TOO", 'r') # typedef int (*setattrofunc)(PyObject *, PyObject *, PyObject *);
delattrofunc = Signature("TO", 'r')
cmpfunc = Signature("TO", "i") # typedef int (*cmpfunc)(PyObject *, PyObject *);
reprfunc = Signature("T", "O") # typedef PyObject *(*reprfunc)(PyObject *);
hashfunc = Signature("T", "h") # typedef Py_hash_t (*hashfunc)(PyObject *);
# typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
richcmpfunc = Signature("OOi", "O") # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
getiterfunc = Signature("T", "O") # typedef PyObject *(*getiterfunc) (PyObject *);
iternextfunc = Signature("T", "O") # typedef PyObject *(*iternextfunc) (PyObject *);
descrgetfunc = Signature("TOO", "O") # typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *);
descrsetfunc = Signature("TOO", 'r') # typedef int (*descrsetfunc) (PyObject *, PyObject *, PyObject *);
descrdelfunc = Signature("TO", 'r')
initproc = Signature("T*", 'r') # typedef int (*initproc)(PyObject *, PyObject *, PyObject *);
# typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *);
# typedef PyObject *(*allocfunc)(struct _typeobject *, int);
getbufferproc = Signature("TBi", "r") # typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
releasebufferproc = Signature("TB", "v") # typedef void (*releasebufferproc)(PyObject *, Py_buffer *);
#------------------------------------------------------------------------------------------
#
# Signatures for accessor methods of properties.
#
#------------------------------------------------------------------------------------------
property_accessor_signatures = {
'__get__': Signature("T", "O"),
'__set__': Signature("TO", 'r'),
'__del__': Signature("T", 'r')
}
#------------------------------------------------------------------------------------------
#
# Descriptor tables for the slots of the various type object
# substructures, in the order they appear in the structure.
#
#------------------------------------------------------------------------------------------
PyNumberMethods = (
MethodSlot(binaryfunc, "nb_add", "__add__"),
MethodSlot(binaryfunc, "nb_subtract", "__sub__"),
MethodSlot(binaryfunc, "nb_multiply", "__mul__"),
MethodSlot(binaryfunc, "nb_divide", "__div__", py3 = False),
MethodSlot(binaryfunc, "nb_remainder", "__mod__"),
MethodSlot(binaryfunc, "nb_divmod", "__divmod__"),
MethodSlot(ternaryfunc, "nb_power", "__pow__"),
MethodSlot(unaryfunc, "nb_negative", "__neg__"),
MethodSlot(unaryfunc, "nb_positive", "__pos__"),
MethodSlot(unaryfunc, "nb_absolute", "__abs__"),
MethodSlot(inquiry, "nb_nonzero", "__nonzero__", py3 = ("nb_bool", "__bool__")),
MethodSlot(unaryfunc, "nb_invert", "__invert__"),
MethodSlot(binaryfunc, "nb_lshift", "__lshift__"),
MethodSlot(binaryfunc, "nb_rshift", "__rshift__"),
MethodSlot(binaryfunc, "nb_and", "__and__"),
MethodSlot(binaryfunc, "nb_xor", "__xor__"),
MethodSlot(binaryfunc, "nb_or", "__or__"),
EmptySlot("nb_coerce", py3 = False),
MethodSlot(unaryfunc, "nb_int", "__int__", fallback="__long__"),
MethodSlot(unaryfunc, "nb_long", "__long__", fallback="__int__", py3 = "<RESERVED>"),
MethodSlot(unaryfunc, "nb_float", "__float__"),
MethodSlot(unaryfunc, "nb_oct", "__oct__", py3 = False),
MethodSlot(unaryfunc, "nb_hex", "__hex__", py3 = False),
# Added in release 2.0
MethodSlot(ibinaryfunc, "nb_inplace_add", "__iadd__"),
MethodSlot(ibinaryfunc, "nb_inplace_subtract", "__isub__"),
MethodSlot(ibinaryfunc, "nb_inplace_multiply", "__imul__"),
MethodSlot(ibinaryfunc, "nb_inplace_divide", "__idiv__", py3 = False),
MethodSlot(ibinaryfunc, "nb_inplace_remainder", "__imod__"),
MethodSlot(ibinaryfunc, "nb_inplace_power", "__ipow__"), # actually ternaryfunc!!!
MethodSlot(ibinaryfunc, "nb_inplace_lshift", "__ilshift__"),
MethodSlot(ibinaryfunc, "nb_inplace_rshift", "__irshift__"),
MethodSlot(ibinaryfunc, "nb_inplace_and", "__iand__"),
MethodSlot(ibinaryfunc, "nb_inplace_xor", "__ixor__"),
MethodSlot(ibinaryfunc, "nb_inplace_or", "__ior__"),
# Added in release 2.2
# The following require the Py_TPFLAGS_HAVE_CLASS flag
MethodSlot(binaryfunc, "nb_floor_divide", "__floordiv__"),
MethodSlot(binaryfunc, "nb_true_divide", "__truediv__"),
MethodSlot(ibinaryfunc, "nb_inplace_floor_divide", "__ifloordiv__"),
MethodSlot(ibinaryfunc, "nb_inplace_true_divide", "__itruediv__"),
# Added in release 2.5
MethodSlot(unaryfunc, "nb_index", "__index__", ifdef = "PY_VERSION_HEX >= 0x02050000")
)
PySequenceMethods = (
MethodSlot(lenfunc, "sq_length", "__len__"),
EmptySlot("sq_concat"), # nb_add used instead
EmptySlot("sq_repeat"), # nb_multiply used instead
SyntheticSlot("sq_item", ["__getitem__"], "0"), #EmptySlot("sq_item"), # mp_subscript used instead
MethodSlot(ssizessizeargfunc, "sq_slice", "__getslice__"),
EmptySlot("sq_ass_item"), # mp_ass_subscript used instead
SyntheticSlot("sq_ass_slice", ["__setslice__", "__delslice__"], "0"),
MethodSlot(cmpfunc, "sq_contains", "__contains__"),
EmptySlot("sq_inplace_concat"), # nb_inplace_add used instead
EmptySlot("sq_inplace_repeat"), # nb_inplace_multiply used instead
)
PyMappingMethods = (
MethodSlot(lenfunc, "mp_length", "__len__"),
MethodSlot(objargfunc, "mp_subscript", "__getitem__"),
SyntheticSlot("mp_ass_subscript", ["__setitem__", "__delitem__"], "0"),
)
PyBufferProcs = (
MethodSlot(readbufferproc, "bf_getreadbuffer", "__getreadbuffer__", py3 = False),
MethodSlot(writebufferproc, "bf_getwritebuffer", "__getwritebuffer__", py3 = False),
MethodSlot(segcountproc, "bf_getsegcount", "__getsegcount__", py3 = False),
MethodSlot(charbufferproc, "bf_getcharbuffer", "__getcharbuffer__", py3 = False),
MethodSlot(getbufferproc, "bf_getbuffer", "__getbuffer__", ifdef = "PY_VERSION_HEX >= 0x02060000"),
MethodSlot(releasebufferproc, "bf_releasebuffer", "__releasebuffer__", ifdef = "PY_VERSION_HEX >= 0x02060000")
)
#------------------------------------------------------------------------------------------
#
# The main slot table. This table contains descriptors for all the
# top-level type slots, beginning with tp_dealloc, in the order they
# appear in the type object.
#
#------------------------------------------------------------------------------------------
slot_table = (
ConstructorSlot("tp_dealloc", '__dealloc__'),
EmptySlot("tp_print"), #MethodSlot(printfunc, "tp_print", "__print__"),
EmptySlot("tp_getattr"),
EmptySlot("tp_setattr"),
MethodSlot(cmpfunc, "tp_compare", "__cmp__", py3 = '<RESERVED>'),
MethodSlot(reprfunc, "tp_repr", "__repr__"),
SuiteSlot(PyNumberMethods, "PyNumberMethods", "tp_as_number"),
SuiteSlot(PySequenceMethods, "PySequenceMethods", "tp_as_sequence"),
SuiteSlot(PyMappingMethods, "PyMappingMethods", "tp_as_mapping"),
MethodSlot(hashfunc, "tp_hash", "__hash__"),
MethodSlot(callfunc, "tp_call", "__call__"),
MethodSlot(reprfunc, "tp_str", "__str__"),
SyntheticSlot("tp_getattro", ["__getattr__","__getattribute__"], "0"), #"PyObject_GenericGetAttr"),
SyntheticSlot("tp_setattro", ["__setattr__", "__delattr__"], "0"), #"PyObject_GenericSetAttr"),
SuiteSlot(PyBufferProcs, "PyBufferProcs", "tp_as_buffer"),
TypeFlagsSlot("tp_flags"),
DocStringSlot("tp_doc"),
GCDependentSlot("tp_traverse"),
GCDependentSlot("tp_clear"),
# Later -- synthesize a method to split into separate ops?
MethodSlot(richcmpfunc, "tp_richcompare", "__richcmp__"),
EmptySlot("tp_weaklistoffset"),
MethodSlot(getiterfunc, "tp_iter", "__iter__"),
MethodSlot(iternextfunc, "tp_iternext", "__next__"),
MethodTableSlot("tp_methods"),
MemberTableSlot("tp_members"),
GetSetSlot("tp_getset"),
BaseClassSlot("tp_base"), #EmptySlot("tp_base"),
EmptySlot("tp_dict"),
SyntheticSlot("tp_descr_get", ["__get__"], "0"),
SyntheticSlot("tp_descr_set", ["__set__", "__delete__"], "0"),
EmptySlot("tp_dictoffset"),
MethodSlot(initproc, "tp_init", "__init__"),
EmptySlot("tp_alloc"), #FixedSlot("tp_alloc", "PyType_GenericAlloc"),
InternalMethodSlot("tp_new"),
EmptySlot("tp_free"),
EmptySlot("tp_is_gc"),
EmptySlot("tp_bases"),
EmptySlot("tp_mro"),
EmptySlot("tp_cache"),
EmptySlot("tp_subclasses"),
EmptySlot("tp_weaklist"),
EmptySlot("tp_del"),
EmptySlot("tp_version_tag", ifdef="PY_VERSION_HEX >= 0x02060000"),
)
#------------------------------------------------------------------------------------------
#
# Descriptors for special methods which don't appear directly
# in the type object or its substructures. These methods are
# called from slot functions synthesized by Cython.
#
#------------------------------------------------------------------------------------------
MethodSlot(initproc, "", "__cinit__")
MethodSlot(destructor, "", "__dealloc__")
MethodSlot(objobjargproc, "", "__setitem__")
MethodSlot(objargproc, "", "__delitem__")
MethodSlot(ssizessizeobjargproc, "", "__setslice__")
MethodSlot(ssizessizeargproc, "", "__delslice__")
MethodSlot(getattrofunc, "", "__getattr__")
MethodSlot(setattrofunc, "", "__setattr__")
MethodSlot(delattrofunc, "", "__delattr__")
MethodSlot(descrgetfunc, "", "__get__")
MethodSlot(descrsetfunc, "", "__set__")
MethodSlot(descrdelfunc, "", "__delete__")
# Method flags for python-exposed methods.
method_noargs = "METH_NOARGS"
method_onearg = "METH_O"
method_varargs = "METH_VARARGS"
method_keywords = "METH_KEYWORDS"
method_coexist = "METH_COEXIST"
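# For illustration: these names correspond to the CPython METH_* constants used in
# the ml_flags field of PyMethodDef entries; METH_VARARGS and METH_KEYWORDS are
# commonly OR'ed together, e.g. "METH_VARARGS|METH_KEYWORDS", while METH_NOARGS and
# METH_O cover the zero- and one-argument cases.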
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import json
import mock
import os
from os import path
try:
import Queue
except ImportError:
import queue as Queue
import sys
import unittest
import warnings
import yaml
INV_DIR = 'playbooks/inventory'
sys.path.append(path.join(os.getcwd(), INV_DIR))
from osa_toolkit import dictutils
import dynamic_inventory
from osa_toolkit import filesystem as fs
from osa_toolkit import generate as di
from osa_toolkit import tools
TARGET_DIR = path.join(os.getcwd(), 'tests', 'inventory')
BASE_ENV_DIR = INV_DIR
CONFIGS_DIR = path.join(os.getcwd(), 'etc', 'openstack_deploy')
CONFD = os.path.join(CONFIGS_DIR, 'conf.d')
AIO_CONFIG_FILE = path.join(CONFIGS_DIR, 'openstack_user_config.yml.aio')
USER_CONFIG_FILE = path.join(TARGET_DIR, 'openstack_user_config.yml')
# These files will be placed in TARGET_DIR by the inventory functions
# They should be cleaned up between each test.
CLEANUP = [
'openstack_inventory.json',
'openstack_hostnames_ips.yml',
'backup_openstack_inventory.tar'
]
# Base config is a global configuration accessible for convenience.
# It should *not* be mutated outside of setUpModule, which populates it.
_BASE_CONFIG = {}
def get_config():
"""Return a copy of the original config so original isn't modified."""
global _BASE_CONFIG
return copy.deepcopy(_BASE_CONFIG)
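# Illustrative usage: get_config() hands back a deep copy, so tests may mutate the
# result freely without touching _BASE_CONFIG, e.g.
#     config = get_config()
#     config['log_hosts']['bogus'] = config['log_hosts']['aio1']
# (this is essentially what TestConfigCheckFunctional.duplicate_ip does below).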
def make_config():
"""Build an inventory configuration from the sample AIO files.
Take any files specified as '.aio' and load their keys into a
configuration dict and write them out to a file for consumption by
the tests.
"""
# Allow access here so we can populate the dictionary.
global _BASE_CONFIG
_BASE_CONFIG = tools.make_example_config(AIO_CONFIG_FILE, CONFD)
tools.write_example_config(USER_CONFIG_FILE, _BASE_CONFIG)
def setUpModule():
# The setUpModule function is used by the unittest framework.
make_config()
def tearDownModule():
# This file should only be removed after all tests are run,
# thus it is excluded from cleanup.
os.remove(USER_CONFIG_FILE)
def cleanup():
for f_name in CLEANUP:
f_file = path.join(TARGET_DIR, f_name)
if os.path.exists(f_file):
os.remove(f_file)
def get_inventory(clean=True, extra_args=None):
"Return the inventory mapping in a dict."
# Use the list argument to more closely mirror
# Ansible's use of the callable.
args = {'config': TARGET_DIR, 'list': True,
'environment': BASE_ENV_DIR}
if extra_args:
args.update(extra_args)
try:
inventory_string = di.main(**args)
inventory = json.loads(inventory_string)
return inventory
finally:
if clean:
# Remove the file system artifacts since we want to force
# fresh runs
cleanup()
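# Illustrative usage, mirroring the tests below:
#     inventory = get_inventory(extra_args={'debug': True})
#     hostvars = inventory['_meta']['hostvars']
# With the default clean=True, the generated inventory artifacts listed in CLEANUP
# are removed again after each call.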
class TestArgParser(unittest.TestCase):
def test_no_args(self):
arg_dict = dynamic_inventory.args([])
self.assertIsNone(arg_dict['config'])
self.assertEqual(arg_dict['list'], False)
def test_list_arg(self):
arg_dict = dynamic_inventory.args(['--list'])
self.assertEqual(arg_dict['list'], True)
def test_config_arg(self):
arg_dict = dynamic_inventory.args(['--config',
'/etc/openstack_deploy'])
self.assertEqual(arg_dict['config'], '/etc/openstack_deploy')
class TestAnsibleInventoryFormatConstraints(unittest.TestCase):
inventory = None
expected_groups = [
'aio1-host_containers',
'all',
'all_containers',
'aodh_alarm_evaluator',
'aodh_alarm_notifier',
'aodh_all',
'aodh_api',
'aodh_container',
'aodh_listener',
'barbican_all',
'barbican_api',
'barbican_container',
'ceilometer_all',
'ceilometer_agent_central',
'ceilometer_agent_compute',
'ceilometer_agent_notification',
'ceilometer_central_container',
'ceph_all',
'ceph-mon_all',
'ceph-mon_containers',
'ceph-mon_container',
'ceph-mon_hosts',
'ceph-mon',
'ceph-osd_all',
'ceph-osd_containers',
'ceph-osd_container',
'ceph-osd_hosts',
'ceph-osd',
'cinder_all',
'cinder_api',
'cinder_api_container',
'cinder_backup',
'cinder_scheduler',
'cinder_volume',
'cinder_volumes_container',
'compute-infra_all',
'compute-infra_containers',
'compute-infra_hosts',
'compute_all',
'compute_containers',
'compute_hosts',
'dashboard_all',
'dashboard_containers',
'dashboard_hosts',
'database_containers',
'database_hosts',
'dnsaas_all',
'dnsaas_containers',
'dnsaas_hosts',
'designate_all',
'designate_container',
'designate_api',
'designate_central',
'designate_mdns',
'designate_worker',
'designate_producer',
'designate_sink',
'etcd',
'etcd_all',
'etcd_container',
'etcd_containers',
'etcd_hosts',
'galera',
'galera_all',
'galera_container',
'glance_all',
'glance_api',
'glance_container',
'glance_registry',
'gnocchi_all',
'gnocchi_api',
'gnocchi_container',
'gnocchi_metricd',
'haproxy',
'haproxy_all',
'haproxy_container',
'haproxy_containers',
'haproxy_hosts',
'heat_all',
'heat_api',
'heat_api_cfn',
'heat_api_cloudwatch',
'heat_api_container',
'heat_engine',
'horizon',
'horizon_all',
'horizon_container',
'hosts',
'identity_all',
'identity_containers',
'identity_hosts',
'image_all',
'image_containers',
'image_hosts',
'ironic-infra_all',
'ironic-infra_containers',
'ironic-infra_hosts',
'ironic-server_containers',
'ironic-server_hosts',
'ironic_all',
'ironic_api',
'ironic_api_container',
'ironic_conductor',
'ironic_server',
'ironic_server_container',
'ironic_servers',
'ironic_compute',
'ironic_compute_container',
'ironic-compute_containers',
'ironic-compute_all',
'ironic-compute_hosts',
'key-manager_containers',
'key-manager_hosts',
'keystone',
'keystone_all',
'keystone_container',
'kvm-compute_containers',
'kvm-compute_hosts',
'log_all',
'log_containers',
'log_hosts',
'lxc_hosts',
'lxd-compute_containers',
'lxd-compute_hosts',
'magnum',
'magnum-infra_all',
'magnum-infra_containers',
'magnum-infra_hosts',
'magnum_all',
'magnum_container',
'mano_all',
'mano_containers',
'mano_hosts',
'octavia-infra_hosts',
'octavia_all',
'octavia-api',
'octavia_server_container',
'octavia-worker',
'octavia-housekeeping',
'octavia-health-manager',
'octavia-infra_containers',
'octavia-infra_all',
'powervm-compute_containers',
'powervm-compute_hosts',
'qemu-compute_containers',
'qemu-compute_hosts',
'trove_all',
'trove_api',
'trove_conductor',
'trove_taskmanager',
'trove_api_container',
'trove-infra_containers',
'trove-infra_hosts',
'trove-infra_all',
'memcached',
'memcached_all',
'memcached_container',
'memcaching_containers',
'memcaching_hosts',
'metering-alarm_all',
'metering-alarm_containers',
'metering-alarm_hosts',
'metering-compute_all',
'metering-compute_container',
'metering-compute_containers',
'metering-compute_hosts',
'metering-infra_all',
'metering-infra_containers',
'metering-infra_hosts',
'metrics_all',
'metrics_containers',
'metrics_hosts',
'molteniron',
'molteniron-server_all',
'molteniron-server_containers',
'molteniron-server_container',
'molteniron-server_hosts',
'molteniron_all',
'mq_containers',
'mq_hosts',
'network_all',
'network_containers',
'network_hosts',
'neutron_agent',
'neutron_agents_container',
'neutron_all',
'neutron_bgp_dragent',
'neutron_dhcp_agent',
'neutron_l3_agent',
'neutron_lbaas_agent',
'neutron_linuxbridge_agent',
'neutron_metadata_agent',
'neutron_metering_agent',
'neutron_openvswitch_agent',
'neutron_sriov_nic_agent',
'neutron_server',
'neutron_server_container',
'nova_all',
'nova_api_metadata',
'nova_api_os_compute',
'nova_api_container',
'nova_api_placement',
'nova_compute',
'nova_compute_container',
'nova_conductor',
'nova_console',
'nova_scheduler',
'opendaylight',
'operator_containers',
'operator_hosts',
'orchestration_all',
'orchestration_containers',
'orchestration_hosts',
'os-infra_containers',
'os-infra_hosts',
'pkg_repo',
'rabbit_mq_container',
'rabbitmq',
'rabbitmq_all',
'remote',
'remote_containers',
'repo-infra_all',
'repo-infra_containers',
'repo-infra_hosts',
'repo_all',
'repo_container',
'rsyslog',
'rsyslog_all',
'rsyslog_container',
'sahara-infra_all',
'sahara-infra_containers',
'sahara-infra_hosts',
'sahara_all',
'sahara_api',
'sahara_container',
'sahara_engine',
'shared-infra_all',
'shared-infra_containers',
'shared-infra_hosts',
'storage-infra_all',
'storage-infra_containers',
'storage-infra_hosts',
'storage_all',
'storage_containers',
'storage_hosts',
'swift-proxy_all',
'swift-proxy_containers',
'swift-proxy_hosts',
'swift-remote_containers',
'swift-remote_hosts',
'swift_acc',
'swift_acc_container',
'swift_all',
'swift_cont',
'swift_cont_container',
'swift_containers',
'swift_hosts',
'swift_obj',
'swift_obj_container',
'swift_proxy',
'swift_proxy_container',
'swift_remote',
'swift_remote_all',
'swift_remote_container',
'tacker_all',
'tacker_container',
'tacker_server',
'unbound',
'unbound_all',
'unbound_container',
'unbound_containers',
'unbound_hosts',
'utility',
'utility_all',
'utility_container'
]
@classmethod
def setUpClass(cls):
cls.inventory = get_inventory()
def test_meta(self):
meta = self.inventory['_meta']
self.assertIsNotNone(meta, "_meta missing from inventory")
self.assertIsInstance(meta, dict, "_meta is not a dict")
def test_hostvars(self):
hostvars = self.inventory['_meta']['hostvars']
self.assertIsNotNone(hostvars, "hostvars missing from _meta")
self.assertIsInstance(hostvars, dict, "hostvars is not a dict")
def test_group_vars_all(self):
group_vars_all = self.inventory['all']
self.assertIsNotNone(group_vars_all,
"group vars all missing from inventory")
self.assertIsInstance(group_vars_all, dict,
"group vars all is not a dict")
the_vars = group_vars_all['vars']
self.assertIsNotNone(the_vars,
"vars missing from group vars all")
self.assertIsInstance(the_vars, dict,
"vars in group vars all is not a dict")
def test_expected_host_groups_present(self):
for group in self.expected_groups:
the_group = self.inventory[group]
self.assertIsNotNone(the_group,
"Required host group: %s is missing "
"from inventory" % group)
self.assertIsInstance(the_group, dict)
if group != 'all':
self.assertIn('hosts', the_group)
self.assertIsInstance(the_group['hosts'], list)
def test_only_expected_host_groups_present(self):
all_keys = list(self.expected_groups)
all_keys.append('_meta')
self.assertEqual(set(all_keys), set(self.inventory.keys()))
def test_configured_groups_have_hosts(self):
config = get_config()
groups = self.inventory.keys()
for group in groups:
if group in config.keys():
self.assertTrue(0 < len(self.inventory[group]['hosts']))
class TestUserConfiguration(unittest.TestCase):
def setUp(self):
self.longMessage = True
self.loaded_user_configuration = fs.load_user_configuration(TARGET_DIR)
def test_loading_user_configuration(self):
"""Test that the user configuration can be loaded"""
self.assertIsInstance(self.loaded_user_configuration, dict)
class TestEnvironments(unittest.TestCase):
def setUp(self):
self.longMessage = True
self.loaded_environment = fs.load_environment(BASE_ENV_DIR, {})
def test_loading_environment(self):
"""Test that the environment can be loaded"""
self.assertIsInstance(self.loaded_environment, dict)
def test_envd_read(self):
"""Test that the env.d contents are inserted into the environment"""
expected_keys = [
'component_skel',
'container_skel',
'physical_skel',
]
for key in expected_keys:
self.assertIn(key, self.loaded_environment)
class TestIps(unittest.TestCase):
def setUp(self):
# Allow custom assertion errors.
self.longMessage = True
self.env = fs.load_environment(BASE_ENV_DIR, {})
@mock.patch('osa_toolkit.filesystem.load_environment')
@mock.patch('osa_toolkit.filesystem.load_user_configuration')
def test_duplicates(self, mock_load_config, mock_load_env):
"""Test that no duplicate IPs are made on any network."""
# Grab our values read from the file system just once.
mock_load_config.return_value = get_config()
mock_load_env.return_value = self.env
mock_open = mock.mock_open()
for i in range(0, 99):
            # tearDown is ineffective for this loop, so clean USED_IPS
            # on each run
inventory = None
di.ip.USED_IPS = set()
# Mock out the context manager being used to write files.
# We don't need to hit the file system for this test.
with mock.patch('__main__.open', mock_open):
inventory = get_inventory()
ips = collections.defaultdict(int)
hostvars = inventory['_meta']['hostvars']
for host, var_dict in hostvars.items():
nets = var_dict['container_networks']
for net, vals in nets.items():
if 'address' in vals.keys():
addr = vals['address']
ips[addr] += 1
self.assertEqual(1, ips[addr],
msg="IP %s duplicated." % addr)
def test_empty_ip_queue(self):
q = Queue.Queue()
with self.assertRaises(SystemExit) as context:
# TODO(nrb): import and use ip module directly
di.ip.get_ip_address('test', q)
expectedLog = ("Cannot retrieve requested amount of IP addresses. "
"Increase the test range in your "
"openstack_user_config.yml.")
self.assertEqual(str(context.exception), expectedLog)
def tearDown(self):
# Since the get_ip_address function touches USED_IPS,
# and USED_IPS is currently a global var, make sure we clean it out
di.ip.USED_IPS = set()
class TestConfigCheckBase(unittest.TestCase):
def setUp(self):
self.config_changed = False
self.user_defined_config = get_config()
def delete_config_key(self, user_defined_config, key):
try:
if key in user_defined_config:
del user_defined_config[key]
elif key in user_defined_config['global_overrides']:
del user_defined_config['global_overrides'][key]
else:
raise KeyError("can't find specified key in user config")
finally:
self.write_config()
def add_config_key(self, key, value):
self.user_defined_config[key] = value
self.write_config()
def add_provider_network(self, net_name, cidr):
self.user_defined_config['cidr_networks'][net_name] = cidr
self.write_config()
def delete_provider_network(self, net_name):
del self.user_defined_config['cidr_networks'][net_name]
self.write_config()
def add_provider_network_key(self, net_name, key, value):
pns = self.user_defined_config['global_overrides']['provider_networks']
for net in pns:
if 'ip_from_q' in net['network']:
if net['network']['ip_from_q'] == net_name:
net['network'][key] = value
def delete_provider_network_key(self, net_name, key):
pns = self.user_defined_config['global_overrides']['provider_networks']
for net in pns:
if 'ip_from_q' in net['network']:
if net['network']['ip_from_q'] == net_name:
if key in net['network']:
del net['network'][key]
def write_config(self):
self.config_changed = True
# Save new user_config_file
with open(USER_CONFIG_FILE, 'wb') as f:
f.write(yaml.dump(self.user_defined_config).encode('ascii'))
def restore_config(self):
# get back our initial user config file
self.user_defined_config = get_config()
self.write_config()
def set_new_hostname(self, user_defined_config, group,
old_hostname, new_hostname):
# set a new name for the specified hostname
old_hostname_settings = user_defined_config[group].pop(old_hostname)
user_defined_config[group][new_hostname] = old_hostname_settings
self.write_config()
def set_new_ip(self, user_defined_config, group, hostname, ip):
# Sets an IP address for a specified host.
user_defined_config[group][hostname]['ip'] = ip
self.write_config()
def add_host(self, group, host_name, ip):
self.user_defined_config[group][host_name] = {'ip': ip}
self.write_config()
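    # Illustrative use from a subclass (see TestL3ProviderNetworkConfig below):
    #     self.add_provider_network('pod1_container', '172.29.236.0/22')
    #     self.add_provider_network_key('pod1_container', 'address_prefix', 'management')
    #     self.add_host('pod1_hosts', 'aio2', '172.29.236.101')
    # Note that the *_provider_network_key helpers do not call write_config()
    # themselves, so callers follow up with an explicit self.write_config().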
def tearDown(self):
if self.config_changed:
self.restore_config()
class TestConfigChecks(TestConfigCheckBase):
def test_missing_container_cidr_network(self):
self.delete_provider_network('container')
with self.assertRaises(SystemExit) as context:
get_inventory()
expectedLog = ("No container or management network specified in "
"user config.")
self.assertEqual(str(context.exception), expectedLog)
def test_management_network_malformed(self):
self.delete_provider_network_key('container', 'is_container_address')
self.delete_provider_network_key('container', 'is_ssh_address')
self.write_config()
with self.assertRaises(di.ProviderNetworkMisconfiguration) as context:
get_inventory()
expectedLog = ("Provider network with queue 'container' "
"requires 'is_container_address' and "
"'is_ssh_address' to be set to True.")
self.assertEqual(str(context.exception), expectedLog)
self.restore_config()
def test_missing_cidr_network_present_in_provider(self):
self.delete_provider_network('storage')
with self.assertRaises(SystemExit) as context:
get_inventory()
expectedLog = "can't find storage in cidr_networks"
self.assertEqual(str(context.exception), expectedLog)
def test_missing_cidr_networks_key(self):
del self.user_defined_config['cidr_networks']
self.write_config()
with self.assertRaises(SystemExit) as context:
get_inventory()
expectedLog = "No container CIDR specified in user config"
self.assertEqual(str(context.exception), expectedLog)
def test_provider_networks_check(self):
# create config file without provider networks
self.delete_config_key(self.user_defined_config, 'provider_networks')
        # check that the absence of provider networks is caught
with self.assertRaises(SystemExit) as context:
get_inventory()
expectedLog = "provider networks can't be found under global_overrides"
self.assertIn(expectedLog, str(context.exception))
def test_global_overrides_check(self):
# create config file without global_overrides
self.delete_config_key(self.user_defined_config, 'global_overrides')
        # check that the absence of global_overrides is caught
with self.assertRaises(SystemExit) as context:
get_inventory()
expectedLog = "global_overrides can't be found in user config"
self.assertEqual(str(context.exception), expectedLog)
def test_two_hosts_same_ip(self):
        # Use an OrderedDict to be certain our testing order is preserved
        # Even with the same hash seed, different OSes get different results,
        # e.g. local OS X vs gate's Linux
config = collections.OrderedDict()
config['shared-infra_hosts'] = {
'host1': {
'ip': '192.168.1.1'
}
}
config['compute_hosts'] = {
'host2': {
'ip': '192.168.1.1'
}
}
with self.assertRaises(di.MultipleHostsWithOneIPError) as context:
di._check_same_ip_to_multiple_host(config)
self.assertEqual(context.exception.ip, '192.168.1.1')
self.assertEqual(context.exception.assigned_host, 'host1')
self.assertEqual(context.exception.new_host, 'host2')
def test_two_hosts_same_ip_externally(self):
self.set_new_hostname(self.user_defined_config, "haproxy_hosts",
"aio1", "hap")
with self.assertRaises(di.MultipleHostsWithOneIPError) as context:
get_inventory()
expectedLog = ("Both host:aio1 and host:hap have "
"address:172.29.236.100 assigned. Cannot "
"assign same ip to both hosts")
self.assertEqual(str(context.exception), expectedLog)
def test_one_host_two_ips_externally(self):
# haproxy chosen because it was last in the config file as of
# writing
self.set_new_ip(self.user_defined_config, 'haproxy_hosts', 'aio1',
'172.29.236.101')
with self.assertRaises(di.MultipleIpForHostError) as context:
get_inventory()
expectedLog = ("Host aio1 has both 172.29.236.100 and 172.29.236.101 "
"assigned")
self.assertEqual(str(context.exception), expectedLog)
def test_two_ips(self):
        # Use an OrderedDict to be certain our testing order is preserved
        # Even with the same hash seed, different OSes get different results,
        # e.g. local OS X vs gate's Linux
config = collections.OrderedDict()
config['shared-infra_hosts'] = {
'host1': {
'ip': '192.168.1.1'
}
}
config['compute_hosts'] = {
'host1': {
'ip': '192.168.1.2'
}
}
with self.assertRaises(di.MultipleIpForHostError) as context:
di._check_multiple_ips_to_host(config)
self.assertEqual(context.exception.current_ip, '192.168.1.1')
self.assertEqual(context.exception.new_ip, '192.168.1.2')
self.assertEqual(context.exception.hostname, 'host1')
def test_correct_hostname_ip_map(self):
config = {
'shared-infra_hosts': {
'host1': {
'ip': '192.168.1.1'
}
},
'compute_hosts': {
'host2': {
'ip': '192.168.1.2'
}
},
}
ret = di._check_multiple_ips_to_host(config)
self.assertTrue(ret)
class TestStaticRouteConfig(TestConfigCheckBase):
def setUp(self):
super(TestStaticRouteConfig, self).setUp()
self.expectedMsg = ("Static route provider network with queue "
"'container' needs both 'cidr' and 'gateway' "
"values.")
def add_static_route(self, q_name, route_dict):
"""Adds a static route to a provider network."""
pn = self.user_defined_config['global_overrides']['provider_networks']
for net in pn:
net_dict = net['network']
q = net_dict.get('ip_from_q', None)
if q == q_name:
net_dict['static_routes'] = [route_dict]
self.write_config()
def test_setting_static_route(self):
route_dict = {'cidr': '10.176.0.0/12',
'gateway': '172.29.248.1'}
self.add_static_route('container', route_dict)
inventory = get_inventory()
# Use aio1 and 'container_address' since they're known keys.
hostvars = inventory['_meta']['hostvars']['aio1']
cont_add = hostvars['container_networks']['container_address']
self.assertIn('static_routes', cont_add)
first_route = cont_add['static_routes'][0]
self.assertIn('cidr', first_route)
self.assertIn('gateway', first_route)
def test_setting_bad_static_route_only_cidr(self):
route_dict = {'cidr': '10.176.0.0/12'}
self.add_static_route('container', route_dict)
with self.assertRaises(di.MissingStaticRouteInfo) as context:
get_inventory()
exception = context.exception
self.assertEqual(str(exception), self.expectedMsg)
def test_setting_bad_static_route_only_gateway(self):
route_dict = {'gateway': '172.29.248.1'}
self.add_static_route('container', route_dict)
with self.assertRaises(di.MissingStaticRouteInfo) as context:
get_inventory()
exception = context.exception
self.assertEqual(exception.message, self.expectedMsg)
def test_setting_bad_gateway_value(self):
route_dict = {'cidr': '10.176.0.0/12',
'gateway': None}
self.add_static_route('container', route_dict)
with self.assertRaises(di.MissingStaticRouteInfo) as context:
get_inventory()
exception = context.exception
self.assertEqual(exception.message, self.expectedMsg)
def test_setting_bad_cidr_value(self):
route_dict = {'cidr': None,
'gateway': '172.29.248.1'}
self.add_static_route('container', route_dict)
with self.assertRaises(di.MissingStaticRouteInfo) as context:
get_inventory()
exception = context.exception
self.assertEqual(exception.message, self.expectedMsg)
def test_setting_bad_cidr_gateway_value(self):
route_dict = {'cidr': None,
'gateway': None}
self.add_static_route('container', route_dict)
with self.assertRaises(di.MissingStaticRouteInfo) as context:
get_inventory()
exception = context.exception
self.assertEqual(exception.message, self.expectedMsg)
class TestGlobalOverridesConfigDeletion(TestConfigCheckBase):
def setUp(self):
super(TestGlobalOverridesConfigDeletion, self).setUp()
self.inventory = get_inventory()
def add_global_override(self, var_name, var_value):
"""Adds an arbitrary name and value to the global_overrides dict."""
overrides = self.user_defined_config['global_overrides']
overrides[var_name] = var_value
def remove_global_override(self, var_name):
"""Removes target key from the global_overrides dict."""
overrides = self.user_defined_config['global_overrides']
del overrides[var_name]
def test_global_overrides_delete_when_merge(self):
"""Vars removed from global overrides are removed from inventory"""
self.add_global_override('foo', 'bar')
di._parse_global_variables({}, self.inventory,
self.user_defined_config)
self.remove_global_override('foo')
di._parse_global_variables({}, self.inventory,
self.user_defined_config)
self.assertNotIn('foo', self.inventory['all']['vars'],
"foo var not removed from group_vars_all")
def test_global_overrides_merge(self):
self.add_global_override('foo', 'bar')
di._parse_global_variables({}, self.inventory,
self.user_defined_config)
self.assertEqual('bar', self.inventory['all']['vars']['foo'])
def test_container_cidr_key_retained(self):
user_cidr = self.user_defined_config['cidr_networks']['container']
di._parse_global_variables(user_cidr, self.inventory,
self.user_defined_config)
self.assertIn('container_cidr', self.inventory['all']['vars'])
self.assertEqual(self.inventory['all']['vars']['container_cidr'],
user_cidr)
def test_only_old_vars_deleted(self):
self.inventory['all']['vars']['foo'] = 'bar'
di._parse_global_variables('', self.inventory,
self.user_defined_config)
self.assertNotIn('foo', self.inventory['all']['vars'])
def test_empty_vars(self):
del self.inventory['all']
di._parse_global_variables('', self.inventory,
self.user_defined_config)
self.assertIn('container_cidr', self.inventory['all']['vars'])
for key in self.user_defined_config['global_overrides']:
self.assertIn(key, self.inventory['all']['vars'])
class TestEnsureInventoryUptoDate(unittest.TestCase):
def setUp(self):
self.env = fs.load_environment(BASE_ENV_DIR, {})
# Copy because we manipulate the structure in each test;
# not copying would modify the global var in the target code
self.inv = copy.deepcopy(di.INVENTORY_SKEL)
        # Keep a direct reference to the hostvars dict; the tests below
        # insert hosts into it directly.
self.host_vars = self.inv['_meta']['hostvars']
# The _ensure_inventory_uptodate function depends on values inserted
# by the skel_setup function
di.skel_setup(self.env, self.inv)
def test_missing_required_host_vars(self):
self.host_vars['host1'] = {}
di._ensure_inventory_uptodate(self.inv, self.env['container_skel'])
for required_key in di.REQUIRED_HOSTVARS:
self.assertIn(required_key, self.host_vars['host1'])
def test_missing_container_name(self):
self.host_vars['host1'] = {}
di._ensure_inventory_uptodate(self.inv, self.env['container_skel'])
self.assertIn('container_name', self.host_vars['host1'])
self.assertEqual(self.host_vars['host1']['container_name'], 'host1')
def test_inserting_container_networks_is_dict(self):
self.host_vars['host1'] = {}
di._ensure_inventory_uptodate(self.inv, self.env['container_skel'])
self.assertIsInstance(self.host_vars['host1']['container_networks'],
dict)
def test_populating_inventory_info(self):
skel = self.env['container_skel']
di._ensure_inventory_uptodate(self.inv, skel)
for container_type, type_vars in skel.items():
hosts = self.inv[container_type]['hosts']
if hosts:
for host in hosts:
host_var_entries = self.inv['_meta']['hostvars'][host]
if 'properties' in type_vars:
self.assertEqual(host_var_entries['properties'],
type_vars['properties'])
def tearDown(self):
self.env = None
self.host_vars = None
self.inv = None
class OverridingEnvBase(unittest.TestCase):
def setUp(self):
self.base_env = fs.load_environment(BASE_ENV_DIR, {})
# Use the cinder configuration as our sample for override testing
with open(path.join(BASE_ENV_DIR, 'env.d', 'cinder.yml'), 'r') as f:
self.cinder_config = yaml.safe_load(f.read())
self.override_path = path.join(TARGET_DIR, 'env.d')
os.mkdir(self.override_path)
def write_override_env(self):
with open(path.join(self.override_path, 'cinder.yml'), 'w') as f:
f.write(yaml.safe_dump(self.cinder_config))
def tearDown(self):
os.remove(path.join(self.override_path, 'cinder.yml'))
os.rmdir(self.override_path)
class TestOverridingEnvVars(OverridingEnvBase):
def test_cinder_metal_override(self):
vol = self.cinder_config['container_skel']['cinder_volumes_container']
vol['properties']['is_metal'] = False
self.write_override_env()
fs.load_environment(TARGET_DIR, self.base_env)
test_vol = self.base_env['container_skel']['cinder_volumes_container']
self.assertFalse(test_vol['properties']['is_metal'])
def test_deleting_elements(self):
# Leave only the 'properties' dictionary attached to simulate writing
# a partial override file
vol = self.cinder_config['container_skel']['cinder_volumes_container']
keys = vol.keys()
to_delete = []
for key in vol.keys():
if not key == 'properties':
to_delete.append(key)
for key in to_delete:
del vol[key]
self.write_override_env()
fs.load_environment(TARGET_DIR, self.base_env)
test_vol = self.base_env['container_skel']['cinder_volumes_container']
self.assertIn('belongs_to', test_vol)
def test_adding_new_keys(self):
vol = self.cinder_config['container_skel']['cinder_volumes_container']
vol['a_new_key'] = 'Added'
self.write_override_env()
fs.load_environment(TARGET_DIR, self.base_env)
test_vol = self.base_env['container_skel']['cinder_volumes_container']
self.assertIn('a_new_key', test_vol)
self.assertEqual(test_vol['a_new_key'], 'Added')
def test_emptying_dictionaries(self):
self.cinder_config['container_skel']['cinder_volumes_container'] = {}
self.write_override_env()
fs.load_environment(TARGET_DIR, self.base_env)
test_vol = self.base_env['container_skel']['cinder_volumes_container']
self.assertNotIn('belongs_to', test_vol)
def test_emptying_lists(self):
vol = self.cinder_config['container_skel']['cinder_volumes_container']
vol['belongs_to'] = []
self.write_override_env()
fs.load_environment(TARGET_DIR, self.base_env)
test_vol = self.base_env['container_skel']['cinder_volumes_container']
self.assertEqual(test_vol['belongs_to'], [])
class TestOverridingEnvIntegration(OverridingEnvBase):
def setUp(self):
super(TestOverridingEnvIntegration, self).setUp()
self.user_defined_config = get_config()
# Inventory is necessary since keys are assumed present
self.inv, path = fs.load_inventory(TARGET_DIR, di.INVENTORY_SKEL)
def skel_setup(self):
self.environment = fs.load_environment(TARGET_DIR, self.base_env)
di.skel_setup(self.environment, self.inv)
di.skel_load(
self.environment.get('physical_skel'),
self.inv
)
def test_emptying_container_integration(self):
self.cinder_config = {}
self.cinder_config['container_skel'] = {'cinder_volumes_container': {}}
self.write_override_env()
self.skel_setup()
di.container_skel_load(
self.environment.get('container_skel'),
self.inv,
self.user_defined_config
)
test_vol = self.base_env['container_skel']['cinder_volumes_container']
self.assertNotIn('belongs_to', test_vol)
self.assertNotIn('contains', test_vol)
def test_empty_contains(self):
vol = self.cinder_config['container_skel']['cinder_volumes_container']
vol['contains'] = []
self.write_override_env()
self.skel_setup()
di.container_skel_load(
self.environment.get('container_skel'),
self.inv,
self.user_defined_config
)
test_vol = self.base_env['container_skel']['cinder_volumes_container']
self.assertEqual(test_vol['contains'], [])
def test_empty_belongs_to(self):
vol = self.cinder_config['container_skel']['cinder_volumes_container']
vol['belongs_to'] = []
self.write_override_env()
self.skel_setup()
di.container_skel_load(
self.environment.get('container_skel'),
self.inv,
self.user_defined_config
)
test_vol = self.base_env['container_skel']['cinder_volumes_container']
self.assertEqual(test_vol['belongs_to'], [])
def tearDown(self):
super(TestOverridingEnvIntegration, self).tearDown()
self.user_defined_config = None
self.inv = None
class TestSetUsedIPS(unittest.TestCase):
def setUp(self):
# Clean up the used ips in case other tests didn't.
di.ip.USED_IPS = set()
# Create a fake inventory just for this test.
self.inventory = {'_meta': {'hostvars': {
'host1': {'container_networks': {
'net': {'address': '172.12.1.1'}
}},
'host2': {'container_networks': {
'net': {'address': '172.12.1.2'}
}},
}}}
def test_adding_inventory_used_ips(self):
config = {'used_ips': None}
        # TODO(nrb): This is a smell; the value needs to be set more directly
di.ip.set_used_ips(config, self.inventory)
self.assertEqual(len(di.ip.USED_IPS), 2)
self.assertIn('172.12.1.1', di.ip.USED_IPS)
self.assertIn('172.12.1.2', di.ip.USED_IPS)
def tearDown(self):
di.ip.USED_IPS = set()
class TestConfigCheckFunctional(TestConfigCheckBase):
def duplicate_ip(self):
ip = self.user_defined_config['log_hosts']['aio1']
self.user_defined_config['log_hosts']['bogus'] = ip
def test_checking_good_config(self):
output = di.main(config=TARGET_DIR, check=True,
environment=BASE_ENV_DIR)
self.assertEqual(output, 'Configuration ok!')
def test_duplicated_ip(self):
self.duplicate_ip()
self.write_config()
with self.assertRaises(di.MultipleHostsWithOneIPError) as context:
di.main(config=TARGET_DIR, check=True, environment=BASE_ENV_DIR)
self.assertEqual(context.exception.ip, '172.29.236.100')
class TestNetworkEntry(unittest.TestCase):
def test_all_args_filled(self):
entry = di.network_entry(True, 'eth1', 'br-mgmt', 'my_type', '1700')
self.assertNotIn('interface', entry.keys())
self.assertEqual(entry['bridge'], 'br-mgmt')
self.assertEqual(entry['type'], 'my_type')
self.assertEqual(entry['mtu'], '1700')
def test_container_dict(self):
entry = di.network_entry(False, 'eth1', 'br-mgmt', 'my_type', '1700')
self.assertEqual(entry['interface'], 'eth1')
class TestDebugLogging(unittest.TestCase):
@mock.patch('osa_toolkit.generate.logging')
@mock.patch('osa_toolkit.generate.logger')
def test_logging_enabled(self, mock_logger, mock_logging):
# Shadow the real value so tests don't complain about it
mock_logging.DEBUG = 10
get_inventory(extra_args={"debug": True})
self.assertTrue(mock_logging.basicConfig.called)
self.assertTrue(mock_logger.info.called)
self.assertTrue(mock_logger.debug.called)
@mock.patch('osa_toolkit.generate.logging')
@mock.patch('osa_toolkit.generate.logger')
def test_logging_disabled(self, mock_logger, mock_logging):
get_inventory(extra_args={"debug": False})
self.assertFalse(mock_logging.basicConfig.called)
# Even though logging is disabled, we still call these
# all over the place; they just choose not to do anything.
# NOTE: No info messages are published when debug is False
self.assertTrue(mock_logger.debug.called)
class TestLxcHosts(TestConfigCheckBase):
def test_lxc_hosts_group_present(self):
inventory = get_inventory()
self.assertIn('lxc_hosts', inventory)
def test_lxc_hosts_only_inserted_once(self):
inventory = get_inventory()
self.assertEqual(1, len(inventory['lxc_hosts']['hosts']))
def test_lxc_hosts_members(self):
self.add_host('shared-infra_hosts', 'aio2', '172.29.236.101')
inventory = get_inventory()
self.assertIn('aio2', inventory['lxc_hosts']['hosts'])
self.assertIn('aio1', inventory['lxc_hosts']['hosts'])
def test_lxc_hosts_in_config_raises_error(self):
self.add_config_key('lxc_hosts', {})
with self.assertRaises(di.LxcHostsDefined):
get_inventory()
def test_host_without_containers(self):
self.add_host('compute_hosts', 'compute1', '172.29.236.102')
inventory = get_inventory()
self.assertNotIn('compute1', inventory['lxc_hosts']['hosts'])
def test_cleaning_bad_hosts(self):
self.add_host('compute_hosts', 'compute1', '172.29.236.102')
inventory = get_inventory()
        # insert compute1 into lxc_hosts, which mimics the bug behavior
inventory['lxc_hosts']['hosts'].append('compute1')
faked_path = INV_DIR
with mock.patch('osa_toolkit.filesystem.load_inventory') as inv_mock:
inv_mock.return_value = (inventory, faked_path)
new_inventory = get_inventory()
# host should no longer be in lxc_hosts
self.assertNotIn('compute1', new_inventory['lxc_hosts']['hosts'])
def test_emptying_lxc_hosts(self):
"""If lxc_hosts is deleted between runs, it should re-populate"""
inventory = get_inventory()
original_lxc_hosts = inventory.pop('lxc_hosts')
self.assertNotIn('lxc_hosts', inventory.keys())
faked_path = INV_DIR
with mock.patch('osa_toolkit.filesystem.load_inventory') as inv_mock:
inv_mock.return_value = (inventory, faked_path)
new_inventory = get_inventory()
self.assertEqual(original_lxc_hosts, new_inventory['lxc_hosts'])
class TestConfigMatchesEnvironment(unittest.TestCase):
def setUp(self):
self.env = fs.load_environment(BASE_ENV_DIR, {})
def test_matching_keys(self):
config = get_config()
result = di._check_all_conf_groups_present(config, self.env)
self.assertTrue(result)
def test_failed_match(self):
bad_config = get_config()
bad_config['bogus_key'] = []
result = di._check_all_conf_groups_present(bad_config, self.env)
self.assertFalse(result)
def test_extra_config_key_warning(self):
bad_config = get_config()
bad_config['bogus_key'] = []
with warnings.catch_warnings(record=True) as wl:
di._check_all_conf_groups_present(bad_config, self.env)
self.assertEqual(1, len(wl))
self.assertIn('bogus_key', str(wl[0].message))
def test_multiple_extra_keys(self):
bad_config = get_config()
bad_config['bogus_key1'] = []
bad_config['bogus_key2'] = []
with warnings.catch_warnings(record=True) as wl:
di._check_all_conf_groups_present(bad_config, self.env)
self.assertEqual(2, len(wl))
warn_msgs = [str(warn.message) for warn in wl]
warn_msgs.sort()
self.assertIn('bogus_key1', warn_msgs[0])
self.assertIn('bogus_key2', warn_msgs[1])
def test_confirm_exclusions(self):
"""Ensure the excluded keys in the function are present."""
config = get_config()
excluded_keys = ('global_overrides', 'cidr_networks', 'used_ips')
for key in excluded_keys:
config[key] = 'sentinel value'
with warnings.catch_warnings(record=True) as wl:
di._check_all_conf_groups_present(config, self.env)
self.assertEqual(0, len(wl))
for key in excluded_keys:
self.assertIn(key, config.keys())
class TestInventoryGroupConstraints(unittest.TestCase):
def setUp(self):
self.env = fs.load_environment(BASE_ENV_DIR, {})
def test_group_with_hosts_dont_have_children(self):
"""Require that groups have children groups or hosts, not both."""
inventory = get_inventory()
# This should only work on groups, but stuff like '_meta' and 'all'
# are in here, too.
        for key, values in inventory.items():
            # The children/hosts keys may both exist; what matters is that
            # they are not both non-empty for the same group.
            has_children = bool(values.get('children'))
            has_hosts = bool(values.get('hosts'))
            self.assertFalse(has_children and has_hosts)
def _create_bad_env(self, env):
        # This environment setup reproduces the layout reported in
        # bug #1646136
override = """
physical_skel:
local-compute_containers:
belongs_to:
- compute_containers
local-compute_hosts:
belongs_to:
- compute_hosts
rbd-compute_containers:
belongs_to:
- compute_containers
rbd-compute_hosts:
belongs_to:
- compute_hosts
"""
bad_env = yaml.safe_load(override)
# This is essentially what load_environment does, after all the file
# system walking
dictutils.merge_dict(env, bad_env)
return env
def test_group_with_hosts_and_children_fails(self):
"""Integration test making sure the whole script fails."""
env = self._create_bad_env(self.env)
config = get_config()
kwargs = {
'load_environment': mock.DEFAULT,
'load_user_configuration': mock.DEFAULT
}
with mock.patch.multiple('osa_toolkit.filesystem', **kwargs) as mocks:
mocks['load_environment'].return_value = env
mocks['load_user_configuration'].return_value = config
with self.assertRaises(di.GroupConflict) as context:
get_inventory()
def test_group_validation_unit(self):
env = self._create_bad_env(self.env)
config = get_config()
with self.assertRaises(di.GroupConflict):
di._check_group_branches(config, env['physical_skel'])
def test_group_validation_no_config(self):
result = di._check_group_branches(None, self.env)
self.assertTrue(result)
def test_group_validation_passes_defaults(self):
config = get_config()
result = di._check_group_branches(config, self.env['physical_skel'])
self.assertTrue(result)
class TestL3ProviderNetworkConfig(TestConfigCheckBase):
def setUp(self):
super(TestL3ProviderNetworkConfig, self).setUp()
self.delete_provider_network('container')
self.add_provider_network('pod1_container', '172.29.236.0/22')
self.add_provider_network_key('container', 'ip_from_q',
'pod1_container')
self.add_provider_network_key('pod1_container', 'address_prefix',
'management')
self.add_provider_network_key('pod1_container', 'reference_group',
'pod1_hosts')
self.add_config_key('pod1_hosts', {})
self.add_host('pod1_hosts', 'aio2', '172.29.236.101')
self.add_host('compute_hosts', 'aio2', '172.29.236.101')
self.write_config()
self.inventory = get_inventory()
def test_address_prefix_name_applied(self):
aio2_host_vars = self.inventory['_meta']['hostvars']['aio2']
aio2_container_networks = aio2_host_vars['container_networks']
self.assertIsInstance(aio2_container_networks['management_address'],
dict)
def test_host_outside_reference_group_excluded(self):
aio1_host_vars = self.inventory['_meta']['hostvars']['aio1']
aio1_container_networks = aio1_host_vars['container_networks']
self.assertNotIn('management_address', aio1_container_networks)
if __name__ == '__main__':
unittest.main(catchbreak=True)
#
# Protein Engineering Analysis Tool Structure Analysis (PEATSA)
# Copyright (C) 2010 Michael Johnston & Jens Erik Nielsen
#
# Author: Michael Johnston
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
'''Contains classes and functions for processing a submission from the WebApp's main web page'''
import subprocess, os, urlparse, urllib2, string, time, StringIO, sys
import PEATSA.Core as Core
import UtilityFunctions, Exceptions, Data
def GetPDB(pdbCode, dict=None):
	'''Fetches a PDB file from the Protein Data Bank. Returns a stream to the pdb contents.
	If dict is provided it will contain the key 'stream' on success, whose value is the stream.
	On failure it will contain two keys, 'error' and 'description'.'''
	if dict is None:
		#Avoid sharing a mutable default argument between calls
		dict = {}
	url = "http://www.rcsb.org//pdb/download/downloadFile.do?fileFormat=pdb&compression=NO&structureId=" + pdbCode
try:
#URLLib API changed in 2.6
if sys.version_info[:2] == (2,5):
import socket
socket.setdefaulttimeout(10)
stream = urllib2.urlopen(url)
else:
stream = urllib2.urlopen(url, None, timeout=10)
#Check for an error
info = stream.info()
status = info.status
		if status != "":
stream = None
dict['error'] = 'Error status %s' % str(status)
elif not info.has_key('content-disposition'):
stream = None
dict['error'] = 'Request for %s returned nothing' % pdbCode
else:
lines = stream.readlines()
string = "".join(lines)
stream = StringIO.StringIO(string)
dict['stream'] = stream
except urllib2.HTTPError, data:
		dict['error'] = 'Unable to retrieve structure for pdb code %s from the Protein Data Bank. ' % pdbCode
dict['description'] = 'Reason: %s' % data
stream = None
except urllib2.URLError, data:
		dict['error'] = 'Unable to retrieve structure for pdb code %s from the Protein Data Bank. ' % pdbCode
dict['description'] = 'Reason: %s' % data.reason
stream = None
except Exception, data:
		dict['error'] = 'Unable to retrieve structure for pdb code %s from the Protein Data Bank. ' % pdbCode
dict['description'] = 'Reason: Encountered unexpected exception %s' % data
stream = None
return stream
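#Illustrative usage of GetPDB (the pdb code here is only an example):
#	result = {}
#	stream = GetPDB('1crn', dict=result)
#	if stream is None:
#		print result['error'], result.get('description', '')
#	else:
#		contents = result['stream'].read()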
def CreateMutationString(mutationData, webStorageDirectory, jobInputDirectory, job):
'''Creates the mutation part of the command line arguments based on the mutationData dictionary'''
mutationType = mutationData['Type']
mutationString = "--%s=%s"
if mutationType == 'mutationList':
#Write out the file to the web storage directory
filename = os.path.join(webStorageDirectory, 'mutationList')
stream = open(filename, 'w+')
stream.write(mutationData['Data'].read())
try:
#Fixme - catch mutation list exception
stream.seek(0)
mutationList = Core.Data.mutationListFileFromStream(stream)
job.setMutationListFile(mutationList)
stream.close()
except:
pass
#Create the string
#Note we must give the path to the mutationFile as it will
#be when the job is run
filename = os.path.join(jobInputDirectory, 'mutationList')
mutationString = mutationString % (mutationType, filename)
elif mutationType == 'mutation':
mutationString = mutationString % (mutationType, mutationData['Data'])
job.setMutation(mutationData['Data'])
return mutationString
def CreateCalculationString(calculations, ligandFile):
	'''Creates the calculation parts of the command line arguments based on the calculations list'''
calculationString = ""
if calculations.count('scan') != 0:
calculationString = calculationString + "--scan "
if calculations.count('stability') != 0:
calculationString = calculationString + " --stability"
if ligandFile is not None and calculations.count('binding') != 0:
calculationString = calculationString + " --ligand=%s" % ligandFile
return calculationString
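#Worked example (illustrative ligand filename): with calculations=['scan', 'stability', 'binding']
#and ligandFile='ligand.mol2' the string assembled above is
#	"--scan  --stability --ligand=ligand.mol2"
#(the double space is an artifact of how the fragments are concatenated).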
def ConstructErrorURL(domain="PDT.UnknownDomain",
description="Unexpected Error",
detailedDescription="Unknown error while processing job",
recoverySuggestion="Please contact the developers with details of what you were doing"):
'''Constructs an URL for the WebApps error page
	Automatically determines the server name and port from os.environ
Parameters
domain - The domain of the error - Indicates what part of the program failed
description - A short string which very briefly describes what happened
detailedDescription - A longer string elaborating on the error
recoverySuggestion - A string explaining what to do, if known
Return
A string containing an URL'''
#Add error data as GET data - Possibly a better way?
#e.g. generate html here?
dataDict = {"domain":domain,
"description":description,
"detailedDescription":detailedDescription,
"recoverySuggestion":recoverySuggestion}
data = ["%s=%s" % (key, dataDict[key]) for key in dataDict.keys()]
query = "&".join(data)
location = os.environ['SERVER_NAME'] + ":" + os.environ['SERVER_PORT']
components = ("http", location, "PEATSA/Pages/Error.php", "", query, "")
return urlparse.urlunparse(components)
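#Illustrative result, assuming SERVER_NAME=www.example.org and SERVER_PORT=80:
#	ConstructErrorURL(domain="PDT.SubmissionDomain", description="Error with pdb.")
#returns an URL of the form
#	http://www.example.org:80/PEATSA/Pages/Error.php?domain=PDT.SubmissionDomain&description=Error with pdb.&...
#Note that the GET values are not URL-encoded here, so spaces pass through literally.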
def ProcessPDBFilename(filename):
'''Processes the input string to be of the form "PDBID.pdb"
This is done in the following way:
- filename is split into base + extension
- Trailing whitespace and underlines are removed
- All punctuation (except for underlines) is removed
	- All spaces are replaced with underlines
- The base is lower-cased and appended with .pdb
If the file does not have the '.pdb' extension (in any mixture of cases)
an empty string is returned'''
filename = os.path.basename(filename)
extension = os.path.splitext(filename)[1]
if extension.lower() != ".pdb":
return ""
pdbId = os.path.splitext(filename)[0]
#Strip stuff
pdbId = pdbId.strip()
pdbId = pdbId.strip("_")
#Replace underlines with spaces
#This is so these aren't removed in the next step
pdbId = pdbId.replace("_", " ")
#Remove all punctuation characters
for character in string.punctuation:
pdbId = pdbId.replace(character, "")
#Put the underlines back - this also replaces any
#preexisting spaces with underlines
pdbId = pdbId.replace(" ", "_")
pdbId = pdbId.lower()
return pdbId + '.pdb'
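#Worked examples (illustrative filenames):
#	ProcessPDBFilename('My File-1.PDB')  returns 'my_file1.pdb'
#	ProcessPDBFilename('structure.txt')  returns '' since the extension is not '.pdb'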
class JobConstructor:
'''Setup a WebApp job using data submitted by the user.
This includes creating output directories, placing files in the
correct location, and forming the command line to be executed.
	Many of the options controlling a JobConstructor instance's behaviour
	are given in the WebApp's configuration file.
Principal attributes
- options - The web app options, read from the configuration file
- job - A WebApp.Data.Job instance representing the job
- connection - A connection to the webapp db
- runString - The command line for the job'''
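	#Illustrative use, assuming formData is a populated FormData instance:
	#	constructor = JobConstructor(formData, construct=True)
	#	if constructor.errorData is not None:
	#		url = ConstructErrorURL(**constructor.errorData)
	#	else:
	#		pass	#launch the job, e.g. via runBackend() as described in construct()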
def __init__(self, formData, construct=False):
'''Constructor
Parameters:
formData - A FormData instance
construct - If True this method calls construct immediately,
otherwise construct must be called at a later stage.
This allows parameters to be modified.'''
self.formData = formData
self.runString = None
self.errorData = None
self.job = None
self.jobManager = None
try:
self.options = UtilityFunctions.DefaultWebAppConfiguration()
self.connection = UtilityFunctions.ConnectionFromConfiguration(self.options)
self.webServerDirectory = self.options.get('WEB APPLICATION', 'webServerDirectory')
self.uploadLimit = int(self.options.get('WEB APPLICATION', 'uploadLimit'))
except Core.Exceptions.EnvironmentError, data:
self._setErrorData(data)
return
#Check if the uploaded files exceed the uploadLimit
if self._checkUploadedFiles():
#Connect to the db
self.jobManager = Data.JobManager(self.connection)
if construct is True:
self.construct()
def __del__(self):
self.connection.close()
def __str__(self):
		if self.runString is None:
			return "JobConstructor - Job not created yet"
		else:
			return "JobConstructor - Run string:\n\t%s" % self.runString
def _checkUploadedFiles(self):
'''Checks all the uploaded files to see if they are within limits'''
#If a PKA code wasn't provided a
#pdb file must be present - check the file size
if not self.formData.isPKACodePresent():
content = self.formData.pdbFileStream().read()
if len(content) > self.uploadLimit:
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error with pdb.",
"detailedDescription":"Filesize exceeds size limit (%.2lfMB)" % (self.uploadLimit/(1024.0*1024.0)),
"recoverySuggestion":"Unfortunately we only can accept files under this limit."}
if len(content) == 0:
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error with pdb.",
"detailedDescription":"No data in file",
"recoverySuggestion":"Check that the correct file was provided."}
		if self.errorData is None and self.formData.isLigandPresent():
content = self.formData.ligandFileStream().read()
if len(content) > self.uploadLimit:
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error with uploaded ligand.",
"detailedDescription":"Filesize exceeds upload limit (%.2lfMB)" % (self.uploadLimit/(1024.0*1024.0)),
"recoverySuggestion":"Unfortunately we only can accept files under this limit."}
		if self.errorData is None and self.formData.isMutationList():
			content = self.formData.mutationListFileStream().read()
			if len(content) > self.uploadLimit:
				self.errorData = {"domain":"PDT.SubmissionDomain",
					"description":"Error with uploaded mutation list.",
					"detailedDescription":"Filesize exceeds upload limit (%.2lfMB)" % (self.uploadLimit/(1024.0*1024.0)),
					"recoverySuggestion":"Unfortunately we can only accept files under this limit."}
if self.errorData is not None:
return False
else:
return True
def _setErrorData(self, data):
'''Convenience method for creating errorData due to a configuration error.
The exact reason for the error is provided by data.'''
self.errorData = {"domain":"PDT.ConfigurationDomain",
"description":"Error initialising job submission environment.",
"detailedDescription":data,
"recoverySuggestion":"This is a bug - please contact the developers."}
def _writeStringToFile(self, string, filename):
'''Convenience method for writing to a file'''
stream = open(filename, "w+")
stream.write(string)
stream.close()
def construct(self):
		'''Performs all necessary steps for setting up a WebApp job based on the data submitted by a user via the WebApp main page.
		This basically involves three steps:
		- Creation of an entry for the Job in the WebApp database
		- Outputting the job files uploaded by the user to the correct locations
		- Assembling the command line string that will be executed when runBackend() is called.
		Check the result of errorData() to see if there were any problems with construction'''
		#For each job run an instance of the Job class is created using the JobManager object.
#This creates the necessary entries in the MySQL database.
#The Job instance contains info on the job and allows the data stored for the job to be modified.
self.job = self.jobManager.createJob(self.formData.pdbId(), self.formData.calculations())
#Create the input/output directory names
try:
jobOutputDirectory = self.options.get('WEB APPLICATION', 'jobOutputDirectory')
jobInputDirectory = self.options.get('WEB APPLICATION', 'jobInputDirectory')
pKaDataDirectory = self.options.get('WEB APPLICATION', 'pKaDataDirectory')
backendExecutable = self.options.get('WEB APPLICATION', 'launchScript')
except Core.Exceptions.EnvironmentError, data:
self._setErrorData(data)
return
#Get the various IO directories that will be used by the job
#If the job is to be run on the local host then jobInputDirectory must
#be the same as webServerDirectory
webStorageDir = os.path.join(self.webServerDirectory, self.job.identification)
outputDir = os.path.join(jobOutputDirectory, self.job.identification + '_Out')
if self.formData.isPKACodePresent():
workingDir = os.path.join(pKaDataDirectory, self.formData.pKaCode())
else:
workingDir = os.path.join(jobOutputDirectory, self.job.identification + '_Work')
inputDir = os.path.join(jobInputDirectory, self.job.identification + '_Out')
os.mkdir(webStorageDir)
#If this is a delta pKa calculation the pdb is already available in the pKa data directory.
#Otherwise write the uploaded/downloaded one to a file
if not self.formData.isPKACodePresent():
#To be deprecated
filename = os.path.join(webStorageDir, '%s.pdb' % self.formData.pdbId())
stream = self.formData.pdbFileStream()
self._writeStringToFile(stream.read(), filename)
pdbFilename = self.formData.pdbFilename()
#Add structure to db
stream.seek(0)
self.job.setStructure(stream.read())
#Check the provided pdb
error = False
try:
structure = self.job.protoolStructure()
except Exceptions.FileFormatError, data:
error = True
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in with submission.",
"detailedDescription": "There is an error in the format of the pdb file",
"recoverySuggestion": 'Check the file to ensure its format is correct'}
self.job.setError(description=self.errorData['description'],
detailedDescription=self.errorData['detailedDescription'])
self.job.setState('Finished')
if error is True:
return
if structure.hasMissingMainChainAtoms():
missing = structure.hasMissingMainChainAtoms()
missing = ", ".join(missing)
suggestion = "The residues with missing atoms are: %s.<br>" % missing
suggestion = suggestion + "PEAT-SA requires that all main-chain heavy atoms are present in the structure.<br>"
suggestion = suggestion + "You could try submitting a fragment of the structure that meets this requirement."
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted pdb structure.",
"detailedDescription":"The supplied structure is missing main-chain heavy atoms",
"recoverySuggestion": suggestion}
self.job.setError(description=self.errorData['description'],
detailedDescription=self.errorData['detailedDescription'])
self.job.setState('Finished')
return
elif structure.hasChainBreak():
suggestion = "PEAT-SA requires that all chains in submitted structures are complete.<br>"
suggestion = suggestion + "You could try submitting a fragment of the structure that meets this requirement."
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted pdb structure.",
"detailedDescription":"The supplied structure contains at least one chain-break.",
"recoverySuggestion": suggestion}
self.job.setError(description=self.errorData['description'],
detailedDescription=self.errorData['detailedDescription'])
self.job.setState('Finished')
return
else:
pdbFilename = os.path.join(workingDir, self.formData.pdbFilename())
if self.formData.isLigandPresent():
filename = os.path.join(webStorageDir, '%s' % self.formData.ligandFilename())
stream = self.formData.ligandFileStream()
self._writeStringToFile(stream.read(), filename)
stream.seek(0)
self.job.setLigand(stream.read())
#Add email address - This could be just 'Unknown' if none was provided
self.job.setEmail(self.formData.email())
#Create the mutation string.
#This also writes out the mutation file if necessary
mutationString = CreateMutationString(self.formData.mutationData, webStorageDir, inputDir, self.job)
#if the mutationData is a mutation-list check there are actually some mutations in it
if self.job.isMutationList():
mutationListFile = self.job.mutationListFile()
if mutationListFile.numberOfMutants() == 0:
suggestion = "Return to the submission page and enter some mutation codes in the text-box or upload a file"
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted mutation list.",
"detailedDescription":"The supplied mutation list contains no mutation codes.",
"recoverySuggestion": suggestion}
self.job.setError(description=self.errorData['description'],
detailedDescription=self.errorData['detailedDescription'])
self.job.setState('Finished')
return
calculationString = CreateCalculationString(self.formData.calculations(), self.formData.ligandFilename())
#Create the run string
self.runString = "%s -p %s -w %s -o %s -j %s -v %s %s" % (backendExecutable, os.path.join(inputDir, pdbFilename),
workingDir, outputDir, self.job.identification,
calculationString, mutationString)
if self.formData.isIonisableGroupsPresent():
self.runString += " --ionisableGroups=%s" % self.formData.ionisableGroups()
def runBackend(self):
'''Executes the command string for the job via Popen
Returns:
An URL which specifies a page giving information about the Job
or None if construct() has not been called.'''
if self.runString is None:
return
try:
#Put selected calculations into state Queued regardless of whether the following works or not.
#This avoids a possible race condition with the backend, if it is launched quickly, over who
#modifies the job's state first
states = self.job.calculationStates()
for calculation in states.keys():
if states[calculation] != "NotSelected":
self.job.setCalculationState(calculation, "Queued")
#Start the job running
#FIXME - Won't really be able to use files for a log
standardOut = open(os.path.join(self.webServerDirectory, "PDTWebRuns.log"), "a")
standardOut.write("\n----------------------------------------------------------\n")
standardOut.write("\nRunning Job %s\nDate %s\nWebScript command line %s\n\n" % (self.job.identification, self.job.date, self.runString))
standardOut.flush()
standardError = open(os.path.join(self.webServerDirectory, "PDTWebErrors.log"), "a")
process = subprocess.Popen(self.runString, shell=True, stdout=standardOut, stderr=standardError)
#Wait until the job is running
time.sleep(1.0)
process.poll()
if process.returncode != None and process.returncode != 0:
string = "Unable to launch job - launch script exited with error %d" % process.returncode
standardError.write(string)
raise Exceptions.SubmissionException, string
standardOut.close()
standardError.close()
#Construct the url for the processing job page
#Pass on the information about what is being calculated as well
#The elements here are scheme, location, hierarchical path, parameters, query, fragment
location = os.environ['SERVER_NAME'] + ":" + os.environ['SERVER_PORT']
components = ("http", location, "PEATSA/Pages/Results.php", "", "jobId=%s" % self.job.identification, "")
resultURL = urlparse.urlunparse(components)
except BaseException, data:
if hasattr(data, "child_traceback"):
errorString = "Exception %s. \n Child traceback %s" % (data, data.child_traceback)
else:
errorString = "Exception - %s" % data
#Delete job information from the db if it exists
if self.job is not None:
self.jobManager.deleteJob(self.job)
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error when attempting to run job.",
"detailedDescription":errorString,
"recoverySuggestion":"This is a bug - please contact the developers."}
resultURL = self.errorURL()
return resultURL
def errorURL(self):
'''Returns an URL for the WebApp error page if there was an error with the form data.
On loading this URL the user will be presented with information regarding what went wrong.
If there was no problem this method returns None'''
if self.errorData is None:
return None
return ConstructErrorURL(domain=self.errorData["domain"],
description=self.errorData["description"],
detailedDescription=self.errorData["detailedDescription"],
recoverySuggestion=self.errorData["recoverySuggestion"])
def error(self):
'''See FormData.error docs for information'''
return self.errorData
class FormData:
'''Class representing the form data submitted from the WebApp main page'''
def __init__(self, formData):
'''Initialises the Data class.
formData must be an instance of the cgi.FieldStorage class'''
self.errorData = None
self.formData = formData
self.pdbStream = None
self._processMutationData()
self._checkSubmittedData()
if self.errorData is None:
self._setPDBStream()
def _setPDBStream(self):
'''Assigns a stream of the pdb data to the pdbStream ivar.
If the stream cannot be created it sets an error.
Note, a stream is only created if a delta-pKa calculation is not requested.
In the delta-pKa case the pdb file to be used is already available'''
if self.isPDBFilePresent():
self.pdbStream = self.formData["pdbFile"].file
self.pdbStream.seek(0)
elif self.isPDBCodePresent():
data = {}
self.pdbStream = GetPDB(self.pdbId(), dict=data)
if data.has_key('error'):
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":data['error'],
"detailedDescription": data['description'],
"recoverySuggestion":"Check the supplied code is valid"}
else:
self.pdbStream.seek(0)
def _checkSubmittedData(self):
'''Performs a series of checks on the submitted data'''
if not self.isPDBFilePresent() and not (self.isPDBCodePresent() or self.isPKACodePresent()):
#No pdb supplied - error
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"No PDB file was uploaded and no PDB code was provided. Hard to do a calculation on nothing!",
"recoverySuggestion":"Head back to the main page and upload a PDB or provide a PDB code."}
elif not self.isCalculationDataPresent():
#No calculation specified
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"At least one calculation type must be selected.",
"recoverySuggestion":"Head back to the main page and choose some calculations."}
elif not self.isMutationDataPresent():
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"The mutations to perform must be specified.",
"recoverySuggestion":"Head back to the main page and choose some mutations or upload a mutation file."}
elif self.calculations().count('binding') == 1 and not self.isLigandPresent():
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"Binding selected but no ligand provided.",
"recoverySuggestion":"Head back to the main page and upload a ligand."}
elif self.calculations().count('scan') == 1 and not self.isPKACodePresent():
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"pKa Scan selected but no pKa calculation code provided.",
"recoverySuggestion":"In order to perform a scan you must have previously completed a pKa calculation."}
elif self.calculations().count('scan') == 0 and self.isPKACodePresent():
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"pKa calculation code provided but pKa scan not selected.",
"recoverySuggestion":"Please select delta pKa option if you want to perform a scan."}
#If there's been an error at this stage return now
if self.errorData is not None:
return
#Check the submitted PDB filename
# In order to standardize the names of the directories
# (so each can be identified with a specific id) the following operations are required
# - The filename must be of the form PDBID.pdb
# - The PDBID must be all lowercase
# - The PDBID must not contain any punctuation marks except for underscores
# - No spaces allowed
if self.isPDBFilePresent():
self.standardizedPDBFilename = ProcessPDBFilename(self.formData["pdbFile"].filename)
elif self.isPDBCodePresent():
self.standardizedPDBFilename = ProcessPDBFilename(self.formData.getvalue("pdbCode") + ".pdb")
elif self.isPKACodePresent():
self.standardizedPDBFilename = self.pKaCode() + ".pdb"
#Check that after the processing pdbFilename is not just an extension
if self.standardizedPDBFilename == "" or self.standardizedPDBFilename[0] == ".":
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"The filename of the uploaded pdb is invalid.",
"recoverySuggestion":"Go back to the main page and check the naming guidelines for uploaded pdb files."}
#Check the ligand file extension is mol2 (if it exists).
if self.isLigandPresent():
components = os.path.splitext(self.ligandFilename())
if components[1] == "":
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"The filename of the uploaded ligand is invalid (missing extension).",
"recoverySuggestion":"Go back to the main page and check the naming guidelines for uploaded ligand files."}
elif components[1].lower() != ".mol2":
self.errorData = {"domain":"PDT.SubmissionDomain",
"description":"Error in submitted form data.",
"detailedDescription":"The filename of the uploaded ligand is invalid - %s." % components[1],
"recoverySuggestion":"The filename extension must be mol2"}
def _processMutationData(self):
self.mutationData = {"Type":"Unknown", "Data":"Unknown"}
if self.isMutationDataPresent():
#Process the mutations
if self.isMutationList():
self.mutationData['Type']='mutationList'
#See if they uploaded a file or typed one in
if self.formData["mutationFile"].filename:
#The file element is a stream
self.mutationData['Data']=self.formData["mutationFile"].file
else:
list = self.formData.getvalue('mutationListArea')
#Create a file-like stream object for the list
self.mutationData['Data'] = StringIO.StringIO(list)
else:
#Must be Residue Scan since otherwise we wouldn't be here
self.mutationData['Type']='mutation'
self.mutationData['Data']=self.formData["residue"].value
def error(self):
'''Returns a dictionary containing details on any errors with the form data
The dictionary has the following keys
- domain
- description
- detailedDescription
- recoverySuggestion
The method returns None if there is no problem with the form data'''
return self.errorData
def errorURL(self):
'''Returns an URL for the WebApp error page if there was an error with the form data.
On loading this URL the user will be presented with information regarding what went wrong.
If there was no problem this method returns None'''
if self.errorData is None:
return None
return ConstructErrorURL(domain=self.errorData["domain"],
description=self.errorData["description"],
detailedDescription=self.errorData["detailedDescription"],
recoverySuggestion=self.errorData["recoverySuggestion"])
def isPDBFilePresent(self):
'''Returns True if the form data contains a PDB file'''
pdbProvided = False
if self.formData.has_key("pdbFile"):
if self.formData["pdbFile"].filename != "":
pdbProvided = True
return pdbProvided
def isLigandPresent(self):
'''Returns True if formData contains a ligand file'''
provided = False
if self.formData.has_key("ligandFile"):
if self.formData["ligandFile"].filename != "":
provided = True
return provided
def isCodePresent(self):
'''Returns True if the code field contains text'''
codeProvided = False
if self.formData.getvalue("pdbCode") != "":
codeProvided = True
return codeProvided
def isPDBCodePresent(self):
'''Returns True if the form data contains a PDB code.
The form data contains a PDB code if there is an entry in the
code field and "Scan" is not selected'''
pdbProvided = False
if self.isCodePresent() and not self.isDeltaPKACalculationRequested():
pdbProvided = True
return pdbProvided
def isPKACodePresent(self):
'''Returns True if formData contains a pKa code.
The form data contains a pKa code if "Scan" is selected
and there is an entry in the code field'''
pKaProvided = False
if self.isCodePresent() and self.isDeltaPKACalculationRequested():
pKaProvided = True
return pKaProvided
def isDeltaPKACalculationRequested(self):
'''Returns True if a delta pKa calculation was requested'''
provided = False
if self.calculations().count('scan') == 1:
provided = True
return provided
def isMutationDataPresent(self):
'''Returns True if formData contains mutation information'''
mutationProvided = False
mutationChoice = self.formData.getlist("mutation")
if len(mutationChoice) == 1:
mutationProvided = True
return mutationProvided
def isCalculationDataPresent(self):
'''Returns True if data on what calculations to perform is present'''
present = False
if len(self.calculations()) != 0:
present = True
return present
def isIonisableGroupsPresent(self):
'''Returns True if ionisable groups were specified AND a dpKa calculation was requested'''
present = False
if self.isDeltaPKACalculationRequested():
string = self.ionisableGroups()
if string and string.lower() != "all":
present = True
return present
def isMutationList(self):
'''Returns True if the formData contains a mutationList file'''
mutationList = False
mutationChoice = self.formData.getlist("mutation")
if mutationChoice.count('mutationFile') != 0:
mutationList=True
return mutationList
def calculations(self):
'''Returns a list containing the names of the calculations requested'''
return self.formData.getlist("calculation")
def pdbFilename(self):
'''Returns the filename of the pdb - note this does not include a path'''
return self.standardizedPDBFilename
def pdbId(self):
return os.path.splitext(self.pdbFilename())[0]
def pdbFileStream(self):
'''Returns an opened stream to the pdbFile'''
self.pdbStream.seek(0)
return self.pdbStream
def ligandFilename(self):
filename = self.formData["ligandFile"].filename
filename = os.path.basename(filename)
return self.formData["ligandFile"].filename
def ligandFileStream(self):
'''Returns an opened stream to the ligand file'''
stream = self.formData["ligandFile"].file
stream.seek(0)
return stream
def mutationListFileStream(self):
'''Returns an opened stream to the mutationList file'''
if self.mutationData['Type'] == 'mutationList':
stream = self.mutationData["Data"]
stream.seek(0)
return stream
def pKaCode(self):
'''Returns the pKa code.
If none is present returns None.
A pKa code is deemed present if the code field is filled
and a delta-pKa calculation is requested'''
retval = None
if self.isPKACodePresent():
retval = self.formData.getvalue("pdbCode")
return retval
def email(self):
'''Returns the email address if there is one'''
retval = self.formData.getvalue("email")
if retval == "":
retval = "Unknown"
return retval
def ionisableGroups(self):
return self.formData.getvalue("ionisableGroups")
|
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import shutil
import tarfile
import logging
import subprocess
from uuid import uuid4
from functools import wraps
from datetime import datetime
from contextlib import contextmanager
from mock import patch
from cloudify_cli.constants import DEFAULT_TENANT_NAME
from cloudify._compat import StringIO
import cloudify.utils
import cloudify.exceptions
from cloudify import ctx as op_ctx
from cloudify.decorators import operation, workflow
from cloudify.state import workflow_ctx as workflow_ctx
from cloudify.workflows import tasks as workflow_tasks
from cloudify_rest_client.nodes import Node
from cloudify_rest_client.executions import Execution
from cloudify_rest_client.maintenance import Maintenance
from cloudify_rest_client.node_instances import NodeInstance
def mock_fabric_sudo(command, *args, **kwargs):
subprocess.check_call(command.split(' '))
def mock_fabric_put(local_path, remote_path, *args, **kwargs):
shutil.copy(local_path, remote_path)
def execution_mock(status, wf_id='mock_wf'):
return Execution({
'status': status,
'workflow_id': wf_id,
'deployment_id': 'deployment-id',
'blueprint_id': 'blueprint-id',
'error': '',
'id': uuid4(),
'created_at': datetime.now().isoformat()[:-3],
'parameters': {
'param1': 'value1'
},
'visibility': 'private',
'created_by': 'admin',
'tenant_name': DEFAULT_TENANT_NAME
})
def mock_log_message_prefix(event):
return event['event_name']
@operation
def mock_op(param, custom_param=None, **kwargs):
props = op_ctx.instance.runtime_properties
props['param'] = param
props['custom_param'] = custom_param
props['provider_context'] = op_ctx.provider_context
@workflow
def mock_workflow(param, custom_param=None, **kwargs):
for node in workflow_ctx.nodes:
for instance in node.instances:
instance.execute_operation('test.op', kwargs={
'param': param,
'custom_param': custom_param
})
@workflow
def logging_workflow(**kwargs):
kwargs.pop('ctx', None)
graph = workflow_ctx.graph_mode()
instance = next(workflow_ctx.node_instances)
task = instance.execute_operation('test.op', kwargs=kwargs)
def on_failure(tsk):
return workflow_tasks.HandlerResult.ignore()
task.on_failure = on_failure
graph.add_task(task)
graph.execute()
@operation
def logging_operation(level, message, error=False, user_cause=False, **kwargs):
if error:
causes = []
if user_cause:
try:
raise RuntimeError(message)
except RuntimeError:
_, ex, tb = sys.exc_info()
causes.append(cloudify.utils.exception_to_error_cause(
ex, tb))
raise cloudify.exceptions.NonRecoverableError(message, causes=causes)
else:
level = getattr(logging, level)
op_ctx.logger.log(level, message)
def counter(func):
@wraps(func)
def tmp(*_):
tmp.count += 1
return func()
tmp.count = 0
return tmp
@counter
def mock_activated_status():
if mock_activated_status.count % 2 == 1:
return Maintenance({'status': 'deactivated'})
return Maintenance({'status': 'activated'})
def mock_is_timeout(*_):
return True
def node_instance_get_mock():
return NodeInstance({
'id': uuid4(),
'deployment_id': 'deployment_id',
'host_id': 'host_id',
'node_id': 'node_id',
'state': 'started',
'runtime_properties': {
'floating_ip': '127.0.0.1'
},
'visibility': 'private',
'created_by': 'admin',
'tenant_name': DEFAULT_TENANT_NAME,
'version': 1
})
def node_get_mock():
return Node({
'id': uuid4(),
'deployment_id': 'deployment-id',
'blueprint_id': 'blueprint_id',
'host_id': 'host_id',
'type': 'Compute',
'type_hierarchy': ['cloudify.nodes.Root', 'Compute'],
'number_of_instances': '1',
'planned_number_of_instances': '2',
'properties': {
'port': '8080'
},
'operations': {},
'visibility': 'private',
'created_by': 'admin',
'tenant_name': DEFAULT_TENANT_NAME
})
def make_tarfile(output_filename, source_dir, write_type='w'):
with tarfile.open(output_filename, write_type) as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
@contextmanager
def mock_stdout():
stdout = StringIO()
with patch('sys.stdout', stdout):
yield stdout
class MockPagination(dict):
def __init__(self, total=0):
self.total = total
class MockMetadata(dict):
def __init__(self, pagination=MockPagination()):
self.pagination = pagination
class MockListResponse(object):
def __init__(self, items=None, _=None):
self.items = items or []
self.metadata = MockMetadata()
def __iter__(self):
return iter(self.items)
def __getitem__(self, index):
return self.items[index]
def __len__(self):
return len(self.items)
def sort(self, cmp=None, key=None, reverse=False):
return self.items.sort(cmp, key, reverse)
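# A small self-contained sketch (not part of the original module) exercising two of
# the helpers above: MockListResponse behaves like a rest-client list response for
# len()/iteration/indexing, and mock_stdout captures anything printed inside it.
if __name__ == '__main__':
    nodes = MockListResponse(items=[node_get_mock(), node_get_mock()])
    assert len(nodes) == 2
    assert nodes[0]['type'] == 'Compute'
    with mock_stdout() as fake_out:
        print('captured output')
    assert 'captured output' in fake_out.getvalue()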
|
|
# Copyright 2011 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
import os
from multiprocessing import Process, Queue, cpu_count
from whoosh.compat import xrange, iteritems, pickle
from whoosh.codec import base
from whoosh.writing import PostingPool, SegmentWriter
from whoosh.externalsort import imerge
from whoosh.util import random_name
def finish_subsegment(writer, k=64):
# Tell the pool to finish up the current file
writer.pool.save()
# Tell the pool to merge any and all runs in the pool until there
# is only one run remaining. "k" is an optional parameter passed
# from the parent which sets the maximum number of files to open
# while reducing.
writer.pool.reduce_to(1, k)
# The filename of the single remaining run
runname = writer.pool.runs[0]
# The indexed field names
fieldnames = writer.pool.fieldnames
# The segment object (parent can use this to re-open the files created
# by the sub-writer)
segment = writer._partial_segment()
return runname, fieldnames, segment
# Multiprocessing Writer
class SubWriterTask(Process):
# This is a Process object that takes "jobs" off a job Queue, processes
# them, and when it's done, puts a summary of its work on a results Queue
def __init__(self, storage, indexname, jobqueue, resultqueue, kwargs,
multisegment):
Process.__init__(self)
self.storage = storage
self.indexname = indexname
self.jobqueue = jobqueue
self.resultqueue = resultqueue
self.kwargs = kwargs
self.multisegment = multisegment
self.running = True
def run(self):
# This is the main loop of the process. OK, so the way this works is
# kind of brittle and stupid, but I had to figure out how to use the
# multiprocessing module, work around bugs, and address performance
# issues, so there is at least some reasoning behind some of this
# The "parent" task farms individual documents out to the subtasks for
# indexing. You could pickle the actual documents and put them in the
# queue, but that is not very performant. Instead, we assume the tasks
# share a filesystem and use that to pass the information around. The
# parent task writes a certain number of documents to a file, then puts
# the filename on the "job queue". A subtask gets the filename off the
# queue and reads through the file processing the documents.
jobqueue = self.jobqueue
resultqueue = self.resultqueue
multisegment = self.multisegment
# Open a placeholder object representing the index
ix = self.storage.open_index(self.indexname)
# Open a writer for the index. The _lk=False parameter means to not try
# to lock the index (the parent object that started me takes care of
# locking the index)
writer = self.writer = SegmentWriter(ix, _lk=False, **self.kwargs)
# If the parent task calls cancel() on me, it will set self.running to
# False, so I'll notice the next time through the loop
while self.running:
# Take an object off the job queue
jobinfo = jobqueue.get()
# If the object is None, it means the parent task wants me to
# finish up
if jobinfo is None:
break
# The object from the queue is a tuple of (filename,
# number_of_docs_in_file). Pass those two pieces of information as
# arguments to _process_file().
self._process_file(*jobinfo)
if not self.running:
# I was cancelled, so I'll cancel my underlying writer
writer.cancel()
else:
if multisegment:
# Actually finish the segment and return it with no run
runname = None
fieldnames = writer.pool.fieldnames
segment = writer._finalize_segment()
else:
# Merge all runs in the writer's pool into one run, close the
# segment, and return the run name and the segment
k = self.kwargs.get("k", 64)
runname, fieldnames, segment = finish_subsegment(writer, k)
# Put the results (the run filename and the segment object) on the
# result queue
resultqueue.put((runname, fieldnames, segment), timeout=5)
def _process_file(self, filename, doc_count):
# This method processes a "job file" written out by the parent task. A
# job file is a series of pickled (code, arguments) tuples. Currently
# the only command code is 0=add_document
writer = self.writer
tempstorage = writer.temp_storage()
load = pickle.load
with tempstorage.open_file(filename).raw_file() as f:
for _ in xrange(doc_count):
# Load the next pickled tuple from the file
code, args = load(f)
assert code == 0
writer.add_document(**args)
# Remove the job file
tempstorage.delete_file(filename)
def cancel(self):
self.running = False
class MpWriter(SegmentWriter):
def __init__(self, ix, procs=None, batchsize=100, subargs=None,
multisegment=False, **kwargs):
# This is the "main" writer that will aggregate the results created by
# the sub-tasks
SegmentWriter.__init__(self, ix, **kwargs)
self.procs = procs or cpu_count()
# The maximum number of documents in each job file submitted to the
# sub-tasks
self.batchsize = batchsize
# You can use keyword arguments or the "subargs" argument to pass
# keyword arguments to the sub-writers
self.subargs = subargs if subargs else kwargs
# If multisegment is True, don't merge the segments created by the
# sub-writers, just add them directly to the TOC
self.multisegment = multisegment
# A list to hold the sub-task Process objects
self.tasks = []
# A queue to pass the filenames of job files to the sub-tasks
self.jobqueue = Queue(self.procs * 4)
# A queue to get back the final results of the sub-tasks
self.resultqueue = Queue()
# A buffer for documents before they are flushed to a job file
self.docbuffer = []
self._grouping = 0
self._added_sub = False
def _new_task(self):
task = SubWriterTask(self.storage, self.indexname,
self.jobqueue, self.resultqueue, self.subargs,
self.multisegment)
self.tasks.append(task)
task.start()
return task
def _enqueue(self):
# Flush the documents stored in self.docbuffer to a file and put the
# filename on the job queue
docbuffer = self.docbuffer
dump = pickle.dump
length = len(docbuffer)
filename = "%s.doclist" % random_name()
with self.temp_storage().create_file(filename).raw_file() as f:
for item in docbuffer:
dump(item, f, -1)
if len(self.tasks) < self.procs:
self._new_task()
jobinfo = (filename, length)
self.jobqueue.put(jobinfo)
self.docbuffer = []
def cancel(self):
try:
for task in self.tasks:
task.cancel()
finally:
SegmentWriter.cancel(self)
def start_group(self):
self._grouping += 1
def end_group(self):
if not self._grouping:
raise Exception("Unbalanced end_group")
self._grouping -= 1
def add_document(self, **fields):
# Add the document to the docbuffer
self.docbuffer.append((0, fields))
# If the buffer is full, flush it to the job queue
if not self._grouping and len(self.docbuffer) >= self.batchsize:
self._enqueue()
self._added_sub = True
def _read_and_renumber_run(self, path, offset):
# Note that SortingPool._read_run() automatically deletes the run file
# when it's finished
gen = self.pool._read_run(path)
# If offset is 0, just return the items unchanged
if not offset:
return gen
else:
# Otherwise, add the offset to each docnum
return ((fname, text, docnum + offset, weight, value)
for fname, text, docnum, weight, value in gen)
def commit(self, mergetype=None, optimize=None, merge=None):
if self._added_sub:
# If documents have been added to sub-writers, use the parallel
# merge commit code
self._commit(mergetype, optimize, merge)
else:
# Otherwise, just do a regular-old commit
SegmentWriter.commit(self, mergetype=mergetype, optimize=optimize,
merge=merge)
def _commit(self, mergetype, optimize, merge):
# Index the remaining documents in the doc buffer
if self.docbuffer:
self._enqueue()
# Tell the tasks to finish
for task in self.tasks:
self.jobqueue.put(None)
# Merge existing segments
finalsegments = self._merge_segments(mergetype, optimize, merge)
# Wait for the subtasks to finish
for task in self.tasks:
task.join()
# Pull a (run_file_name, fieldnames, segment) tuple off the result
# queue for each sub-task, representing the final results of the task
results = []
for task in self.tasks:
results.append(self.resultqueue.get(timeout=5))
if self.multisegment:
# If we're not merging the segments, we don't care about the runname
# and fieldnames in the results... just pull out the segments and
# add them to the list of final segments
finalsegments += [s for _, _, s in results]
if self._added:
finalsegments.append(self._finalize_segment())
else:
self._close_segment()
assert self.perdocwriter.is_closed
else:
# Merge the posting sources from the sub-writers and my
# postings into this writer
self._merge_subsegments(results, mergetype)
self._close_segment()
self._assemble_segment()
finalsegments.append(self.get_segment())
assert self.perdocwriter.is_closed
self._commit_toc(finalsegments)
self._finish()
def _merge_subsegments(self, results, mergetype):
schema = self.schema
schemanames = set(schema.names())
storage = self.storage
codec = self.codec
sources = []
# If information was added to this writer the conventional way (e.g.
# through add_reader or merging segments), add it as an extra source
if self._added:
sources.append(self.pool.iter_postings())
pdrs = []
for runname, fieldnames, segment in results:
fieldnames = set(fieldnames) | schemanames
pdr = codec.per_document_reader(storage, segment)
pdrs.append(pdr)
basedoc = self.docnum
docmap = self.write_per_doc(fieldnames, pdr)
assert docmap is None
items = self._read_and_renumber_run(runname, basedoc)
sources.append(items)
# Create a MultiPerDocumentReader combining the per-document readers from
# this writer and the subtask segments
self.perdocwriter.close()
pdrs.insert(0, self.per_document_reader())
mpdr = base.MultiPerDocumentReader(pdrs)
try:
# Merge the iterators into the field writer
self.fieldwriter.add_postings(schema, mpdr, imerge(sources))
finally:
mpdr.close()
self._added = True
class SerialMpWriter(MpWriter):
# A non-parallel version of the MpWriter for testing purposes
def __init__(self, ix, procs=None, batchsize=100, subargs=None, **kwargs):
SegmentWriter.__init__(self, ix, **kwargs)
self.procs = procs or cpu_count()
self.batchsize = batchsize
self.subargs = subargs if subargs else kwargs
self.tasks = [SegmentWriter(ix, _lk=False, **self.subargs)
for _ in xrange(self.procs)]
self.pointer = 0
self._added_sub = False
def add_document(self, **fields):
self.tasks[self.pointer].add_document(**fields)
self.pointer = (self.pointer + 1) % len(self.tasks)
self._added_sub = True
def _commit(self, mergetype, optimize, merge):
# Collect a (run_file_name, fieldnames, segment) tuple directly from each
# sub-writer (there is no result queue in this serial version)
# Merge existing segments
finalsegments = self._merge_segments(mergetype, optimize, merge)
results = []
for writer in self.tasks:
results.append(finish_subsegment(writer))
self._merge_subsegments(results, mergetype)
self._close_segment()
self._assemble_segment()
finalsegments.append(self.get_segment())
self._commit_toc(finalsegments)
self._finish()
# For compatibility with old multiproc module
class MultiSegmentWriter(MpWriter):
def __init__(self, *args, **kwargs):
MpWriter.__init__(self, *args, **kwargs)
self.multisegment = True
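# A minimal usage sketch (not part of the original module), assuming the standard
# whoosh Schema/index API: create a throwaway index and let MpWriter farm document
# batches out to sub-writer processes, merging their runs on commit.
if __name__ == "__main__":
    import tempfile
    from whoosh import index
    from whoosh.fields import Schema, TEXT, ID

    schema = Schema(path=ID(stored=True), body=TEXT)
    ix = index.create_in(tempfile.mkdtemp(), schema)
    writer = MpWriter(ix, procs=2, batchsize=10)
    for i in xrange(50):
        writer.add_document(path=u"/doc/%d" % i, body=u"hello world %d" % i)
    writer.commit()
    with ix.searcher() as searcher:
        print("indexed %d documents" % searcher.doc_count())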
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""ResNet50 model for Keras.
# Reference:
- [Deep Residual Learning for Image
Recognition](https://arxiv.org/abs/1512.03385)
Adapted from code contributed by BigMoyan.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import layers
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import preprocess_input # pylint: disable=unused-import
from tensorflow.python.keras._impl.keras.engine.topology import get_source_inputs
from tensorflow.python.keras._impl.keras.layers import Activation
from tensorflow.python.keras._impl.keras.layers import AveragePooling2D
from tensorflow.python.keras._impl.keras.layers import BatchNormalization
from tensorflow.python.keras._impl.keras.layers import Conv2D
from tensorflow.python.keras._impl.keras.layers import Dense
from tensorflow.python.keras._impl.keras.layers import Flatten
from tensorflow.python.keras._impl.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras._impl.keras.layers import GlobalMaxPooling2D
from tensorflow.python.keras._impl.keras.layers import Input
from tensorflow.python.keras._impl.keras.layers import MaxPooling2D
from tensorflow.python.keras._impl.keras.models import Model
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
def identity_block(input_tensor, kernel_size, filters, stage, block):
"""The identity block is the block that has no conv layer at shortcut.
Arguments:
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
Returns:
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(
filters2, kernel_size, padding='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2,
2)):
"""conv_block is the block that has a conv layer at shortcut.
Arguments:
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Tuple of integers.
Returns:
Output tensor for the block.
Note that from stage 3, the first conv layer at main path is with
strides=(2, 2), and the shortcut should have strides=(2, 2) as well.
"""
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(
filters1, (1, 1), strides=strides,
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(
filters2, kernel_size, padding='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
shortcut = Conv2D(
filters3, (1, 1), strides=strides,
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = Activation('relu')(x)
return x
def ResNet50(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the ResNet50 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
It should have exactly 3 input channels,
and width and height should be no smaller than 197.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=224,
min_size=197,
data_format=K.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = Input(tensor=input_tensor, shape=input_shape)
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
x = Conv2D(64, (7, 7),
strides=(2, 2), padding='same', name='conv1')(img_input)
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
x = AveragePooling2D((7, 7), name='avg_pool')(x)
if include_top:
x = Flatten()(x)
x = Dense(classes, activation='softmax', name='fc1000')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='resnet50')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file(
'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
else:
weights_path = get_file(
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
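# A minimal usage sketch (not part of the original module): build the network with
# randomly initialised weights (weights=None avoids the pretrained download) and run
# a forward pass on dummy data. Assumes numpy is available and the default
# channels_last image data format.
if __name__ == '__main__':
  import numpy as np
  model = ResNet50(weights=None)
  dummy = np.random.random((1, 224, 224, 3)).astype('float32')
  print(model.predict(dummy).shape)  # (1, 1000)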
|
|
from enum import Enum
class LaserPathType(Enum):
hit = "hit"
bounce = "bounce"
through = "pass"
# destroy = "destroy"
class MoveType(Enum):
rotate = "ROTATE"
move = "MOVE"
swap = "SWAP"
class TeamColor(Enum):
red = "red"
silver = "silver"
blank = "blank"
@staticmethod
def opposite_color(color):
if color is TeamColor.blank:
return TeamColor.blank
elif color is TeamColor.silver:
return TeamColor.red
else:
return TeamColor.silver
class Orientation(Enum):
"""Class for orientations"""
up = 0
left = 270
right = 90
down = 180
none = -1
@staticmethod
def from_position(position1, position2):
return Orientation.up
@staticmethod
def next_position(position, direction):
if direction is Orientation.up:
return Position(position.x, position.y - 1)
if direction is Orientation.down:
return Position(position.x, position.y + 1)
if direction is Orientation.left:
return Position(position.x - 1, position.y)
if direction is Orientation.right:
return Position(position.x + 1, position.y)
@staticmethod
def delta(orientation, delta):
delta %= 360
if orientation is Orientation.none:
return Orientation.none
else:
if orientation.value + delta < 0:
return Orientation((orientation.value + 360 + delta) % 360)
else:
return Orientation((orientation.value + delta) % 360)
class Position(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return "(" + str(self.x) + "," + str(self.y) + ")"
def to_dictionary(self):
return {"x": self.x, "y": self.y}
@staticmethod
def from_dictionary(value):
return Position(value["x"], value["y"])
class PieceType(Enum):
"""Class for piece types and valid orientations"""
pharaoh = "pharaoh"
anubis = "anubis"
pyramid = "pyramid"
scarab = "scarab"
sphinx = "sphinx"
@staticmethod
def can_move(from_piece, to_piece):
"""
Determine if a move is valid
:param from_piece: Square
:param to_piece: Square
:return: Boolean
"""
# Check Place Color (square we are entering is blank or same color)
if to_piece.color is not TeamColor.blank and from_piece.piece.color is not to_piece.color:
return False
# Make sure it's a movable piece
if from_piece.piece.type is PieceType.sphinx:
return False
# Make sure pieces are swappable
if to_piece.piece is not None:
# scarab can swap with pyramid and anubis
if from_piece.piece.type is PieceType.scarab:
if to_piece.piece.type is PieceType.pyramid or to_piece.piece.type is PieceType.anubis:
return True
else:
return False
else:
return False
else:
return True
@staticmethod
def can_rotate(piece, target_orientation):
"""
Returns true if a piece can rotate
:param piece: Piece
:param target_orientation: Orientation
:return: Boolean
"""
if target_orientation is Orientation.none:
return False
if abs(piece.orientation.value - target_orientation.value) == 180: # 270 or 90 are the allowed values
return False
if piece.type is PieceType.sphinx:
if piece.color is TeamColor.silver:
return target_orientation is Orientation.down or target_orientation is Orientation.right
else:
return target_orientation is Orientation.up or target_orientation is Orientation.left
return True
@staticmethod
def bounce_direction(piece, light_direction):
if piece.type is PieceType.pyramid:
if light_direction is Orientation.up:
if piece.orientation is Orientation.down:
return Orientation.left
elif piece.orientation is Orientation.right:
return Orientation.right
else:
return None
elif light_direction is Orientation.down:
if piece.orientation is Orientation.left:
return Orientation.left
elif piece.orientation is Orientation.up:
return Orientation.right
else:
return None
elif light_direction is Orientation.left:
if piece.orientation is Orientation.right:
return Orientation.down
elif piece.orientation is Orientation.up:
return Orientation.up
else:
return None
elif light_direction is Orientation.right:
if piece.orientation is Orientation.down:
return Orientation.down
elif piece.orientation is Orientation.left:
return Orientation.up
else:
return None
if piece.type is PieceType.scarab:
if piece.orientation is Orientation.down or piece.orientation is Orientation.up:
if light_direction is Orientation.up:
return Orientation.left
elif light_direction is Orientation.down:
return Orientation.right
elif light_direction is Orientation.left:
return Orientation.up
elif light_direction is Orientation.right:
return Orientation.down
elif piece.orientation is Orientation.left or piece.orientation is Orientation.right:
if light_direction is Orientation.up:
return Orientation.right
elif light_direction is Orientation.down:
return Orientation.left
elif light_direction is Orientation.left:
return Orientation.down
elif light_direction is Orientation.right:
return Orientation.up
return None
@staticmethod
def valid_rotations(piece):
current_orientation = piece.orientation
if piece.type is PieceType.pharaoh:
return []
elif piece.type is PieceType.sphinx:
if current_orientation is Orientation.down:
return [Orientation.right]
elif current_orientation is Orientation.right:
return [Orientation.down]
elif current_orientation is Orientation.up:
return [Orientation.left]
else:
return [Orientation.up]
else:
return [Orientation.delta(current_orientation, -90), Orientation.delta(current_orientation, 90)]
@staticmethod
def can_swap(piece_from, piece_to):
if piece_from.type is PieceType.sphinx:
return False
if piece_to is None:
return True
return piece_from.type is PieceType.scarab and \
(piece_to.type is PieceType.pyramid or piece_to.type is PieceType.anubis)
class LaserPathNode(object):
def __init__(self, path_type, position, direction):
self.type = path_type
self.position = position
self.direction = direction
def to_dictionary(self):
return {
"type": self.type.value,
"position": self.position.to_dictionary(),
"direction": self.direction.value
}
@staticmethod
def from_dictionary(value):
path_type = LaserPathType(value["type"])
position = Position.from_dictionary(value["position"])
direction = Orientation(value["direction"])
return LaserPathNode(path_type, position, direction)
def __str__(self):
return "( " + str(self.type.value) + ", " + str(self.position) + ", " + str(self.direction.value) + ")"
class Move(object):
def __init__(self, move_type, move_position, move_value):
self.type = move_type
self.position = move_position
self.value = move_value
def to_dictionary(self):
value = self.value
if isinstance(value, Position):
value = value.to_dictionary()
if isinstance(value, Orientation):
value = value.value
return {"type": self.type.value, "position": self.position.to_dictionary(), "value": value}
@staticmethod
def from_dictionary(value):
move_type = MoveType(value["type"])
position = Position(value["position"]["x"], value["position"]["y"])
value = value["value"] # Notice the aliasing...
if move_type is not MoveType.rotate:
value = Position(value["x"], value["y"])
else:
value = Orientation(value)
return Move(move_type, position, value)
def __str__(self):
return "{ T: " + str(self.type.value) + ", P: " + str(self.position) + ", V: " + str(self.value) + "}"
class Piece(object):
"""Generic Piece Class"""
def __init__(self, piece_type, color, orientation=Orientation.none):
self.color = color
self.type = piece_type
self.orientation = orientation
def to_dictionary(self):
return {"type": self.type.value, "color": self.color.value, "orientation": self.orientation.value}
@staticmethod
def from_dictionary(value):
return Piece(PieceType(value["type"]), TeamColor(value["color"]), Orientation(value["orientation"]))
def __str__(self):
return "(" + str(self.color.value) + "," + str(self.type.value) + "," + str(self.orientation.value) + ")"
class Square(object):
def __init__(self, square_type, position):
self.color = square_type
self.piece = None
self.position = position
def get_moves(self, board):
if self.piece is None:
return []
# Get Squares
squares = [board.get(self.position.x + 1, self.position.y),
board.get(self.position.x - 1, self.position.y),
board.get(self.position.x, self.position.y + 1),
board.get(self.position.x, self.position.y - 1),
board.get(self.position.x + 1, self.position.y - 1),
board.get(self.position.x + 1, self.position.y + 1),
board.get(self.position.x - 1, self.position.y + 1),
board.get(self.position.x - 1, self.position.y - 1)]
squares = [x for x in squares if x is not None and (x.color is TeamColor.blank or x.color is self.piece.color)]
moves = []
# Get Valid Moves
for square in squares:
if PieceType.can_swap(self.piece, square.piece):
if square.piece is None:
moves.append(Move(MoveType.move, self.position, square.position))
else:
moves.append(Move(MoveType.swap, self.position, square.position))
# Get Valid Rotations
rotations = PieceType.valid_rotations(self.piece)
for rotation in rotations:
moves.append(Move(MoveType.rotate, self.position, rotation))
return moves
def to_dictionary(self):
value = {"position": self.position.to_dictionary(), "color": self.color.value}
if self.piece is not None:
value["piece"] = self.piece.to_dictionary()
return value
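# A small self-contained demo (not part of the original module): round-trip a rotation
# move through its dictionary form, check the wrap-around arithmetic in Orientation.delta,
# and trace how an upward-facing pyramid deflects a downward laser beam. The positions
# and pieces here are illustrative only.
if __name__ == "__main__":
    move = Move(MoveType.rotate, Position(2, 3), Orientation.right)
    assert str(Move.from_dictionary(move.to_dictionary())) == str(move)
    assert Orientation.delta(Orientation.up, -90) is Orientation.left  # 0 - 90 wraps to 270
    pyramid = Piece(PieceType.pyramid, TeamColor.red, Orientation.up)
    assert PieceType.bounce_direction(pyramid, Orientation.down) is Orientation.right
    print(move)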
|
|
###
# Copyright (c) 2002-2004, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class TopicTestCase(ChannelPluginTestCase):
plugins = ('Topic','User',)
def testRemove(self):
self.assertError('topic remove 1')
_ = self.getMsg('topic add foo')
_ = self.getMsg('topic add bar')
_ = self.getMsg('topic add baz')
self.assertError('topic remove 0')
self.assertNotError('topic remove 3')
self.assertNotError('topic remove 2')
self.assertNotError('topic remove 1')
self.assertError('topic remove 1')
def testRemoveMultiple(self):
self.assertError('topic remove 1 2')
_ = self.getMsg('topic add foo')
_ = self.getMsg('topic add bar')
_ = self.getMsg('topic add baz')
_ = self.getMsg('topic add derp')
_ = self.getMsg('topic add cheese')
self.assertNotError('topic remove 1 2')
self.assertNotError('topic remove -1 1')
self.assertError('topic remove -99 1')
def testReplace(self):
_ = self.getMsg('topic add foo')
_ = self.getMsg('topic add bar')
_ = self.getMsg('topic add baz')
self.assertRegexp('topic replace 1 oof', 'oof.*bar.*baz')
self.assertRegexp('topic replace -1 zab', 'oof.*bar.*zab')
self.assertRegexp('topic replace 2 lorem ipsum',
'oof.*lorem ipsum.*zab')
self.assertRegexp('topic replace 2 rab', 'oof.*rab.*zab')
def testGet(self):
self.assertError('topic get 1')
_ = self.getMsg('topic add foo')
_ = self.getMsg('topic add bar')
_ = self.getMsg('topic add baz')
self.assertRegexp('topic get 1', '^foo')
self.assertError('topic get 0')
def testAdd(self):
self.assertError('topic add #floorgle')
m = self.getMsg('topic add foo')
self.assertEqual(m.command, 'TOPIC')
self.assertEqual(m.args[0], self.channel)
self.assertEqual(m.args[1], 'foo')
m = self.getMsg('topic add bar')
self.assertEqual(m.command, 'TOPIC')
self.assertEqual(m.args[0], self.channel)
self.assertEqual(m.args[1], 'foo | bar')
def testManageCapabilities(self):
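        # Give the bot ops and set +t, then act as a freshly registered user
        # without the manage capability: 'topic add' should fail until the
        # capability requirement is cleared.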
try:
self.irc.feedMsg(ircmsgs.mode(self.channel, args=('+o', self.nick),
prefix=self.prefix))
            self.irc.feedMsg(ircmsgs.mode(self.channel, args=('+t',),
                                          prefix=self.prefix))
world.testing = False
origuser = self.prefix
self.prefix = 'stuff!stuff@stuff'
self.assertNotError('register nottester stuff', private=True)
self.assertError('topic add foo')
origconf = conf.supybot.plugins.Topic.requireManageCapability()
conf.supybot.plugins.Topic.requireManageCapability.setValue('')
self.assertNotError('topic add foo')
finally:
world.testing = True
self.prefix = origuser
conf.supybot.plugins.Topic.requireManageCapability.setValue(origconf)
def testInsert(self):
m = self.getMsg('topic add foo')
self.assertEqual(m.args[1], 'foo')
m = self.getMsg('topic insert bar')
self.assertEqual(m.args[1], 'bar | foo')
def testChange(self):
_ = self.getMsg('topic add foo')
_ = self.getMsg('topic add bar')
_ = self.getMsg('topic add baz')
self.assertRegexp('topic change -1 s/baz/biff/',
r'foo.*bar.*biff')
self.assertRegexp('topic change 2 s/bar/baz/',
r'foo.*baz.*biff')
self.assertRegexp('topic change 1 s/foo/bar/',
r'bar.*baz.*biff')
self.assertRegexp('topic change -2 s/baz/bazz/',
r'bar.*bazz.*biff')
self.assertError('topic change 0 s/baz/biff/')
def testConfig(self):
try:
original = conf.supybot.plugins.Topic.separator()
conf.supybot.plugins.Topic.separator.setValue(' <==> ')
_ = self.getMsg('topic add foo')
m = self.getMsg('topic add bar')
self.failUnless('<==>' in m.args[1])
finally:
conf.supybot.plugins.Topic.separator.setValue(original)
def testReorder(self):
_ = self.getMsg('topic add foo')
_ = self.getMsg('topic add bar')
_ = self.getMsg('topic add baz')
self.assertRegexp('topic reorder 2 1 3', r'bar.*foo.*baz')
self.assertRegexp('topic reorder 3 -2 1', r'baz.*foo.*bar')
self.assertError('topic reorder 0 1 2')
self.assertError('topic reorder 1 -2 2')
self.assertError('topic reorder 1 2')
self.assertError('topic reorder 2 3 4')
self.assertError('topic reorder 1 2 2')
self.assertError('topic reorder 1 1 2 3')
_ = self.getMsg('topic remove 1')
_ = self.getMsg('topic remove 1')
self.assertError('topic reorder 1')
_ = self.getMsg('topic remove 1')
self.assertError('topic reorder 0')
def testList(self):
_ = self.getMsg('topic add foo')
self.assertRegexp('topic list', '1: foo')
_ = self.getMsg('topic add bar')
self.assertRegexp('topic list', '1: foo.*2: bar')
_ = self.getMsg('topic add baz')
self.assertRegexp('topic list', '1: foo.* 2: bar.* and 3: baz')
def testSet(self):
_ = self.getMsg('topic add foo')
self.assertRegexp('topic set -1 bar', 'bar')
self.assertNotRegexp('topic set -1 baz', 'bar')
self.assertResponse('topic set foo bar baz', 'foo bar baz')
# Catch a bug we had where setting topic 1 would reset the whole topic
orig = conf.supybot.plugins.Topic.format()
sep = conf.supybot.plugins.Topic.separator()
try:
conf.supybot.plugins.Topic.format.setValue('$topic')
self.assertResponse('topic add baz', 'foo bar baz%sbaz' % sep)
self.assertResponse('topic set 1 bar', 'bar%sbaz' % sep)
finally:
conf.supybot.plugins.Topic.format.setValue(orig)
def testRestore(self):
self.getMsg('topic set foo')
self.assertResponse('topic restore', 'foo')
self.getMsg('topic remove 1')
restoreError = 'Error: I haven\'t yet set the topic in #test.'
self.assertResponse('topic restore', restoreError)
def testRefresh(self):
self.getMsg('topic set foo')
self.assertResponse('topic refresh', 'foo')
self.getMsg('topic remove 1')
refreshError = 'Error: I haven\'t yet set the topic in #test.'
self.assertResponse('topic refresh', refreshError)
def testUndo(self):
try:
original = conf.supybot.plugins.Topic.format()
conf.supybot.plugins.Topic.format.setValue('$topic')
self.assertResponse('topic set ""', '')
self.assertResponse('topic add foo', 'foo')
self.assertResponse('topic add bar', 'foo | bar')
self.assertResponse('topic add baz', 'foo | bar | baz')
self.assertResponse('topic undo', 'foo | bar')
self.assertResponse('topic undo', 'foo')
self.assertResponse('topic undo', '')
finally:
conf.supybot.plugins.Topic.format.setValue(original)
def testUndoRedo(self):
try:
original = conf.supybot.plugins.Topic.format()
conf.supybot.plugins.Topic.format.setValue('$topic')
self.assertResponse('topic set ""', '')
self.assertResponse('topic add foo', 'foo')
self.assertResponse('topic add bar', 'foo | bar')
self.assertResponse('topic add baz', 'foo | bar | baz')
self.assertResponse('topic undo', 'foo | bar')
self.assertResponse('topic undo', 'foo')
self.assertResponse('topic undo', '')
self.assertResponse('topic redo', 'foo')
self.assertResponse('topic redo', 'foo | bar')
self.assertResponse('topic redo', 'foo | bar | baz')
self.assertResponse('topic undo', 'foo | bar')
self.assertResponse('topic undo', 'foo')
self.assertResponse('topic redo', 'foo | bar')
self.assertResponse('topic undo', 'foo')
self.assertResponse('topic redo', 'foo | bar')
finally:
conf.supybot.plugins.Topic.format.setValue(original)
def testSwap(self):
original = conf.supybot.plugins.Topic.format()
try:
conf.supybot.plugins.Topic.format.setValue('$topic')
self.assertResponse('topic set ""', '')
self.assertResponse('topic add foo', 'foo')
self.assertResponse('topic add bar', 'foo | bar')
self.assertResponse('topic add baz', 'foo | bar | baz')
self.assertResponse('topic swap 1 2', 'bar | foo | baz')
self.assertResponse('topic swap 1 -1', 'baz | foo | bar')
self.assertError('topic swap -1 -1')
self.assertError('topic swap 2 -2')
self.assertError('topic swap 1 -3')
self.assertError('topic swap -2 2')
self.assertError('topic swap -3 1')
finally:
conf.supybot.plugins.Topic.format.setValue(original)
def testDefault(self):
self.assertError('topic default')
try:
original = conf.supybot.plugins.Topic.default()
conf.supybot.plugins.Topic.default.setValue('foo bar baz')
self.assertResponse('topic default', 'foo bar baz')
finally:
conf.supybot.plugins.Topic.default.setValue(original)
def testTopic(self):
original = conf.supybot.plugins.Topic.format()
try:
conf.supybot.plugins.Topic.format.setValue('$topic')
self.assertError('topic addd') # Error to send too many args.
self.assertResponse('topic add foo', 'foo')
self.assertResponse('topic add bar', 'foo | bar')
self.assertResponse('topic', 'foo | bar')
finally:
conf.supybot.plugins.Topic.format.setValue(original)
def testSeparator(self):
original = conf.supybot.plugins.Topic.format()
try:
conf.supybot.plugins.Topic.format.setValue('$topic')
self.assertResponse('topic add foo', 'foo')
self.assertResponse('topic add bar', 'foo | bar')
self.assertResponse('topic add baz', 'foo | bar | baz')
self.assertResponse('topic separator ::', 'foo :: bar :: baz')
self.assertResponse('topic separator ||', 'foo || bar || baz')
self.assertResponse('topic separator |', 'foo | bar | baz')
finally:
conf.supybot.plugins.Topic.format.setValue(original)
def testFit(self):
original = conf.supybot.plugins.Topic.format()
try:
conf.supybot.plugins.Topic.format.setValue('$topic')
self.irc.state.supported['TOPICLEN'] = 20
self.assertResponse('topic fit foo', 'foo')
self.assertResponse('topic fit bar', 'foo | bar')
self.assertResponse('topic fit baz', 'foo | bar | baz')
self.assertResponse('topic fit qux', 'bar | baz | qux')
finally:
conf.supybot.plugins.Topic.format.setValue(original)
self.irc.state.supported.pop('TOPICLEN', None)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Test the PB change source.
"""
import mock
from twisted.trial import unittest
from twisted.internet import defer
from buildbot.changes import pb
from buildbot.test.util import changesource, pbmanager
from buildbot.util import epoch2datetime
class TestPBChangeSource(
changesource.ChangeSourceMixin,
pbmanager.PBManagerMixin,
unittest.TestCase):
def setUp(self):
self.setUpPBChangeSource()
d = self.setUpChangeSource()
@d.addCallback
def setup(_):
self.master.pbmanager = self.pbmanager
return d
def test_registration_no_slaveport(self):
return self._test_registration(None,
user='alice', passwd='sekrit')
def test_registration_global_slaveport(self):
return self._test_registration(('9999', 'alice', 'sekrit'),
slavePort='9999', user='alice', passwd='sekrit')
def test_registration_custom_port(self):
return self._test_registration(('8888', 'alice', 'sekrit'),
user='alice', passwd='sekrit', port='8888')
def test_registration_no_userpass(self):
return self._test_registration(('9939', 'change', 'changepw'),
slavePort='9939')
def test_registration_no_userpass_no_global(self):
return self._test_registration(None)
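    # Helper: attach a PBChangeSource built with constr_kwargs, reconfigure it
    # against a master config whose slavePortnum is slavePort, and verify that
    # the expected PB registration (or none) is made and later torn down.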
@defer.inlineCallbacks
def _test_registration(self, exp_registration, slavePort=None,
**constr_kwargs):
config = mock.Mock()
config.slavePortnum = slavePort
self.attachChangeSource(pb.PBChangeSource(**constr_kwargs))
self.startChangeSource()
yield self.changesource.reconfigService(config)
if exp_registration:
self.assertRegistered(*exp_registration)
else:
self.assertNotRegistered()
yield self.stopChangeSource()
if exp_registration:
self.assertUnregistered(*exp_registration)
self.assertEqual(self.changesource.registration, None)
def test_perspective(self):
self.attachChangeSource(pb.PBChangeSource('alice', 'sekrit', port='8888'))
persp = self.changesource.getPerspective(mock.Mock(), 'alice')
self.assertIsInstance(persp, pb.ChangePerspective)
def test_describe(self):
cs = pb.PBChangeSource()
self.assertSubstring("PBChangeSource", cs.describe())
def test_describe_prefix(self):
cs = pb.PBChangeSource(prefix="xyz")
self.assertSubstring("PBChangeSource", cs.describe())
self.assertSubstring("xyz", cs.describe())
def test_describe_int(self):
cs = pb.PBChangeSource(port=9989)
self.assertSubstring("PBChangeSource", cs.describe())
@defer.inlineCallbacks
def test_reconfigService_no_change(self):
config = mock.Mock()
self.attachChangeSource(pb.PBChangeSource(port='9876'))
self.startChangeSource()
yield self.changesource.reconfigService(config)
self.assertRegistered('9876', 'change', 'changepw')
yield self.stopChangeSource()
self.assertUnregistered('9876', 'change', 'changepw')
@defer.inlineCallbacks
def test_reconfigService_default_changed(self):
config = mock.Mock()
config.slavePortnum = '9876'
self.attachChangeSource(pb.PBChangeSource())
self.startChangeSource()
yield self.changesource.reconfigService(config)
self.assertRegistered('9876', 'change', 'changepw')
config.slavePortnum = '1234'
yield self.changesource.reconfigService(config)
self.assertUnregistered('9876', 'change', 'changepw')
self.assertRegistered('1234', 'change', 'changepw')
yield self.stopChangeSource()
self.assertUnregistered('1234', 'change', 'changepw')
class TestChangePerspective(unittest.TestCase):
def setUp(self):
self.added_changes = []
self.master = mock.Mock()
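        # Replace master.addChange with a stub that records each submitted
        # change dict so the tests can assert on exactly what was added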
def addChange(**chdict):
self.added_changes.append(chdict)
return defer.succeed(mock.Mock())
self.master.addChange = addChange
def test_addChange_noprefix(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(dict(who="bar", files=['a']))
def check(_):
self.assertEqual(self.added_changes,
[ dict(author="bar", files=['a']) ])
d.addCallback(check)
return d
def test_addChange_codebase(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(dict(who="bar", files=[], codebase='cb'))
def check(_):
self.assertEqual(self.added_changes,
[ dict(author="bar", files=[], codebase='cb') ])
d.addCallback(check)
return d
def test_addChange_prefix(self):
cp = pb.ChangePerspective(self.master, 'xx/')
d = cp.perspective_addChange(
dict(who="bar", files=['xx/a', 'yy/b']))
def check(_):
self.assertEqual(self.added_changes,
[ dict(author="bar", files=['a']) ])
d.addCallback(check)
return d
def test_addChange_sanitize_None(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(
dict(project=None, revlink=None, repository=None)
)
def check(_):
self.assertEqual(self.added_changes,
[ dict(project="", revlink="", repository="",
files=[]) ])
d.addCallback(check)
return d
def test_addChange_when_None(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(
dict(when=None)
)
def check(_):
self.assertEqual(self.added_changes,
[ dict(when_timestamp=None, files=[]) ])
d.addCallback(check)
return d
def test_addChange_files_tuple(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(
dict(files=('a', 'b'))
)
def check(_):
self.assertEqual(self.added_changes,
[ dict(files=['a', 'b']) ])
d.addCallback(check)
return d
def test_addChange_unicode(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(dict(author=u"\N{SNOWMAN}",
comments=u"\N{SNOWMAN}",
files=[u'\N{VERY MUCH GREATER-THAN}']))
def check(_):
self.assertEqual(self.added_changes,
[ dict(author=u"\N{SNOWMAN}",
comments=u"\N{SNOWMAN}",
files=[u'\N{VERY MUCH GREATER-THAN}']) ])
d.addCallback(check)
return d
def test_addChange_unicode_as_bytestring(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(dict(author=u"\N{SNOWMAN}".encode('utf8'),
comments=u"\N{SNOWMAN}".encode('utf8'),
files=[u'\N{VERY MUCH GREATER-THAN}'.encode('utf8')]))
def check(_):
self.assertEqual(self.added_changes,
[ dict(author=u"\N{SNOWMAN}",
comments=u"\N{SNOWMAN}",
files=[u'\N{VERY MUCH GREATER-THAN}']) ])
d.addCallback(check)
return d
def test_addChange_non_utf8_bytestring(self):
cp = pb.ChangePerspective(self.master, None)
bogus_utf8 = '\xff\xff\xff\xff'
replacement = bogus_utf8.decode('utf8', 'replace')
d = cp.perspective_addChange(dict(author=bogus_utf8, files=['a']))
def check(_):
self.assertEqual(self.added_changes,
[ dict(author=replacement, files=['a']) ])
d.addCallback(check)
return d
def test_addChange_old_param_names(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(dict(isdir=1, who='me', when=1234,
files=[]))
def check(_):
self.assertEqual(self.added_changes,
[ dict(is_dir=1, author='me', files=[],
when_timestamp=epoch2datetime(1234)) ])
d.addCallback(check)
return d
def test_createUserObject_git_src(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(dict(who="c <h@c>", src='git'))
def check_change(_):
self.assertEqual(self.added_changes, [ dict(author="c <h@c>",
files=[],
src='git') ])
d.addCallback(check_change)
return d
|