repo_name (string, length 6-100) | path (string, length 4-294) | copies (string, length 1-5) | size (string, length 4-6) | content (string, length 606-896k) | license (string, 15 classes)
---|---|---|---|---|---
bealdav/OCB | addons/base_gengo/controller/gengo_callback.py | 362 | 2326 | # -*- coding: utf-8 -*-
import openerp
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from werkzeug.wrappers import BaseResponse as Response
import json
class website_gengo(http.Controller):
def get_gengo_key(self, cr):
icp = request.registry['ir.config_parameter']
return icp.get_param(cr, SUPERUSER_ID, request.registry['base.gengo.translations'].GENGO_KEY, default="")
@http.route('/website/gengo_callback', type='http', auth='none')
def gengo_callback(self, **post):
print "IN website/gengo_callback"
cr, uid, context = request.cr, openerp.SUPERUSER_ID, request.context
translation_pool = request.registry['ir.translation']
if post and post.get('job') and post.get('pgk'):
if post.get('pgk') != self.get_gengo_key(cr):
return Response("Bad authentication", status=104)
job = json.loads(post['job'], 'utf-8')
tid = job.get('custom_data', False)
if (job.get('status') == 'approved') and tid:
term = translation_pool.browse(cr, uid, int(tid), context=context)
if term.src != job.get('body_src'):
return Response("Text Altered - Not saved", status=418)
domain = [
'|',
('id', "=", int(tid)),
'&', '&', '&', '&', '&',
('state', '=', term.state),
('gengo_translation', '=', term.gengo_translation),
('src', "=", term.src),
('type', "=", term.type),
('name', "=", term.name),
('lang', "=", term.lang),
#('order_id', "=", term.order_id),
]
all_ir_translations = translation_pool.search(cr, uid, domain, context=context or {})
if all_ir_translations:
vals = {'state': 'translated', 'value': job.get('body_tgt')}
translation_pool.write(cr, uid, all_ir_translations, vals, context=context)
return Response("OK", status=200)
else:
return Response("No terms found", status=412)
return Response("Not saved", status=418)
| agpl-3.0 |
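For reference, a minimal sketch of the POST payload the controller above expects, reconstructed from the field names it reads ('job', 'pgk', 'custom_data', 'body_src', 'body_tgt'); the endpoint host and key value are placeholders, and this is not the official Gengo webhook specification (Python 2, matching the file):

    import json
    import urllib

    payload = {
        'pgk': 'GENGO-PUBLIC-KEY-PLACEHOLDER',
        'job': json.dumps({
            'status': 'approved',
            'custom_data': '42',  # ir.translation id, sent back as a string
            'body_src': 'Source text',
            'body_tgt': 'Texte traduit',
        }),
    }
    # Passing a data argument makes urllib.urlopen issue a POST.
    urllib.urlopen('http://localhost:8069/website/gengo_callback',
                   urllib.urlencode(payload))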
google-research/google-research | meta_reward_learning/semantic_parsing/nsm/executor_factory.py | 1 | 35727 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to interact with knowledge graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from six.moves import zip
class Executor(object):
"""Executors implements the basic subroutines and provide
the API to the computer.
"""
def get_api(self, config):
"""Provide API to the computer."""
raise NotImplementedError()
def get_simple_type_hierarchy():
type_hierarchy = {
'entity_list': ['atom_list'],
'list': [],
'num_list': ['ordered_list'],
'datetime_list': ['ordered_list'],
'ordered_list': ['atom_list'],
'atom_list': ['list'],
'string_list': ['list'],
'string_property': ['property'],
# Atom as it doesn't have sub-parts.
'ordered_property': ['atom_property'],
'entity_property': ['atom_property'],
'atom_property': ['property'],
'datetime_property': ['ordered_property'],
'num_property': ['ordered_property'],
'num': [],
'int': ['num'],
'property': [],
'symbol': [],
'function': ['head'],
'head': [],
'primitive_function': ['function'],
'global_primitive_function': ['primitive_function'],
'<ERROR>': []
}
return type_hierarchy
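# Illustrative helper (not part of the original module): the hierarchy above
# maps each type to its direct parents, so a subtype check is a transitive walk.
def _is_subtype(type_hierarchy, tp, ancestor):
    """Return True if tp equals or transitively derives from ancestor."""
    if tp == ancestor:
        return True
    return any(_is_subtype(type_hierarchy, parent, ancestor)
               for parent in type_hierarchy.get(tp, []))
# e.g. _is_subtype(get_simple_type_hierarchy(), 'num_property', 'property') -> True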
class SimpleKGExecutor(Executor):
"""This executor assumes that the knowledge graph is
encoded as a dictionary.
"""
def __init__(self, kg_info):
"""Given a knowledge graph, the number properties and
the datetime properties, initialize an executor that
implements the basic subroutines.
Args:
kg_info: a dictionary with four keys: 'kg', 'num_props', 'datetime_props' and 'props'.
"""
self.kg = kg_info['kg']
self.num_props = kg_info['num_props']
self.datetime_props = kg_info['datetime_props']
self.props = kg_info['props']
def hop(self, entities, prop, keep_dup=False):
"""Get the property of a list of entities."""
if keep_dup:
result = []
else:
result = set()
for ent in entities:
try:
if keep_dup:
result += self.kg[ent][prop]
else:
result = result.union(self.kg[ent][prop])
except KeyError:
continue
return list(result)
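# Example: with self.kg = {'row_0': {'name': ['a']}, 'row_1': {'name': ['a']}},
# hop(['row_0', 'row_1'], 'name') -> ['a'] (the set drops the duplicate), while
# hop(['row_0', 'row_1'], 'name', keep_dup=True) -> ['a', 'a'].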
def filter_equal(self, ents_1, ents_2, prop):
"""From ents_1, filter out the entities whose property equal to ents_2."""
result = []
for ent in ents_1:
if set(self.hop([ent], prop)) == set(ents_2):
result.append(ent)
return result
def filter_not_equal(self, ents_1, ents_2, prop):
"""From ents_1, filter out the entities whose property equal to ents_2."""
result = []
for ent in ents_1:
if set(self.hop([ent], prop)) != set(ents_2):
result.append(ent)
return result
def get_num_prop_val(self, ent, prop):
"""Get the value of an entities' number property."""
# If there are multiple values, then take the first one.
prop_str_list = self.hop([ent], prop)
try:
prop_str = prop_str_list[0]
prop_val = float(prop_str)
except (ValueError, IndexError):
prop_val = None
return prop_val
def get_datetime_prop_val(self, ent, prop):
"""Get the value of an entities' date time property."""
# If there are multiple values, then take the first one.
prop_str_list = self.hop([ent], prop)
try:
prop_str = prop_str_list[0]
if prop_str[0] == '-':
sign = -1
prop_str = prop_str[1:]
else:
sign = 1
result = [float(n) for n in prop_str.replace('x', '0').split('-')]
day = 0
for n, unit in zip(result, [365, 30, 1]):
day += n * unit
day *= sign
prop_val = day
except (ValueError, IndexError):
prop_val = None
return prop_val
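# Worked example: '2001-03-14' parses to [2001.0, 3.0, 14.0]; weighting by
# [365, 30, 1] gives 2001*365 + 3*30 + 14 = 730469 "days". A partial date
# such as 'xxxx-07-xx' first becomes '0000-07-00', i.e. 7*30 = 210.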
def sort_select(self, entities, prop, ind):
"""Sort the entities using prop then select the i-th one."""
if prop in self.num_props:
get_val = self.get_num_prop_val
elif prop in self.datetime_props:
get_val = self.get_datetime_prop_val
else:
raise ValueError(prop)
vals = []
new_ents = []
for ent in entities:
val = get_val(ent, prop)
if val is not None:
new_ents.append(ent)
vals.append(val)
ent_vals = list(zip(new_ents, vals))
best_ent_val = sorted(ent_vals, key=lambda x: x[1])[ind]
best_score = best_ent_val[1]
result = [ent for ent, val in ent_vals if val == best_score]
return result
# return [best_ent_val[0]]
def argmax(self, entities, prop):
return self.sort_select(entities, prop, -1)
def argmin(self, entities, prop):
return self.sort_select(entities, prop, 0)
def valid_props(self,
source_mids,
token_val_dict,
target_mids=None,
condition_fn=None):
connected_props = self.get_props(
source_mids, target_mids, condition_fn=condition_fn)
valid_tks = []
for tk, prop in six.iteritems(token_val_dict):
if prop in connected_props:
valid_tks.append(tk)
return valid_tks
def is_connected(self, source_ents, target_ents, prop):
return set(self.hop(source_ents, prop)) == set(target_ents)
def get_props(self,
source_ents,
target_ents=None,
debug=False,
condition_fn=None):
"""Get the properties that goes from source to targets."""
props = set()
if condition_fn is None:
condition_fn = self.is_connected
if debug:
print('=' * 100)
for ent in source_ents:
if debug:
print('@' * 20)
print(ent)
if ent in self.kg:
ent_props = list(self.kg[ent].keys())
if target_ents is not None:
for p in ent_props:
if debug:
print()
print(p)
print(self.hop([ent], p))
# if set(self.hop([ent], p)) == set(target_ents):
if condition_fn([ent], target_ents, p):
props.add(p)
else:
props = props.union(ent_props)
if debug:
print('in get props')
print(source_ents)
print(target_ents)
print(props)
print('=' * 100)
return list(props)
def autocomplete_hop(self, exp, tokens, token_vals):
l = len(exp)
if l == 2: # second argument is a property.
source_mids = exp[1]
token_val_dict = dict(list(zip(tokens, token_vals)))
valid_tks = self.valid_props(source_mids, token_val_dict)
else:
valid_tks = tokens
return valid_tks
def autocomplete_argm(self, exp, tokens, token_vals, debug=False):
l = len(exp)
if l == 1: # first argument has more than one entity.
valid_tks = [tk for tk, val in zip(tokens, token_vals) if len(val) > 1]
elif l == 2: # second argument is a property.
source_mids = exp[1]
token_val_dict = dict(list(zip(tokens, token_vals)))
valid_tks = self.valid_props(source_mids, token_val_dict)
else:
valid_tks = tokens
if debug:
print('*' * 30)
print(exp)
print(tokens)
print(valid_tks)
print('*' * 30)
return valid_tks
def autocomplete_filter_equal(self, exp, tokens, token_vals, debug=False):
l = len(exp)
if l == 1:
valid_tks = [tk for tk, val in zip(tokens, token_vals) if len(val) > 1]
elif l == 2:
valid_tks = []
for tk, val in zip(tokens, token_vals):
# The second argument must have some connection with
# the first argument.
if self.get_props(exp[1], val):
valid_tks.append(tk)
elif l == 3:
token_val_dict = dict(list(zip(tokens, token_vals)))
valid_tks = self.valid_props(exp[1], token_val_dict, exp[2])
else:
raise ValueError('Expression is too long: {}'.format(l))
if debug:
print()
print('+' * 30)
print('in filter equal')
print(exp)
print(tokens)
print(valid_tks)
print('+' * 30)
return valid_tks
def get_api(self):
func_dict = collections.OrderedDict()
func_dict['hop'] = dict(
name='hop',
args=[{
'types': ['entity_list']
}, {
'types': ['property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_hop,
value=self.hop)
func_dict['filter_equal'] = dict(
name='filter_equal',
args=[{
'types': ['entity_list']
}, {
'types': ['entity_list']
}, {
'types': ['property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_filter_equal,
value=self.filter_equal)
func_dict['argmax'] = dict(
name='argmax',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_argm,
value=self.argmax)
func_dict['argmin'] = dict(
name='argmin',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_argm,
value=self.argmin)
constant_dict = collections.OrderedDict()
for p in self.props:
if p in self.num_props:
tp = 'num_property'
elif p in self.datetime_props:
tp = 'datetime_property'
else:
tp = 'string_property'
constant_dict[p] = dict(value=p, type=tp, name=p)
type_hierarchy = get_simple_type_hierarchy()
return dict(
type_hierarchy=type_hierarchy,
func_dict=func_dict,
constant_dict=constant_dict)
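# Usage sketch (assumes a kg_info dict shaped as __init__ expects):
#   executor = SimpleKGExecutor(kg_info)
#   api = executor.get_api()
#   hop = api['func_dict']['hop']['value']
#   hop(['some_entity'], 'some_property')  # -> list of property values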
class TableExecutor(SimpleKGExecutor):
"""The executor for writing programs that processes simple Tables."""
def __init__(self, table_info):
super(TableExecutor, self).__init__(table_info)
self.n_rows = len(table_info['row_ents'])
def filter_ge(self, ents_1, nums, prop):
"""Filter out entities whose prop >= nums."""
result = []
for ent in ents_1:
vals = set(self.hop([ent], prop))
for val in vals:
if all([(val >= x) for x in nums]):
result.append(ent)
break
return result
def filter_greater(self, ents_1, nums, prop):
"""Filter out entities whose prop > nums."""
result = []
for ent in ents_1:
vals = set(self.hop([ent], prop))
for val in vals:
if all([(val > x) for x in nums]):
result.append(ent)
break
return result
def filter_le(self, ents_1, nums, prop):
"""Filter out entities whose prop <= nums."""
result = []
for ent in ents_1:
vals = set(self.hop([ent], prop))
for val in vals:
if all([(val <= x) for x in nums]):
result.append(ent)
break
return result
def filter_less(self, ents_1, nums, prop):
"""Filter out entities whose prop < nums."""
result = []
for ent in ents_1:
vals = set(self.hop([ent], prop))
for val in vals:
if all([(val < x) for x in nums]):
result.append(ent)
break
return result
def filter_str_contain_any(self, ents, string_list, prop):
"""Filter out entities whose prop contains any of the strings."""
result = []
for ent in ents:
str_val_list = self.hop([ent], prop)
assert len(str_val_list) == 1
str_val = str_val_list[0]
for string in string_list:
if string in str_val:
result.append(ent)
break
return result
def filter_str_contain_not_any(self, ents, string_list, prop):
"""Filter out entities, whose prop doesn't contain any of the strings."""
result = []
for ent in ents:
str_val_list = self.hop([ent], prop)
# Make sure that entity only has one value for the prop.
assert len(str_val_list) == 1
str_val = str_val_list[0]
# If any one of the string is contained by the cell,
# then pass. Only add to the result when none of the
# string is in the cell.
for string in string_list:
if string in str_val:
break
else:
result.append(ent)
return result
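# Note: the loop above relies on Python's for/else semantics; the else branch
# runs only when the loop finishes without hitting break, i.e. when none of
# the strings occur in the cell value.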
def autocomplete_filter_str_contain_any(self,
exp,
tokens,
token_vals,
debug=False):
"""Auto-complete for filter_str_contain_any function."""
l = len(exp)
if l == 1:
valid_tks = [tk for tk, val in zip(tokens, token_vals) if len(val) > 1]
elif l == 2:
# Since all the strings are in the table, no need to
# filter any more. Keep the ones that have at least
# one string in it.
valid_tks = [tk for tk, val in zip(tokens, token_vals) if len(val) > 0]
elif l == 3:
valid_tks = []
token_val_dict = dict(list(zip(tokens, token_vals)))
source_ents = exp[1]
string_list = exp[2]
for tk in tokens:
is_valid = False
prop = token_val_dict[tk]
str_val_list = self.hop(source_ents, prop)
# If one of the str_val contains any one of the
# string, then we can use this property.
for str_val in str_val_list:
for string in string_list:
if string in str_val:
is_valid = True
break
if is_valid:
valid_tks.append(tk)
break
else:
raise ValueError('Expression is too long: {}'.format(l))
if debug:
print()
print('+' * 30)
print('in filter equal')
print(exp)
print(tokens)
print(valid_tks)
print('+' * 30)
return valid_tks
# Next and previous
def next(self, rows):
"""Select all the rows that is right below the given rows respectively."""
assert rows
assert rows[0][:4] == 'row_'
# rows are named in the pattern row_0, row_1, ...
row_ids = [int(row_str[4:]) for row_str in rows]
new_row_ids = [(i + 1) for i in row_ids if i + 1 < self.n_rows]
if new_row_ids:
result_rows = ['row_{}'.format(i) for i in new_row_ids]
# result_rows = ['row_{}'.format(max(new_row_ids))]
else:
result_rows = []
return result_rows
def previous(self, rows):
"""Select all the rows that is right above the given rows respectively."""
assert rows
assert rows[0][:4] == 'row_'
row_ids = [int(row_str[4:]) for row_str in rows]
new_row_ids = [(i - 1) for i in row_ids if i - 1 >= 0]
if new_row_ids:
result_rows = ['row_{}'.format(i) for i in new_row_ids]
# result_rows = ['row_{}'.format(min(new_row_ids))]
else:
result_rows = []
return result_rows
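# Example: with n_rows = 3, next(['row_0', 'row_2']) -> ['row_1'] (row_2 is
# the last row, so it contributes nothing), and likewise
# previous(['row_0', 'row_2']) -> ['row_1'] (row_0 has no row above it).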
def autocomplete_next(self, exp, tokens, token_vals):
"""Autocompletion for next function."""
l = len(exp)
if l == 1:
# If there are any non-empty result, then it is available.
valid_tks = []
for tk, val in zip(tokens, token_vals):
if len(val) > 0 and tk != 'all_rows' and self.next(val):
valid_tks.append(tk)
else:
raise ValueError('Wrong length: {}.'.format(l))
return valid_tks
def autocomplete_previous(self, exp, tokens, token_vals):
"""Autocompletion for previous function."""
l = len(exp)
if l == 1:
# If there are any non-empty result, then it is available.
valid_tks = []
for tk, val in zip(tokens, token_vals):
if len(val) > 0 and tk != 'all_rows' and self.previous(val):
valid_tks.append(tk)
else:
raise ValueError('Wrong length: {}.'.format(l))
return valid_tks
# First and last
def first(self, rows):
"""Take the first row (the one with minimum index) in all the rows."""
assert len(rows) > 1
assert rows[0][:4] == 'row_'
# Return the row with the smallest id.
row_ids = [int(row_str[4:]) for row_str in rows]
result_row_id = min(row_ids)
result_rows = ['row_{}'.format(result_row_id)]
return result_rows
def last(self, rows):
"""Take the last row (the one with maximum index) in all the rows."""
assert len(rows) > 1
assert rows[0][:4] == 'row_'
# Return the row with the largest id.
row_ids = [int(row_str[4:]) for row_str in rows]
result_row_id = max(row_ids)
result_rows = ['row_{}'.format(result_row_id)]
return result_rows
def autocomplete_first_last(self, exp, tokens, token_vals):
"""Autocompletion for both first and last."""
l = len(exp)
if l == 1:
# Only use first or last when you have more than one
# entity.
valid_tks = [tk for tk, val in zip(tokens, token_vals) if len(val) > 1]
else:
raise ValueError('Wrong length: {}.'.format(l))
return valid_tks
# Aggregation functions.
def count(self, ents):
return [len(ents)]
def maximum(self, ents, prop):
vals = self.hop(ents, prop)
return [max(vals)]
def minimum(self, ents, prop):
vals = self.hop(ents, prop)
try:
result = [min(vals)]
except Exception as e:
print(ents, prop)
raise e
return result
def mode(self, ents, prop):
"""Return the value that appears the most in the prop of the entities."""
vals = self.hop(ents, prop, keep_dup=True)
count_dict = {}
for v in vals:
if v in count_dict:
count_dict[v] += 1
else:
count_dict[v] = 1
max_count = 0
max_val_list = []
for val, count in six.iteritems(count_dict):
if count > max_count:
max_count = count
max_val_list = [val]
elif count == max_count:
max_val_list.append(val)
return max_val_list
def sum(self, ents, prop):
vals = self.hop(ents, prop, keep_dup=True)
return [sum(vals)]
def average(self, ents, prop):
vals = self.hop(ents, prop, keep_dup=True)
return [float(sum(vals)) / len(vals)]
def autocomplete_aggregation(self, exp, tokens, token_vals):
"""Autocomplete for aggregation functions."""
l = len(exp)
if l == 1:
# Only use aggregation when you have more than one
# entity, otherwise just use hop.
valid_tks = [tk for tk, val in zip(tokens, token_vals) if len(val) > 1]
else:
# For the second argument, all the props with the
# right type (ordered_property) can be used.
props = set(self.get_props(exp[1]))
valid_tks = []
for tk, val in zip(tokens, token_vals):
if val in props:
valid_tks.append(tk)
return valid_tks
def same(self, ents, prop, namespace):
"""Find the entities that has the prop as the given entity."""
# Can only work with one entity.
assert len(ents) == 1
vals_1 = self.hop(ents, prop)
all_rows = namespace['all_rows']['value']
same_ents = self.filter_equal(all_rows, vals_1, prop)
# Remove itself.
same_ents.remove(ents[0])
return same_ents
def autocomplete_same(self, exp, tokens, token_vals, namespace):
"""Autocomplete for same function."""
l = len(exp)
if l == 1:
valid_tks = [tk for tk, val in zip(tokens, token_vals) if len(val) == 1]
elif l == 2:
props = set(self.get_props(exp[1]))
valid_tks = []
for tk, val in zip(tokens, token_vals):
if val in props:
valid_tks.append(tk)
else:
raise ValueError('Wrong length {}'.format(l))
return valid_tks
def diff(self, ents_1, ents_2, prop):
"""Return the difference of two entities in prop."""
assert len(ents_1) == 1
assert len(ents_2) == 1
val_1 = self.hop(ents_1, prop)[0]
val_2 = self.hop(ents_2, prop)[0]
return [abs(val_1 - val_2)]
def autocomplete_diff(self, exp, tokens, token_vals):
"""Autocomplete for diff function."""
l = len(exp)
if l == 1:
valid_tks = [tk for tk, val in zip(tokens, token_vals) if len(val) == 1]
# There must be at least two valid variables to apply
# diff.
if len(valid_tks) < 2:
valid_tks = []
elif l == 2:
valid_tks = [
tk for tk, val in zip(tokens, token_vals)
if (len(val) == 1 and val != exp[1])
]
else:
props = set(self.get_props(exp[1]))
props = props.intersection(self.get_props(exp[2]))
valid_tks = []
for tk, val in zip(tokens, token_vals):
if val in props:
valid_tks.append(tk)
return valid_tks
def return_all_tokens(self, unused_exp, tokens, unused_token_vals):
return tokens
def get_api(self):
"""Get the functions, constants and type hierarchy."""
func_dict = collections.OrderedDict()
def hop_return_type_fn(arg1_type, arg2_type):
if arg2_type == 'num_property':
return 'num_list'
elif arg2_type == 'string_property':
return 'string_list'
elif arg2_type == 'datetime_property':
return 'datetime_list'
elif arg2_type == 'entity_property':
return 'entity_list'
else:
raise ValueError('Unknown type {}'.format(arg2_type))
func_dict['hop'] = dict(
name='hop',
args=[{
'types': ['entity_list']
}, {
'types': ['property']
}],
return_type=hop_return_type_fn,
autocomplete=self.autocomplete_hop,
type='primitive_function',
value=self.hop)
# Only use filter equal for number and date and
# entities. Use filter_str_contain for string values.
func_dict['filter_eq'] = dict(
name='filter_eq',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_filter_equal,
type='primitive_function',
value=self.filter_equal)
func_dict['filter_not_eq'] = dict(
name='filter_not_eq',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_filter_equal,
type='primitive_function',
value=self.filter_not_equal)
func_dict['argmax'] = dict(
name='argmax',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_argm,
type='primitive_function',
value=self.argmax)
func_dict['argmin'] = dict(
name='argmin',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_argm,
type='primitive_function',
value=self.argmin)
func_dict['same'] = dict(
name='same',
args=[{
'types': ['entity_list']
}, {
'types': ['property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_same,
type='global_primitive_function',
value=self.same)
func_dict['first'] = dict(
name='first',
args=[{
'types': ['entity_list']
}],
return_type='entity_list',
autocomplete=self.autocomplete_first_last,
type='primitive_function',
value=self.first)
func_dict['last'] = dict(
name='last',
args=[{
'types': ['entity_list']
}],
return_type='entity_list',
autocomplete=self.autocomplete_first_last,
type='primitive_function',
value=self.last)
func_dict['next'] = dict(
name='next',
args=[{
'types': ['entity_list']
}],
return_type='entity_list',
autocomplete=self.autocomplete_next,
type='primitive_function',
value=self.next)
func_dict['previous'] = dict(
name='previous',
args=[{
'types': ['entity_list']
}],
return_type='entity_list',
autocomplete=self.autocomplete_previous,
type='primitive_function',
value=self.previous)
func_dict['count'] = dict(
name='count',
args=[{
'types': ['entity_list']
}],
return_type='num',
autocomplete=self.return_all_tokens,
type='primitive_function',
value=self.count)
func_dict['filter_str_contain_any'] = dict(
name='filter_str_contain_any',
args=[{
'types': ['entity_list']
}, {
'types': ['string_list']
}, {
'types': ['string_property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_filter_str_contain_any,
type='primitive_function',
value=self.filter_str_contain_any)
func_dict['filter_str_contain_not_any'] = dict(
name='filter_str_contain_not_any',
args=[{
'types': ['entity_list']
}, {
'types': ['string_list']
}, {
'types': ['string_property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_filter_str_contain_any,
type='primitive_function',
value=self.filter_str_contain_not_any)
func_dict['filter_ge'] = dict(
name='filter_ge',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.return_all_tokens,
type='primitive_function',
value=self.filter_ge)
func_dict['filter_greater'] = dict(
name='filter_greater',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.return_all_tokens,
type='primitive_function',
value=self.filter_greater)
func_dict['filter_le'] = dict(
name='filter_le',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.return_all_tokens,
type='primitive_function',
value=self.filter_le)
func_dict['filter_less'] = dict(
name='filter_less',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.return_all_tokens,
type='primitive_function',
value=self.filter_less)
# aggregation functions.
for k, f in zip(['maximum', 'minimum'], [self.maximum, self.minimum]):
func_dict[k] = dict(
name=k,
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_property']
}],
return_type='ordered_list',
autocomplete=self.autocomplete_aggregation,
type='primitive_function',
value=f)
func_dict['mode'] = dict(
name='mode',
args=[{
'types': ['entity_list']
}, {
'types': ['property']
}],
return_type=hop_return_type_fn,
autocomplete=self.autocomplete_aggregation,
type='primitive_function',
value=self.mode)
func_dict['average'] = dict(
name='average',
args=[{
'types': ['entity_list']
}, {
'types': ['num_property']
}],
return_type='num',
autocomplete=self.autocomplete_aggregation,
type='primitive_function',
value=self.average)
func_dict['sum'] = dict(
name='sum',
args=[{
'types': ['entity_list']
}, {
'types': ['num_property']
}],
return_type='num',
autocomplete=self.autocomplete_aggregation,
type='primitive_function',
value=self.sum)
func_dict['diff'] = dict(
name='diff',
args=[{
'types': ['entity_list']
}, {
'types': ['entity_list']
}, {
'types': ['num_property']
}],
return_type='num',
autocomplete=self.autocomplete_diff,
type='primitive_function',
value=self.diff)
constant_dict = collections.OrderedDict()
for p in self.props:
if p in self.num_props:
tp = 'num_property'
elif p in self.datetime_props:
tp = 'datetime_property'
elif p.split('-')[-1] == 'entity':
tp = 'entity_property'
else:
tp = 'string_property'
constant_dict[p] = dict(value=p, type=tp, name=p)
type_hierarchy = get_simple_type_hierarchy()
return dict(
type_hierarchy=type_hierarchy,
func_dict=func_dict,
constant_dict=constant_dict)
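# Usage sketch (assumes table_info carries the 'kg', 'row_ents', 'props',
# 'num_props' and 'datetime_props' keys the constructors above expect):
#   executor = TableExecutor(table_info)
#   api = executor.get_api()
#   api['func_dict']['count']['value'](['row_0', 'row_1'])  # -> [2]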
def is_number(obj):
return isinstance(obj, int) or isinstance(obj, float)
class WikiTableExecutor(TableExecutor):
pass
class WikiSQLExecutor(TableExecutor):
def __init__(self,
table_info,
use_filter_str_contain=True,
use_filter_str_equal=False):
super(TableExecutor, self).__init__(table_info)
self.n_rows = len(table_info['row_ents'])
self.use_filter_str_equal = use_filter_str_equal
self.use_filter_str_contain = use_filter_str_contain
def hop(self, entities, prop, keep_dup=True):
"""Get the property of a list of entities."""
# Note this uses keep_dup=True as the default, which differs
# from the WikiTableQuestions experiments.
if keep_dup:
result = []
else:
result = set()
for ent in entities:
try:
if keep_dup:
result += self.kg[ent][prop]
else:
result = result.union(self.kg[ent][prop])
except KeyError:
continue
return list(result)
def get_api(self):
"""Get the functions, constants and type hierarchy."""
func_dict = collections.OrderedDict()
def hop_return_type_fn(arg1_type, arg2_type):
if arg2_type == 'num_property':
return 'num_list'
elif arg2_type == 'string_property':
return 'string_list'
elif arg2_type == 'datetime_property':
return 'datetime_list'
elif arg2_type == 'entity_property':
return 'entity_list'
else:
raise ValueError('Unknown type {}'.format(arg2_type))
func_dict['hop'] = dict(
name='hop',
args=[{
'types': ['entity_list']
}, {
'types': ['property']
}],
return_type=hop_return_type_fn,
autocomplete=self.autocomplete_hop,
type='primitive_function',
value=self.hop)
if self.use_filter_str_equal:
# Allow equal to work on every type.
func_dict['filter_eq'] = dict(
name='filter_eq',
args=[{
'types': ['entity_list']
}, {
'types': ['entity_list']
}, {
'types': ['property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_filter_equal,
type='primitive_function',
value=self.filter_equal)
else:
# Only use filter equal for number and date and
# entities. Use filter_str_contain for string values.
func_dict['filter_eq'] = dict(
name='filter_eq',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_filter_equal,
type='primitive_function',
value=self.filter_equal)
if self.use_filter_str_contain:
func_dict['filter_str_contain_any'] = dict(
name='filter_str_contain_any',
args=[{
'types': ['entity_list']
}, {
'types': ['string_list']
}, {
'types': ['string_property']
}],
return_type='entity_list',
autocomplete=self.autocomplete_filter_str_contain_any,
type='primitive_function',
value=self.filter_str_contain_any)
func_dict['filter_greater'] = dict(
name='filter_greater',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.return_all_tokens,
type='primitive_function',
value=self.filter_greater)
func_dict['filter_less'] = dict(
name='filter_less',
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_list']
}, {
'types': ['ordered_property']
}],
return_type='entity_list',
autocomplete=self.return_all_tokens,
type='primitive_function',
value=self.filter_less)
func_dict['count'] = dict(
name='count',
args=[{
'types': ['entity_list']
}],
return_type='num',
autocomplete=self.return_all_tokens,
type='primitive_function',
value=self.count)
# aggregation functions.
for k, f in zip(['maximum', 'minimum'], [self.maximum, self.minimum]):
func_dict[k] = dict(
name=k,
args=[{
'types': ['entity_list']
}, {
'types': ['ordered_property']
}],
return_type='ordered_list',
autocomplete=self.autocomplete_aggregation,
type='primitive_function',
value=f)
func_dict['average'] = dict(
name='average',
args=[{
'types': ['entity_list']
}, {
'types': ['num_property']
}],
return_type='num',
autocomplete=self.autocomplete_aggregation,
type='primitive_function',
value=self.average)
func_dict['sum'] = dict(
name='sum',
args=[{
'types': ['entity_list']
}, {
'types': ['num_property']
}],
return_type='num',
autocomplete=self.autocomplete_aggregation,
type='primitive_function',
value=self.sum)
constant_dict = collections.OrderedDict()
for p in self.props:
if p in self.num_props:
tp = 'num_property'
elif p in self.datetime_props:
tp = 'datetime_property'
elif p.split('-')[-1] == 'entity':
tp = 'entity_property'
else:
tp = 'string_property'
constant_dict[p] = dict(value=p, type=tp, name=p)
type_hierarchy = get_simple_type_hierarchy()
return dict(
type_hierarchy=type_hierarchy,
func_dict=func_dict,
constant_dict=constant_dict)
| apache-2.0 |
wzbozon/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test score better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
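For context, a minimal sketch of the API these tests exercise; note that the sklearn.learning_curve module was deprecated in scikit-learn 0.18 in favour of sklearn.model_selection and later removed, so this only runs against old releases:

    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.learning_curve import learning_curve
    from sklearn.linear_model import PassiveAggressiveClassifier

    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    train_sizes, train_scores, test_scores = learning_curve(
        PassiveAggressiveClassifier(n_iter=1, shuffle=False), X, y,
        cv=3, train_sizes=np.linspace(0.1, 1.0, 5))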
Vladimirek/DPexamples | omronDemo/py/www2plc.py | 1 | 1278 | from dataplicity.client.task import Task, onsignal
from omronTcpFins import OmronPLC
class Www2plc(Task):
"""PLC data writer"""
def pre_startup(self):
"""Called prior to running the project"""
# self.conf contains the data constants from the conf file
self.livecfg = self.conf.get('valsetconfig')
@onsignal('settings_update', 'valueset')
def on_settings_update(self, name, settings):
"""Catches the 'settings_update' signal for 'valueset'"""
# This signal is sent on startup and whenever settings are changed by the server
self.plcip = settings.get(self.livecfg, 'splcip')
self.plcport = settings.get_integer(self.livecfg, 'splcport', 9600)
self.memadr = settings.get(self.livecfg, 'smemaddr', "A0")
self.savevalue = settings.get_float(self.livecfg, 'savevalue', 0.0)
self.log.debug(" SettingValue updated: valueset {}:{}".format(self.memadr, self.savevalue))
#write data to Omron PLC:
plc = OmronPLC( )
plc.openFins( self.plcip, self.plcport)
plc.writeFloat( self.memadr, self.savevalue)
plc.close()
def poll(self):
"""Called on a schedule defined in dataplicity.conf"""
pass  # nothing to do regularly
| mit |
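A minimal sketch of the OmronPLC write path the task above drives; it assumes the omronTcpFins module from the same repository, the IP address is a placeholder, and memory address "A0" with port 9600 mirrors the defaults read in on_settings_update:

    from omronTcpFins import OmronPLC

    plc = OmronPLC()
    plc.openFins('192.168.0.10', 9600)  # placeholder PLC address
    plc.writeFloat('A0', 42.0)
    plc.close()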
bitifirefly/edx-platform | lms/djangoapps/mobile_api/course_info/views.py | 80 | 3082 | """
Views for course info API
"""
from django.http import Http404
from rest_framework import generics
from rest_framework.response import Response
from courseware.courses import get_course_info_section_module
from static_replace import make_static_urls_absolute, replace_static_urls
from openedx.core.lib.xblock_utils import get_course_update_items
from ..utils import mobile_view, mobile_course_access
@mobile_view()
class CourseUpdatesList(generics.ListAPIView):
"""
**Use Case**
Get the content for course updates.
**Example Request**
GET /api/mobile/v0.5/course_info/{organization}/{course_number}/{course_run}/updates
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK"
response along with an array of course updates. Each course update
contains the following values.
* content: The content, as an HTML string, of the course update.
* date: The date of the course update.
* id: The unique identifier of the update.
* status: Whether the update is visible or not.
"""
@mobile_course_access()
def list(self, request, course, *args, **kwargs):
course_updates_module = get_course_info_section_module(request, course, 'updates')
update_items = get_course_update_items(course_updates_module)
updates_to_show = [
update for update in update_items
if update.get("status") != "deleted"
]
for item in updates_to_show:
content = item['content']
content = replace_static_urls(
content,
course_id=course.id,
static_asset_path=course.static_asset_path)
item['content'] = make_static_urls_absolute(request, content)
return Response(updates_to_show)
@mobile_view()
class CourseHandoutsList(generics.ListAPIView):
"""
**Use Case**
Get the HTML for course handouts.
**Example Request**
GET /api/mobile/v0.5/course_info/{organization}/{course_number}/{course_run}/handouts
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK"
response along with the following value.
* handouts_html: The HTML for course handouts.
"""
@mobile_course_access()
def list(self, request, course, *args, **kwargs):
course_handouts_module = get_course_info_section_module(request, course, 'handouts')
if course_handouts_module:
handouts_html = course_handouts_module.data
handouts_html = replace_static_urls(
handouts_html,
course_id=course.id,
static_asset_path=course.static_asset_path)
handouts_html = make_static_urls_absolute(self.request, handouts_html)
return Response({'handouts_html': handouts_html})
else:
# course_handouts_module could be None if there are no handouts
raise Http404(u"No handouts for {}".format(unicode(course.id)))
| agpl-3.0 |
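A hedged sketch of calling the updates endpoint documented above; the host, course identifiers, and token are placeholders, and the response fields follow the docstring (content, date, id, status):

    import requests

    url = ('https://lms.example.com/api/mobile/v0.5/course_info/'
           'edX/DemoX/Demo_Course/updates')
    resp = requests.get(url, headers={'Authorization': 'Bearer <token>'})
    for update in resp.json():
        print(u'{date}: {status}'.format(**update))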
ravindrapanda/tensorflow | tensorflow/contrib/tensorboard/plugins/__init__.py | 148 | 1069 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensorboard plugins module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.tensorboard.plugins
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.contrib.tensorboard.plugins import trace
| apache-2.0 |
lanyuwen/openthread | tools/harness-automation/cases_R140/router_9_2_12.py | 18 | 1879 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Router_9_2_12(HarnessCase):
role = HarnessCase.ROLE_ROUTER
case = '9 2 12'
golden_devices_required = 3
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
deparkes/gds2ecp | seidel.py | 1 | 21210 | #
# Poly2Tri
# Copyright (c) 2009, Mason Green
# http://code.google.com/p/poly2tri/
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# Neither the name of Poly2Tri nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from random import shuffle
from math import atan2, sqrt
##
## Based on Raimund Seidel'e paper "A simple and fast incremental randomized
## algorithm for computing trapezoidal decompositions and for triangulating polygons"
## (Ported from poly2tri)
##
# Shear transform. May affect numerical robustness.
SHEAR = 1e-3
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
self.next, self.prev = None, None
def __sub__(self, other):
if isinstance(other, Point):
return Point(self.x - other.x, self.y - other.y)
else:
return Point(self.x - other, self.y - other)
def __add__(self, other):
if isinstance(other, Point):
return Point(self.x + other.x, self.y + other.y)
else:
return Point(self.x + other, self.y + other)
def __mul__(self, f):
return Point(self.x * f, self.y * f)
def __div__(self, a):
return Point(self.x / a, self.y / a)
def cross(self, p):
return self.x * p.y - self.y * p.x
def dot(self, p):
return self.x * p.x + self.y * p.y
def length(self):
return sqrt(self.x * self.x + self.y * self.y)
def normalize(self):
return self / self.length()
def less(self, p):
return self.x < p.x
def neq(self, other):
return other.x != self.x or other.y != self.y
def clone(self):
return Point(self.x, self.y)
def orient2d(pa, pb, pc):
acx = pa.x - pc.x
bcx = pb.x - pc.x
acy = pa.y - pc.y
bcy = pb.y - pc.y
return acx * bcy - acy * bcx
class Edge(object):
def __init__(self, p, q):
self.p = p
self.q = q
self.slope = (q.y - p.y) / (q.x - p.x) if q.x - p.x != 0 else 0
self.b = p.y - (p.x * self.slope)
self.above, self.below = None, None
self.mpoints = [p, q]
def is_above(self, point):
return orient2d(self.p, self.q, point) < 0
def is_below(self, point):
return orient2d(self.p, self.q, point) > 0
def add_mpoint(self, point):
for mp in self.mpoints:
if not mp.neq(point): return
self.mpoints.append(point)
class Trapezoid(object):
def __init__(self, left_point, right_point, top, bottom):
self.left_point = left_point
self.right_point = right_point
self.top = top
self.bottom = bottom
self.upper_left = None
self.upper_right = None
self.lower_left = None
self.lower_right = None
self.inside = True
self.sink = None
self.key = hash(self)
def update_left(self, ul, ll):
self.upper_left = ul
if ul != None: ul.upper_right = self
self.lower_left = ll
if ll != None: ll.lower_right = self
def update_right(self, ur, lr):
self.upper_right = ur
if ur != None: ur.upper_left = self
self.lower_right = lr
if lr != None: lr.lower_left = self
def update_left_right(self, ul, ll, ur, lr):
self.upper_left = ul
if ul != None: ul.upper_right = self
self.lower_left = ll
if ll != None: ll.lower_right = self
self.upper_right = ur
if ur != None: ur.upper_left = self
self.lower_right = lr
if lr != None: lr.lower_left = self
def trim_neighbors(self):
if self.inside:
self.inside = False
if self.upper_left != None: self.upper_left.trim_neighbors()
if self.lower_left != None: self.lower_left.trim_neighbors()
if self.upper_right != None: self.upper_right.trim_neighbors()
if self.lower_right != None: self.lower_right.trim_neighbors()
def contains(self, point):
return (point.x > self.left_point.x and point.x < self.right_point.x and
self.top.is_above(point) and self.bottom.is_below(point))
def vertices(self):
v1 = line_intersect(self.top, self.left_point.x)
v2 = line_intersect(self.bottom, self.left_point.x)
v3 = line_intersect(self.bottom, self.right_point.x)
v4 = line_intersect(self.top, self.right_point.x)
return v1, v2, v3, v4
def add_points(self):
if self.left_point is not self.bottom.p:
self.bottom.add_mpoint(self.left_point)
if self.right_point is not self.bottom.q:
self.bottom.add_mpoint(self.right_point)
if self.left_point is not self.top.p:
self.top.add_mpoint(self.left_point)
if self.right_point is not self.top.q:
self.top.add_mpoint(self.right_point)
def area(self):
p = list(self.vertices())
x0 = p[0][0]; y0 = p[0][1]
x1 = p[1][0]; y1 = p[1][1]
return 0.5 * abs(sum(x0*y1 - x1*y0
for ((x0, y0), (x1, y1)) in self.segments(p)))
def segments(self, p):
return zip(p, p[1:] + [p[0]])
def line_intersect(edge, x):
y = edge.slope * x + edge.b
return x, y
class Triangulator(object):
##
## Number of points should be > 3
##
def __init__(self, poly_line):
self.polygons = []
self.trapezoids = []
self.xmono_poly = []
self.edge_list = self.init_edges(poly_line)
self.trapezoidal_map = TrapezoidalMap()
self.bounding_box = self.trapezoidal_map.bounding_box(self.edge_list)
self.query_graph = QueryGraph(isink(self.bounding_box))
self.process()
def triangles(self):
triangles = []
for p in self.polygons:
verts = []
for v in p:
verts.append((v.x, v.y))
triangles.append(verts)
return triangles
def trapezoid_map(self):
return self.trapezoidal_map.map
# Build the trapezoidal map and query graph
def process(self):
for edge in self.edge_list:
traps = self.query_graph.follow_edge(edge)
for t in traps:
# Remove old trapezoids
del self.trapezoidal_map.map[t.key]
# Bisect old trapezoids and create new
cp = t.contains(edge.p)
cq = t.contains(edge.q)
if cp and cq:
tlist = self.trapezoidal_map.case1(t, edge)
self.query_graph.case1(t.sink, edge, tlist)
elif cp and not cq:
tlist = self.trapezoidal_map.case2(t, edge)
self.query_graph.case2(t.sink, edge, tlist)
elif not cp and not cq:
tlist = self.trapezoidal_map.case3(t, edge)
self.query_graph.case3(t.sink, edge, tlist)
else:
tlist = self.trapezoidal_map.case4(t, edge)
self.query_graph.case4(t.sink, edge, tlist)
# Add new trapezoids to map
for t in tlist:
self.trapezoidal_map.map[t.key] = t
self.trapezoidal_map.clear()
# Mark outside trapezoids w/ depth-first search
for k, t in self.trapezoidal_map.map.items():
self.mark_outside(t)
# Collect interior trapezoids
for k, t in self.trapezoidal_map.map.items():
if t.inside:
self.trapezoids.append(t)
t.add_points()
# Generate the triangles
self.create_mountains()
def mono_polies(self):
polies = []
for x in self.xmono_poly:
polies.append(x.monoPoly)
return polies
def create_mountains(self):
for edge in self.edge_list:
if len(edge.mpoints) > 2:
mountain = MonotoneMountain()
points = merge_sort(edge.mpoints)
for p in points:
mountain.add(p)
mountain.process()
for t in mountain.triangles:
self.polygons.append(t)
self.xmono_poly.append(mountain)
def mark_outside(self, t):
if t.top is self.bounding_box.top or t.bottom is self.bounding_box.bottom:
t.trim_neighbors()
def init_edges(self, points):
edge_list = []
size = len(points)
for i in range(size):
j = i + 1 if i < size-1 else 0
p = points[i][0], points[i][1]
q = points[j][0], points[j][1]
edge_list.append((p, q))
return self.order_edges(edge_list)
def order_edges(self, edge_list):
edges = []
for e in edge_list:
p = shear_transform(e[0])
q = shear_transform(e[1])
if p.x > q.x:
edges.append(Edge(q, p))
else:
edges.append(Edge(p, q))
# Randomized incremental algorithm
shuffle(edges)
return edges
def shear_transform(point):
return Point(point[0] + SHEAR * point[1], point[1])
def merge_sort(l):
if len(l) > 1:
lleft = merge_sort(l[:len(l)/2])
lright = merge_sort(l[len(l)/2:])
p1, p2, p = 0, 0, 0
while p1<len(lleft) and p2<len(lright):
if lleft[p1].x < lright[p2].x:
l[p]=lleft[p1]
p+=1
p1+=1
else:
l[p]=lright[p2]
p+=1
p2+=1
if p1<len(lleft):l[p:]=lleft[p1:]
elif p2<len(lright):l[p:]=lright[p2:]
else : print "internal error"
return l
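# Usage sketch (assumes the full module is loaded; points are (x, y) tuples
# and, per the class comment above, more than 3 are required):
#   points = [(0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 10.0)]
#   triangulator = Triangulator(points)
#   triangles = triangulator.triangles()  # list of [(x, y), ...] triangles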
class TrapezoidalMap(object):
def __init__(self):
self.map = {}
self.margin = 50.0
self.bcross = None
self.tcross = None
def clear(self):
self.bcross = None
self.tcross = None
def case1(self, t, e):
trapezoids = []
trapezoids.append(Trapezoid(t.left_point, e.p, t.top, t.bottom))
trapezoids.append(Trapezoid(e.p, e.q, t.top, e))
trapezoids.append(Trapezoid(e.p, e.q, e, t.bottom))
trapezoids.append(Trapezoid(e.q, t.right_point, t.top, t.bottom))
trapezoids[0].update_left(t.upper_left, t.lower_left)
trapezoids[1].update_left_right(trapezoids[0], None, trapezoids[3], None)
trapezoids[2].update_left_right(None, trapezoids[0], None, trapezoids[3])
trapezoids[3].update_right(t.upper_right, t.lower_right)
return trapezoids
def case2(self, t, e):
rp = e.q if e.q.x == t.right_point.x else t.right_point
trapezoids = []
trapezoids.append(Trapezoid(t.left_point, e.p, t.top, t.bottom))
trapezoids.append(Trapezoid(e.p, rp, t.top, e))
trapezoids.append(Trapezoid(e.p, rp, e, t.bottom))
trapezoids[0].update_left(t.upper_left, t.lower_left)
trapezoids[1].update_left_right(trapezoids[0], None, t.upper_right, None)
trapezoids[2].update_left_right(None, trapezoids[0], None, t.lower_right)
self.bcross = t.bottom
self.tcross = t.top
e.above = trapezoids[1]
e.below = trapezoids[2]
return trapezoids
def case3(self, t, e):
lp = e.p if e.p.x == t.left_point.x else t.left_point
rp = e.q if e.q.x == t.right_point.x else t.right_point
trapezoids = []
if self.tcross is t.top:
trapezoids.append(t.upper_left)
trapezoids[0].update_right(t.upper_right, None)
trapezoids[0].right_point = rp
else:
trapezoids.append(Trapezoid(lp, rp, t.top, e))
trapezoids[0].update_left_right(t.upper_left, e.above, t.upper_right, None)
if self.bcross is t.bottom:
trapezoids.append(t.lower_left)
trapezoids[1].update_right(None, t.lower_right)
trapezoids[1].right_point = rp
else:
trapezoids.append(Trapezoid(lp, rp, e, t.bottom))
trapezoids[1].update_left_right(e.below, t.lower_left, None, t.lower_right)
self.bcross = t.bottom
self.tcross = t.top
e.above = trapezoids[0]
e.below = trapezoids[1]
return trapezoids
def case4(self, t, e):
lp = e.p if e.p.x == t.left_point.x else t.left_point
trapezoids = []
if self.tcross is t.top:
trapezoids.append(t.upper_left)
trapezoids[0].right_point = e.q
else:
trapezoids.append(Trapezoid(lp, e.q, t.top, e))
trapezoids[0].update_left(t.upper_left, e.above)
if self.bcross is t.bottom:
trapezoids.append(t.lower_left)
trapezoids[1].right_point = e.q
else:
trapezoids.append(Trapezoid(lp, e.q, e, t.bottom))
trapezoids[1].update_left(e.below, t.lower_left)
trapezoids.append(Trapezoid(e.q, t.right_point, t.top, t.bottom))
trapezoids[2].update_left_right(trapezoids[0], trapezoids[1], t.upper_right, t.lower_right)
return trapezoids
def bounding_box(self, edges):
margin = self.margin
max = edges[0].p + margin
min = edges[0].q - margin
for e in edges:
if e.p.x > max.x: max = Point(e.p.x + margin, max.y)
if e.p.y > max.y: max = Point(max.x, e.p.y + margin)
if e.q.x > max.x: max = Point(e.q.x + margin, max.y)
if e.q.y > max.y: max = Point(max.x, e.q.y + margin)
if e.p.x < min.x: min = Point(e.p.x - margin, min.y)
if e.p.y < min.y: min = Point(min.x, e.p.y - margin)
if e.q.x < min.x: min = Point(e.q.x - margin, min.y)
if e.q.y < min.y: min = Point(min.x, e.q.y - margin)
top = Edge(Point(min.x, max.y), Point(max.x, max.y))
bottom = Edge(Point(min.x, min.y), Point(max.x, min.y))
left = top.p
right = top.q
trap = Trapezoid(left, right, top, bottom)
self.map[trap.key] = trap
return trap
class Node(object):
def __init__(self, lchild, rchild):
self.parent_list = []
self.lchild = lchild
self.rchild = rchild
        if lchild is not None:
            lchild.parent_list.append(self)
        if rchild is not None:
            rchild.parent_list.append(self)
def replace(self, node):
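        # Splice self into the tree in place of ``node``: every parent that
        # pointed at ``node`` is rewired to point at self, which is how a
        # sink gets substituted by the sub-tree built for an inserted edge.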
for parent in node.parent_list:
if parent.lchild is node:
parent.lchild = self
else:
parent.rchild = self
self.parent_list += node.parent_list
class Sink(Node):
def __init__(self, trapezoid):
super(Sink, self).__init__(None, None)
self.trapezoid = trapezoid
trapezoid.sink = self
def locate(self, edge):
return self
def isink(trapezoid):
if trapezoid.sink is None:
return Sink(trapezoid)
return trapezoid.sink
class XNode(Node):
def __init__(self, point, lchild, rchild):
super(XNode, self).__init__(lchild, rchild)
self.point = point
def locate(self, edge):
if edge.p.x >= self.point.x:
return self.rchild.locate(edge)
return self.lchild.locate(edge)
class YNode(Node):
def __init__(self, edge, lchild, rchild):
super(YNode, self).__init__(lchild, rchild)
self.edge = edge
def locate(self, edge):
if self.edge.is_above(edge.p):
return self.rchild.locate(edge)
if self.edge.is_below(edge.p):
return self.lchild.locate(edge)
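        # The query edge starts exactly on this edge (a shared left
        # endpoint); compare slopes to decide on which side it continues.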
if edge.slope < self.edge.slope:
return self.rchild.locate(edge)
return self.lchild.locate(edge)
class QueryGraph:
def __init__(self, head):
self.head = head
def locate(self, edge):
return self.head.locate(edge).trapezoid
def follow_edge(self, edge):
trapezoids = [self.locate(edge)]
        while edge.q.x > trapezoids[-1].right_point.x:
if edge.is_above(trapezoids[-1].right_point):
trapezoids.append(trapezoids[-1].upper_right)
else:
trapezoids.append(trapezoids[-1].lower_right)
return trapezoids
def replace(self, sink, node):
if sink.parent_list:
node.replace(sink)
else:
self.head = node
def case1(self, sink, edge, tlist):
yNode = YNode(edge, isink(tlist[1]), isink(tlist[2]))
qNode = XNode(edge.q, yNode, isink(tlist[3]))
pNode = XNode(edge.p, isink(tlist[0]), qNode)
self.replace(sink, pNode)
def case2(self, sink, edge, tlist):
yNode = YNode(edge, isink(tlist[1]), isink(tlist[2]))
pNode = XNode(edge.p, isink(tlist[0]), yNode)
self.replace(sink, pNode)
def case3(self, sink, edge, tlist):
yNode = YNode(edge, isink(tlist[0]), isink(tlist[1]))
self.replace(sink, yNode)
def case4(self, sink, edge, tlist):
yNode = YNode(edge, isink(tlist[0]), isink(tlist[1]))
qNode = XNode(edge.q, yNode, isink(tlist[2]))
self.replace(sink, qNode)
PI_SLOP = 3.1
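# PI_SLOP is a tolerance just under pi: vertices whose interior angle is
# within about 0.04 rad of a straight line (or exactly zero) are treated as
# degenerate and removed during MonotoneMountain.process().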
class MonotoneMountain:
def __init__(self):
self.size = 0
self.tail = None
self.head = None
self.positive = False
self.convex_points = set()
self.mono_poly = []
self.triangles = []
self.convex_polies = []
def add(self, point):
        if self.size == 0:
            self.head = point
            self.size = 1
        elif self.size == 1:
self.tail = point
self.tail.prev = self.head
self.head.next = self.tail
self.size = 2
else:
self.tail.next = point
point.prev = self.tail
self.tail = point
self.size += 1
def remove(self, point):
next = point.next
prev = point.prev
point.prev.next = next
point.next.prev = prev
self.size -= 1
def process(self):
self.positive = self.angle_sign()
self.gen_mono_poly()
p = self.head.next
while p.neq(self.tail):
a = self.angle(p)
if a >= PI_SLOP or a <= -PI_SLOP or a == 0:
self.remove(p)
elif self.is_convex(p):
self.convex_points.add(p)
p = p.next
self.triangulate()
def triangulate(self):
while self.convex_points:
ear = self.convex_points.pop()
a = ear.prev
b = ear
c = ear.next
triangle = (a, b, c)
self.triangles.append(triangle)
self.remove(ear)
if self.valid(a):
self.convex_points.add(a)
if self.valid(c):
self.convex_points.add(c)
#assert self.size <= 3, "Triangulation bug, please report"
def valid(self, p):
return p.neq(self.head) and p.neq(self.tail) and self.is_convex(p)
def gen_mono_poly(self):
p = self.head
        while p is not None:
self.mono_poly.append(p)
p = p.next
def angle(self, p):
a = p.next - p
b = p.prev - p
return atan2(a.cross(b), a.dot(b))
def angle_sign(self):
a = self.head.next - self.head
b = self.tail - self.head
return atan2(a.cross(b), a.dot(b)) >= 0
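    # Note: angle() yields the signed angle at a vertex via atan2 of the
    # cross and dot products of its two incident edges; is_convex() compares
    # that sign against the mountain's orientation from angle_sign().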
def is_convex(self, p):
if self.positive != (self.angle(p) >= 0):
return False
return True | gpl-2.0 |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/test/test_uuid.py | 14 | 22527 | import unittest
from test import test_support
import io
import os
import uuid
def importable(name):
try:
__import__(name)
return True
except:
return False
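# Example: importable('ctypes') is True on most CPython builds; the tests
# below use it to skip helpers that depend on optional modules.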
class TestUUID(unittest.TestCase):
last_node = None
source2node = {}
def test_UUID(self):
equal = self.assertEqual
ascending = []
for (string, curly, hex, bytes, bytes_le, fields, integer, urn,
time, clock_seq, variant, version) in [
('00000000-0000-0000-0000-000000000000',
'{00000000-0000-0000-0000-000000000000}',
'00000000000000000000000000000000',
'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0',
'\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0',
(0, 0, 0, 0, 0, 0),
0,
'urn:uuid:00000000-0000-0000-0000-000000000000',
0, 0, uuid.RESERVED_NCS, None),
('00010203-0405-0607-0809-0a0b0c0d0e0f',
'{00010203-0405-0607-0809-0a0b0c0d0e0f}',
'000102030405060708090a0b0c0d0e0f',
'\0\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\x0d\x0e\x0f',
'\x03\x02\x01\0\x05\x04\x07\x06\x08\t\n\x0b\x0c\x0d\x0e\x0f',
(0x00010203L, 0x0405, 0x0607, 8, 9, 0x0a0b0c0d0e0fL),
0x000102030405060708090a0b0c0d0e0fL,
'urn:uuid:00010203-0405-0607-0809-0a0b0c0d0e0f',
0x607040500010203L, 0x809, uuid.RESERVED_NCS, None),
('02d9e6d5-9467-382e-8f9b-9300a64ac3cd',
'{02d9e6d5-9467-382e-8f9b-9300a64ac3cd}',
'02d9e6d59467382e8f9b9300a64ac3cd',
'\x02\xd9\xe6\xd5\x94\x67\x38\x2e\x8f\x9b\x93\x00\xa6\x4a\xc3\xcd',
'\xd5\xe6\xd9\x02\x67\x94\x2e\x38\x8f\x9b\x93\x00\xa6\x4a\xc3\xcd',
(0x02d9e6d5L, 0x9467, 0x382e, 0x8f, 0x9b, 0x9300a64ac3cdL),
0x02d9e6d59467382e8f9b9300a64ac3cdL,
'urn:uuid:02d9e6d5-9467-382e-8f9b-9300a64ac3cd',
0x82e946702d9e6d5L, 0xf9b, uuid.RFC_4122, 3),
('12345678-1234-5678-1234-567812345678',
'{12345678-1234-5678-1234-567812345678}',
'12345678123456781234567812345678',
'\x12\x34\x56\x78'*4,
'\x78\x56\x34\x12\x34\x12\x78\x56\x12\x34\x56\x78\x12\x34\x56\x78',
(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678),
0x12345678123456781234567812345678,
'urn:uuid:12345678-1234-5678-1234-567812345678',
0x678123412345678L, 0x1234, uuid.RESERVED_NCS, None),
('6ba7b810-9dad-11d1-80b4-00c04fd430c8',
'{6ba7b810-9dad-11d1-80b4-00c04fd430c8}',
'6ba7b8109dad11d180b400c04fd430c8',
'\x6b\xa7\xb8\x10\x9d\xad\x11\xd1\x80\xb4\x00\xc0\x4f\xd4\x30\xc8',
'\x10\xb8\xa7\x6b\xad\x9d\xd1\x11\x80\xb4\x00\xc0\x4f\xd4\x30\xc8',
(0x6ba7b810L, 0x9dad, 0x11d1, 0x80, 0xb4, 0x00c04fd430c8L),
0x6ba7b8109dad11d180b400c04fd430c8L,
'urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8',
0x1d19dad6ba7b810L, 0xb4, uuid.RFC_4122, 1),
('6ba7b811-9dad-11d1-80b4-00c04fd430c8',
'{6ba7b811-9dad-11d1-80b4-00c04fd430c8}',
'6ba7b8119dad11d180b400c04fd430c8',
'\x6b\xa7\xb8\x11\x9d\xad\x11\xd1\x80\xb4\x00\xc0\x4f\xd4\x30\xc8',
'\x11\xb8\xa7\x6b\xad\x9d\xd1\x11\x80\xb4\x00\xc0\x4f\xd4\x30\xc8',
(0x6ba7b811L, 0x9dad, 0x11d1, 0x80, 0xb4, 0x00c04fd430c8L),
0x6ba7b8119dad11d180b400c04fd430c8L,
'urn:uuid:6ba7b811-9dad-11d1-80b4-00c04fd430c8',
0x1d19dad6ba7b811L, 0xb4, uuid.RFC_4122, 1),
('6ba7b812-9dad-11d1-80b4-00c04fd430c8',
'{6ba7b812-9dad-11d1-80b4-00c04fd430c8}',
'6ba7b8129dad11d180b400c04fd430c8',
'\x6b\xa7\xb8\x12\x9d\xad\x11\xd1\x80\xb4\x00\xc0\x4f\xd4\x30\xc8',
'\x12\xb8\xa7\x6b\xad\x9d\xd1\x11\x80\xb4\x00\xc0\x4f\xd4\x30\xc8',
(0x6ba7b812L, 0x9dad, 0x11d1, 0x80, 0xb4, 0x00c04fd430c8L),
0x6ba7b8129dad11d180b400c04fd430c8L,
'urn:uuid:6ba7b812-9dad-11d1-80b4-00c04fd430c8',
0x1d19dad6ba7b812L, 0xb4, uuid.RFC_4122, 1),
('6ba7b814-9dad-11d1-80b4-00c04fd430c8',
'{6ba7b814-9dad-11d1-80b4-00c04fd430c8}',
'6ba7b8149dad11d180b400c04fd430c8',
'\x6b\xa7\xb8\x14\x9d\xad\x11\xd1\x80\xb4\x00\xc0\x4f\xd4\x30\xc8',
'\x14\xb8\xa7\x6b\xad\x9d\xd1\x11\x80\xb4\x00\xc0\x4f\xd4\x30\xc8',
(0x6ba7b814L, 0x9dad, 0x11d1, 0x80, 0xb4, 0x00c04fd430c8L),
0x6ba7b8149dad11d180b400c04fd430c8L,
'urn:uuid:6ba7b814-9dad-11d1-80b4-00c04fd430c8',
0x1d19dad6ba7b814L, 0xb4, uuid.RFC_4122, 1),
('7d444840-9dc0-11d1-b245-5ffdce74fad2',
'{7d444840-9dc0-11d1-b245-5ffdce74fad2}',
'7d4448409dc011d1b2455ffdce74fad2',
'\x7d\x44\x48\x40\x9d\xc0\x11\xd1\xb2\x45\x5f\xfd\xce\x74\xfa\xd2',
'\x40\x48\x44\x7d\xc0\x9d\xd1\x11\xb2\x45\x5f\xfd\xce\x74\xfa\xd2',
(0x7d444840L, 0x9dc0, 0x11d1, 0xb2, 0x45, 0x5ffdce74fad2L),
0x7d4448409dc011d1b2455ffdce74fad2L,
'urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2',
0x1d19dc07d444840L, 0x3245, uuid.RFC_4122, 1),
('e902893a-9d22-3c7e-a7b8-d6e313b71d9f',
'{e902893a-9d22-3c7e-a7b8-d6e313b71d9f}',
'e902893a9d223c7ea7b8d6e313b71d9f',
'\xe9\x02\x89\x3a\x9d\x22\x3c\x7e\xa7\xb8\xd6\xe3\x13\xb7\x1d\x9f',
'\x3a\x89\x02\xe9\x22\x9d\x7e\x3c\xa7\xb8\xd6\xe3\x13\xb7\x1d\x9f',
(0xe902893aL, 0x9d22, 0x3c7e, 0xa7, 0xb8, 0xd6e313b71d9fL),
0xe902893a9d223c7ea7b8d6e313b71d9fL,
'urn:uuid:e902893a-9d22-3c7e-a7b8-d6e313b71d9f',
0xc7e9d22e902893aL, 0x27b8, uuid.RFC_4122, 3),
('eb424026-6f54-4ef8-a4d0-bb658a1fc6cf',
'{eb424026-6f54-4ef8-a4d0-bb658a1fc6cf}',
'eb4240266f544ef8a4d0bb658a1fc6cf',
'\xeb\x42\x40\x26\x6f\x54\x4e\xf8\xa4\xd0\xbb\x65\x8a\x1f\xc6\xcf',
'\x26\x40\x42\xeb\x54\x6f\xf8\x4e\xa4\xd0\xbb\x65\x8a\x1f\xc6\xcf',
(0xeb424026L, 0x6f54, 0x4ef8, 0xa4, 0xd0, 0xbb658a1fc6cfL),
0xeb4240266f544ef8a4d0bb658a1fc6cfL,
'urn:uuid:eb424026-6f54-4ef8-a4d0-bb658a1fc6cf',
0xef86f54eb424026L, 0x24d0, uuid.RFC_4122, 4),
('f81d4fae-7dec-11d0-a765-00a0c91e6bf6',
'{f81d4fae-7dec-11d0-a765-00a0c91e6bf6}',
'f81d4fae7dec11d0a76500a0c91e6bf6',
'\xf8\x1d\x4f\xae\x7d\xec\x11\xd0\xa7\x65\x00\xa0\xc9\x1e\x6b\xf6',
'\xae\x4f\x1d\xf8\xec\x7d\xd0\x11\xa7\x65\x00\xa0\xc9\x1e\x6b\xf6',
(0xf81d4faeL, 0x7dec, 0x11d0, 0xa7, 0x65, 0x00a0c91e6bf6L),
0xf81d4fae7dec11d0a76500a0c91e6bf6L,
'urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6',
0x1d07decf81d4faeL, 0x2765, uuid.RFC_4122, 1),
('fffefdfc-fffe-fffe-fffe-fffefdfcfbfa',
'{fffefdfc-fffe-fffe-fffe-fffefdfcfbfa}',
'fffefdfcfffefffefffefffefdfcfbfa',
'\xff\xfe\xfd\xfc\xff\xfe\xff\xfe\xff\xfe\xff\xfe\xfd\xfc\xfb\xfa',
'\xfc\xfd\xfe\xff\xfe\xff\xfe\xff\xff\xfe\xff\xfe\xfd\xfc\xfb\xfa',
(0xfffefdfcL, 0xfffe, 0xfffe, 0xff, 0xfe, 0xfffefdfcfbfaL),
0xfffefdfcfffefffefffefffefdfcfbfaL,
'urn:uuid:fffefdfc-fffe-fffe-fffe-fffefdfcfbfa',
0xffefffefffefdfcL, 0x3ffe, uuid.RESERVED_FUTURE, None),
('ffffffff-ffff-ffff-ffff-ffffffffffff',
'{ffffffff-ffff-ffff-ffff-ffffffffffff}',
'ffffffffffffffffffffffffffffffff',
'\xff'*16,
'\xff'*16,
(0xffffffffL, 0xffffL, 0xffffL, 0xff, 0xff, 0xffffffffffffL),
0xffffffffffffffffffffffffffffffffL,
'urn:uuid:ffffffff-ffff-ffff-ffff-ffffffffffff',
0xfffffffffffffffL, 0x3fff, uuid.RESERVED_FUTURE, None),
]:
equivalents = []
# Construct each UUID in several different ways.
for u in [uuid.UUID(string), uuid.UUID(curly), uuid.UUID(hex),
uuid.UUID(bytes=bytes), uuid.UUID(bytes_le=bytes_le),
uuid.UUID(fields=fields), uuid.UUID(int=integer),
uuid.UUID(urn)]:
# Test all conversions and properties of the UUID object.
equal(str(u), string)
equal(int(u), integer)
equal(u.bytes, bytes)
equal(u.bytes_le, bytes_le)
equal(u.fields, fields)
equal(u.time_low, fields[0])
equal(u.time_mid, fields[1])
equal(u.time_hi_version, fields[2])
equal(u.clock_seq_hi_variant, fields[3])
equal(u.clock_seq_low, fields[4])
equal(u.node, fields[5])
equal(u.hex, hex)
equal(u.int, integer)
equal(u.urn, urn)
equal(u.time, time)
equal(u.clock_seq, clock_seq)
equal(u.variant, variant)
equal(u.version, version)
equivalents.append(u)
# Different construction methods should give the same UUID.
for u in equivalents:
for v in equivalents:
equal(u, v)
ascending.append(u)
# Test comparison of UUIDs.
for i in range(len(ascending)):
for j in range(len(ascending)):
equal(cmp(i, j), cmp(ascending[i], ascending[j]))
# Test sorting of UUIDs (above list is in ascending order).
resorted = ascending[:]
resorted.reverse()
resorted.sort()
equal(ascending, resorted)
def test_exceptions(self):
badvalue = lambda f: self.assertRaises(ValueError, f)
badtype = lambda f: self.assertRaises(TypeError, f)
# Badly formed hex strings.
badvalue(lambda: uuid.UUID(''))
badvalue(lambda: uuid.UUID('abc'))
badvalue(lambda: uuid.UUID('1234567812345678123456781234567'))
badvalue(lambda: uuid.UUID('123456781234567812345678123456789'))
badvalue(lambda: uuid.UUID('123456781234567812345678z2345678'))
# Badly formed bytes.
badvalue(lambda: uuid.UUID(bytes='abc'))
badvalue(lambda: uuid.UUID(bytes='\0'*15))
badvalue(lambda: uuid.UUID(bytes='\0'*17))
# Badly formed bytes_le.
badvalue(lambda: uuid.UUID(bytes_le='abc'))
badvalue(lambda: uuid.UUID(bytes_le='\0'*15))
badvalue(lambda: uuid.UUID(bytes_le='\0'*17))
# Badly formed fields.
badvalue(lambda: uuid.UUID(fields=(1,)))
badvalue(lambda: uuid.UUID(fields=(1, 2, 3, 4, 5)))
badvalue(lambda: uuid.UUID(fields=(1, 2, 3, 4, 5, 6, 7)))
# Field values out of range.
badvalue(lambda: uuid.UUID(fields=(-1, 0, 0, 0, 0, 0)))
badvalue(lambda: uuid.UUID(fields=(0x100000000L, 0, 0, 0, 0, 0)))
badvalue(lambda: uuid.UUID(fields=(0, -1, 0, 0, 0, 0)))
badvalue(lambda: uuid.UUID(fields=(0, 0x10000L, 0, 0, 0, 0)))
badvalue(lambda: uuid.UUID(fields=(0, 0, -1, 0, 0, 0)))
badvalue(lambda: uuid.UUID(fields=(0, 0, 0x10000L, 0, 0, 0)))
badvalue(lambda: uuid.UUID(fields=(0, 0, 0, -1, 0, 0)))
badvalue(lambda: uuid.UUID(fields=(0, 0, 0, 0x100L, 0, 0)))
badvalue(lambda: uuid.UUID(fields=(0, 0, 0, 0, -1, 0)))
badvalue(lambda: uuid.UUID(fields=(0, 0, 0, 0, 0x100L, 0)))
badvalue(lambda: uuid.UUID(fields=(0, 0, 0, 0, 0, -1)))
badvalue(lambda: uuid.UUID(fields=(0, 0, 0, 0, 0, 0x1000000000000L)))
# Version number out of range.
badvalue(lambda: uuid.UUID('00'*16, version=0))
badvalue(lambda: uuid.UUID('00'*16, version=6))
# Integer value out of range.
badvalue(lambda: uuid.UUID(int=-1))
badvalue(lambda: uuid.UUID(int=1<<128L))
# Must supply exactly one of hex, bytes, fields, int.
h, b, f, i = '00'*16, '\0'*16, (0, 0, 0, 0, 0, 0), 0
uuid.UUID(h)
uuid.UUID(hex=h)
uuid.UUID(bytes=b)
uuid.UUID(bytes_le=b)
uuid.UUID(fields=f)
uuid.UUID(int=i)
# Wrong number of arguments (positional).
badtype(lambda: uuid.UUID())
badtype(lambda: uuid.UUID(h, b))
badtype(lambda: uuid.UUID(h, b, b))
badtype(lambda: uuid.UUID(h, b, b, f))
badtype(lambda: uuid.UUID(h, b, b, f, i))
# Duplicate arguments.
for hh in [[], [('hex', h)]]:
for bb in [[], [('bytes', b)]]:
for bble in [[], [('bytes_le', b)]]:
for ii in [[], [('int', i)]]:
for ff in [[], [('fields', f)]]:
args = dict(hh + bb + bble + ii + ff)
if len(args) != 0:
badtype(lambda: uuid.UUID(h, **args))
if len(args) != 1:
badtype(lambda: uuid.UUID(**args))
# Immutability.
u = uuid.UUID(h)
badtype(lambda: setattr(u, 'hex', h))
badtype(lambda: setattr(u, 'bytes', b))
badtype(lambda: setattr(u, 'bytes_le', b))
badtype(lambda: setattr(u, 'fields', f))
badtype(lambda: setattr(u, 'int', i))
badtype(lambda: setattr(u, 'time_low', 0))
badtype(lambda: setattr(u, 'time_mid', 0))
badtype(lambda: setattr(u, 'time_hi_version', 0))
badtype(lambda: setattr(u, 'time_hi_version', 0))
badtype(lambda: setattr(u, 'clock_seq_hi_variant', 0))
badtype(lambda: setattr(u, 'clock_seq_low', 0))
badtype(lambda: setattr(u, 'node', 0))
def check_node(self, node, source):
message = "%012x is not an RFC 4122 node ID" % node
self.assertTrue(0 < node, message)
self.assertTrue(node < (1L << 48), message)
TestUUID.source2node[source] = node
if TestUUID.last_node:
if TestUUID.last_node != node:
msg = "different sources disagree on node:\n"
for s, n in TestUUID.source2node.iteritems():
msg += " from source %r, node was %012x\n" % (s, n)
# There's actually no reason to expect the MAC addresses
# to agree across various methods -- e.g., a box may have
# multiple network interfaces, and different ways of getting
# a MAC address may favor different HW.
##self.fail(msg)
else:
TestUUID.last_node = node
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
def test_ifconfig_getnode(self):
node = uuid._ifconfig_getnode()
if node is not None:
self.check_node(node, 'ifconfig')
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
def test_arp_getnode(self):
node = uuid._arp_getnode()
if node is not None:
self.check_node(node, 'arp')
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
def test_lanscan_getnode(self):
node = uuid._lanscan_getnode()
if node is not None:
self.check_node(node, 'lanscan')
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
def test_netstat_getnode(self):
node = uuid._netstat_getnode()
if node is not None:
self.check_node(node, 'netstat')
@unittest.skipUnless(os.name == 'nt', 'requires Windows')
def test_ipconfig_getnode(self):
node = uuid._ipconfig_getnode()
if node is not None:
self.check_node(node, 'ipconfig')
@unittest.skipUnless(importable('win32wnet'), 'requires win32wnet')
@unittest.skipUnless(importable('netbios'), 'requires netbios')
def test_netbios_getnode(self):
self.check_node(uuid._netbios_getnode(), 'netbios')
def test_random_getnode(self):
node = uuid._random_getnode()
# Least significant bit of first octet must be set.
self.assertTrue(node & 0x010000000000)
self.assertTrue(node < (1L << 48))
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
@unittest.skipUnless(importable('ctypes'), 'requires ctypes')
def test_unixdll_getnode(self):
try: # Issues 1481, 3581: _uuid_generate_time() might be None.
self.check_node(uuid._unixdll_getnode(), 'unixdll')
except TypeError:
pass
@unittest.skipUnless(os.name == 'nt', 'requires Windows')
@unittest.skipUnless(importable('ctypes'), 'requires ctypes')
def test_windll_getnode(self):
self.check_node(uuid._windll_getnode(), 'windll')
def test_getnode(self):
node1 = uuid.getnode()
self.check_node(node1, "getnode1")
# Test it again to ensure consistency.
node2 = uuid.getnode()
self.check_node(node2, "getnode2")
self.assertEqual(node1, node2)
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
def test_find_mac(self):
data = '''\
fake hwaddr
cscotun0 Link encap:UNSPEC HWaddr 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00
eth0 Link encap:Ethernet HWaddr 12:34:56:78:90:ab
'''
def mock_popen(cmd):
return io.BytesIO(data)
path = os.environ.get("PATH", os.defpath).split(os.pathsep)
path.extend(('/sbin', '/usr/sbin'))
for dir in path:
executable = os.path.join(dir, 'ifconfig')
if (os.path.exists(executable) and
os.access(executable, os.F_OK | os.X_OK) and
not os.path.isdir(executable)):
break
else:
self.skipTest('requires ifconfig')
with test_support.swap_attr(os, 'popen', mock_popen):
mac = uuid._find_mac(
command='ifconfig',
args='',
hw_identifiers=['hwaddr'],
get_index=lambda x: x + 1,
)
self.assertEqual(mac, 0x1234567890ab)
@unittest.skipUnless(importable('ctypes'), 'requires ctypes')
def test_uuid1(self):
equal = self.assertEqual
# Make sure uuid1() generates UUIDs that are actually version 1.
for u in [uuid.uuid1() for i in range(10)]:
equal(u.variant, uuid.RFC_4122)
equal(u.version, 1)
# Make sure the generated UUIDs are actually unique.
uuids = {}
for u in [uuid.uuid1() for i in range(1000)]:
uuids[u] = 1
equal(len(uuids.keys()), 1000)
# Make sure the supplied node ID appears in the UUID.
u = uuid.uuid1(0)
equal(u.node, 0)
u = uuid.uuid1(0x123456789abc)
equal(u.node, 0x123456789abc)
u = uuid.uuid1(0xffffffffffff)
equal(u.node, 0xffffffffffff)
# Make sure the supplied clock sequence appears in the UUID.
u = uuid.uuid1(0x123456789abc, 0)
equal(u.node, 0x123456789abc)
equal(((u.clock_seq_hi_variant & 0x3f) << 8) | u.clock_seq_low, 0)
u = uuid.uuid1(0x123456789abc, 0x1234)
equal(u.node, 0x123456789abc)
equal(((u.clock_seq_hi_variant & 0x3f) << 8) |
u.clock_seq_low, 0x1234)
u = uuid.uuid1(0x123456789abc, 0x3fff)
equal(u.node, 0x123456789abc)
equal(((u.clock_seq_hi_variant & 0x3f) << 8) |
u.clock_seq_low, 0x3fff)
def test_uuid3(self):
equal = self.assertEqual
# Test some known version-3 UUIDs.
for u, v in [(uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org'),
'6fa459ea-ee8a-3ca4-894e-db77e160355e'),
(uuid.uuid3(uuid.NAMESPACE_URL, 'http://python.org/'),
'9fe8e8c4-aaa8-32a9-a55c-4535a88b748d'),
(uuid.uuid3(uuid.NAMESPACE_OID, '1.3.6.1'),
'dd1a1cef-13d5-368a-ad82-eca71acd4cd1'),
(uuid.uuid3(uuid.NAMESPACE_X500, 'c=ca'),
'658d3002-db6b-3040-a1d1-8ddd7d189a4d'),
]:
equal(u.variant, uuid.RFC_4122)
equal(u.version, 3)
equal(u, uuid.UUID(v))
equal(str(u), v)
@unittest.skipUnless(importable('ctypes'), 'requires ctypes')
def test_uuid4(self):
equal = self.assertEqual
# Make sure uuid4() generates UUIDs that are actually version 4.
for u in [uuid.uuid4() for i in range(10)]:
equal(u.variant, uuid.RFC_4122)
equal(u.version, 4)
# Make sure the generated UUIDs are actually unique.
uuids = {}
for u in [uuid.uuid4() for i in range(1000)]:
uuids[u] = 1
equal(len(uuids.keys()), 1000)
def test_uuid5(self):
equal = self.assertEqual
# Test some known version-5 UUIDs.
for u, v in [(uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org'),
'886313e1-3b8a-5372-9b90-0c9aee199e5d'),
(uuid.uuid5(uuid.NAMESPACE_URL, 'http://python.org/'),
'4c565f0d-3f5a-5890-b41b-20cf47701c5e'),
(uuid.uuid5(uuid.NAMESPACE_OID, '1.3.6.1'),
'1447fa61-5277-5fef-a9b3-fbc6e44f4af3'),
(uuid.uuid5(uuid.NAMESPACE_X500, 'c=ca'),
'cc957dd1-a972-5349-98cd-874190002798'),
]:
equal(u.variant, uuid.RFC_4122)
equal(u.version, 5)
equal(u, uuid.UUID(v))
equal(str(u), v)
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
def testIssue8621(self):
# On at least some versions of OSX uuid.uuid4 generates
# the same sequence of UUIDs in the parent and any
# children started using fork.
fds = os.pipe()
pid = os.fork()
if pid == 0:
os.close(fds[0])
value = uuid.uuid4()
os.write(fds[1], value.hex)
os._exit(0)
else:
os.close(fds[1])
self.addCleanup(os.close, fds[0])
parent_value = uuid.uuid4().hex
os.waitpid(pid, 0)
child_value = os.read(fds[0], 100)
self.assertNotEqual(parent_value, child_value)
def test_main():
test_support.run_unittest(TestUUID)
if __name__ == '__main__':
test_main()
| gpl-2.0 |
deepfield/ibis | ibis/sql/postgres/tests/conftest.py | 1 | 1926 | # Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import ibis
PG_USER = os.environ.get(
'IBIS_TEST_POSTGRES_USER',
os.environ.get('PGUSER', 'postgres')
)
PG_PASS = os.environ.get(
'IBIS_TEST_POSTGRES_PASSWORD',
os.environ.get('PGPASSWORD', 'postgres')
)
PG_HOST = os.environ.get(
'IBIS_TEST_POSTGRES_HOST',
os.environ.get('PGHOST', 'localhost')
)
PG_PORT = os.environ.get(
'IBIS_TEST_POSTGRES_PORT',
os.environ.get('PGPORT', 5432)
)
IBIS_TEST_POSTGRES_DB = os.environ.get(
'IBIS_TEST_POSTGRES_DATABASE',
os.environ.get('PGDATABASE', 'ibis_testing')
)
@pytest.fixture(scope='module')
def con():
return ibis.postgres.connect(
host=PG_HOST,
user=PG_USER,
password=PG_PASS,
database=IBIS_TEST_POSTGRES_DB,
port=PG_PORT,
)
@pytest.fixture(scope='module')
def db(con):
return con.database()
@pytest.fixture(scope='module')
def alltypes(db):
return db.functional_alltypes
@pytest.fixture(scope='module')
def df(alltypes):
return alltypes.execute()
@pytest.fixture(scope='module')
def at(alltypes):
return alltypes.op().sqla_table
@pytest.fixture
def translate():
from ibis.sql.postgres.compiler import PostgreSQLDialect
dialect = PostgreSQLDialect()
context = dialect.make_context()
return lambda expr: dialect.translator(expr, context).get_result()
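# Illustrative sketch (not part of the original fixtures): a test module in
# this directory can simply request the fixtures by name, e.g.
#
#   def test_row_count(alltypes, df):
#       assert alltypes.count().execute() == len(df)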
| apache-2.0 |
shlevy/ghc | utils/checkUniques/check-uniques.py | 17 | 1513 | #!/usr/bin/env python3
from __future__ import print_function
import os.path
import sys
import re
import glob
import io
from collections import defaultdict
# keyed on unique type, values are lists of (unique, name) pairs
def find_uniques(source_files):
uniques = defaultdict(lambda: defaultdict(lambda: set()))
unique_re = re.compile(r"([\w\d]+)\s*=\s*mk([\w\d']+)Unique\s+(\d+)")
for f in source_files:
ms = unique_re.findall(io.open(f, encoding='utf8').read())
for m in ms:
name = m[0]
_type = m[1]
n = int(m[2])
uniques[_type][n].add(name)
return uniques
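# For reference, unique_re matches definitions of the form (hypothetical
# entry shown):
#   addIdKey = mkPreludeMiscIdUnique 101
# yielding name='addIdKey', _type='PreludeMiscId', n=101.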
def print_all(uniques):
for _type, uniqs in uniques.items():
print('{_type} uniques'.format(**locals()))
for n,names in uniqs.items():
all_names = ', '.join(names)
print(' {n} = {all_names}'.format(**locals()))
def find_conflicts(uniques):
return [ (uniqueType, number, names)
for uniqueType, uniqs in uniques.items()
for number, names in uniqs.items()
if len(names) > 1
]
top_dir = sys.argv[1]
uniques = find_uniques(glob.glob(os.path.join(top_dir, 'compiler', 'prelude', '*.hs')))
#print_all(uniques)
conflicts = find_conflicts(uniques)
if len(conflicts) > 0:
print("Error: check-uniques: Found Unique conflict")
print()
for (ty, n, names) in conflicts:
print(' %s unique %d conflict: %s' % (ty, n, ', '.join(names)))
print()
sys.exit(1)
| bsd-3-clause |
nuagenetworks/vspk-python | vspk/v6/nubridgeinterface.py | 1 | 24029 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUTCAsFetcher
from .fetchers import NURedirectionTargetsFetcher
from .fetchers import NUDeploymentFailuresFetcher
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUDHCPOptionsFetcher
from .fetchers import NUDHCPv6OptionsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUPolicyDecisionsFetcher
from .fetchers import NUPolicyGroupsFetcher
from .fetchers import NUQOSsFetcher
from .fetchers import NUStatisticsFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUBridgeInterface(NURESTObject):
""" Represents a BridgeInterface in the VSD
Notes:
Provides information for each bridge interface.
"""
__rest_name__ = "bridgeinterface"
__resource_name__ = "bridgeinterfaces"
## Constants
CONST_ATTACHED_NETWORK_TYPE_L2DOMAIN = "L2DOMAIN"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ATTACHED_NETWORK_TYPE_SUBNET = "SUBNET"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a BridgeInterface instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> bridgeinterface = NUBridgeInterface(id=u'xxxx-xxx-xxx-xxx', name=u'BridgeInterface')
>>> bridgeinterface = NUBridgeInterface(data=my_dict)
"""
super(NUBridgeInterface, self).__init__()
# Read/Write Attributes
self._vport_id = None
self._vport_name = None
self._ipv6_gateway = None
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._gateway = None
self._netmask = None
self._network_name = None
self._tier_id = None
self._embedded_metadata = None
self._entity_scope = None
self._policy_decision_id = None
self._domain_id = None
self._domain_name = None
self._zone_id = None
self._zone_name = None
self._creation_date = None
self._associated_floating_ip_address = None
self._attached_network_id = None
self._attached_network_type = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="vport_id", remote_name="VPortID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="vport_name", remote_name="VPortName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ipv6_gateway", remote_name="IPv6Gateway", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway", remote_name="gateway", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="netmask", remote_name="netmask", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="network_name", remote_name="networkName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="tier_id", remote_name="tierID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="policy_decision_id", remote_name="policyDecisionID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="domain_id", remote_name="domainID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="domain_name", remote_name="domainName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="zone_id", remote_name="zoneID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="zone_name", remote_name="zoneName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_floating_ip_address", remote_name="associatedFloatingIPAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="attached_network_id", remote_name="attachedNetworkID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="attached_network_type", remote_name="attachedNetworkType", attribute_type=str, is_required=False, is_unique=False, choices=[u'L2DOMAIN', u'SUBNET'])
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.tcas = NUTCAsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.redirection_targets = NURedirectionTargetsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.deployment_failures = NUDeploymentFailuresFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.dhcp_options = NUDHCPOptionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.dhcpv6_options = NUDHCPv6OptionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.policy_decisions = NUPolicyDecisionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.policy_groups = NUPolicyGroupsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.qoss = NUQOSsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.statistics = NUStatisticsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def vport_id(self):
""" Get vport_id value.
Notes:
ID of the vport that the interface is attached to
This attribute is named `VPortID` in VSD API.
"""
return self._vport_id
@vport_id.setter
def vport_id(self, value):
""" Set vport_id value.
Notes:
ID of the vport that the interface is attached to
This attribute is named `VPortID` in VSD API.
"""
self._vport_id = value
@property
def vport_name(self):
""" Get vport_name value.
Notes:
Name of the vport that the VM is attached to
This attribute is named `VPortName` in VSD API.
"""
return self._vport_name
@vport_name.setter
def vport_name(self, value):
""" Set vport_name value.
Notes:
Name of the vport that the VM is attached to
This attribute is named `VPortName` in VSD API.
"""
self._vport_name = value
@property
def ipv6_gateway(self):
""" Get ipv6_gateway value.
Notes:
IPV6 Gateway of the subnet that the Bridge is connected to
This attribute is named `IPv6Gateway` in VSD API.
"""
return self._ipv6_gateway
@ipv6_gateway.setter
def ipv6_gateway(self, value):
""" Set ipv6_gateway value.
Notes:
IPV6 Gateway of the subnet that the Bridge is connected to
This attribute is named `IPv6Gateway` in VSD API.
"""
self._ipv6_gateway = value
@property
def name(self):
""" Get name value.
Notes:
Device name associated with this interface
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Device name associated with this interface
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def gateway(self):
""" Get gateway value.
Notes:
Gateway of the subnet that the VM is connected to
"""
return self._gateway
@gateway.setter
def gateway(self, value):
""" Set gateway value.
Notes:
Gateway of the subnet that the VM is connected to
"""
self._gateway = value
@property
def netmask(self):
""" Get netmask value.
Notes:
Netmask of the subnet that the VM is attached to
"""
return self._netmask
@netmask.setter
def netmask(self, value):
""" Set netmask value.
Notes:
Netmask of the subnet that the VM is attached to
"""
self._netmask = value
@property
def network_name(self):
""" Get network_name value.
Notes:
Name of the network that the VM is attached to
This attribute is named `networkName` in VSD API.
"""
return self._network_name
@network_name.setter
def network_name(self, value):
""" Set network_name value.
Notes:
Name of the network that the VM is attached to
This attribute is named `networkName` in VSD API.
"""
self._network_name = value
@property
def tier_id(self):
""" Get tier_id value.
Notes:
ID of the tier that the interface is attached to.
This attribute is named `tierID` in VSD API.
"""
return self._tier_id
@tier_id.setter
def tier_id(self, value):
""" Set tier_id value.
Notes:
ID of the tier that the interface is attached to.
This attribute is named `tierID` in VSD API.
"""
self._tier_id = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def policy_decision_id(self):
""" Get policy_decision_id value.
Notes:
The policy decision ID for this particular interface
This attribute is named `policyDecisionID` in VSD API.
"""
return self._policy_decision_id
@policy_decision_id.setter
def policy_decision_id(self, value):
""" Set policy_decision_id value.
Notes:
The policy decision ID for this particular interface
This attribute is named `policyDecisionID` in VSD API.
"""
self._policy_decision_id = value
@property
def domain_id(self):
""" Get domain_id value.
Notes:
ID of the domain that the VM is attached to
This attribute is named `domainID` in VSD API.
"""
return self._domain_id
@domain_id.setter
def domain_id(self, value):
""" Set domain_id value.
Notes:
ID of the domain that the VM is attached to
This attribute is named `domainID` in VSD API.
"""
self._domain_id = value
@property
def domain_name(self):
""" Get domain_name value.
Notes:
Name of the domain that the VM is attached to
This attribute is named `domainName` in VSD API.
"""
return self._domain_name
@domain_name.setter
def domain_name(self, value):
""" Set domain_name value.
Notes:
Name of the domain that the VM is attached to
This attribute is named `domainName` in VSD API.
"""
self._domain_name = value
@property
def zone_id(self):
""" Get zone_id value.
Notes:
ID of the zone that the interface is attached to
This attribute is named `zoneID` in VSD API.
"""
return self._zone_id
@zone_id.setter
def zone_id(self, value):
""" Set zone_id value.
Notes:
ID of the zone that the interface is attached to
This attribute is named `zoneID` in VSD API.
"""
self._zone_id = value
@property
def zone_name(self):
""" Get zone_name value.
Notes:
Name of the zone that the VM is attached to.
This attribute is named `zoneName` in VSD API.
"""
return self._zone_name
@zone_name.setter
def zone_name(self, value):
""" Set zone_name value.
Notes:
Name of the zone that the VM is attached to.
This attribute is named `zoneName` in VSD API.
"""
self._zone_name = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def associated_floating_ip_address(self):
""" Get associated_floating_ip_address value.
Notes:
Floating Ip Address of this network interface eg: 10.1.2.1
This attribute is named `associatedFloatingIPAddress` in VSD API.
"""
return self._associated_floating_ip_address
@associated_floating_ip_address.setter
def associated_floating_ip_address(self, value):
""" Set associated_floating_ip_address value.
Notes:
Floating Ip Address of this network interface eg: 10.1.2.1
This attribute is named `associatedFloatingIPAddress` in VSD API.
"""
self._associated_floating_ip_address = value
@property
def attached_network_id(self):
""" Get attached_network_id value.
Notes:
ID of the l2 domain or Subnet that the VM is attached to
This attribute is named `attachedNetworkID` in VSD API.
"""
return self._attached_network_id
@attached_network_id.setter
def attached_network_id(self, value):
""" Set attached_network_id value.
Notes:
ID of the l2 domain or Subnet that the VM is attached to
This attribute is named `attachedNetworkID` in VSD API.
"""
self._attached_network_id = value
@property
def attached_network_type(self):
""" Get attached_network_type value.
Notes:
l2 domain or Subnet that the interface is attached to
This attribute is named `attachedNetworkType` in VSD API.
"""
return self._attached_network_type
@attached_network_type.setter
def attached_network_type(self, value):
""" Set attached_network_type value.
Notes:
l2 domain or Subnet that the interface is attached to
This attribute is named `attachedNetworkType` in VSD API.
"""
self._attached_network_type = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| bsd-3-clause |
tombtc/dashman | lib/pycoin/tests/chainfinder_test.py | 22 | 5931 |
from pycoin.blockchain.ChainFinder import ChainFinder
class BHO(object):
def __init__(self, h, previous_block_hash=None, difficulty=10):
self.h = h
if previous_block_hash is None:
previous_block_hash = h-1
self.previous_block_hash = previous_block_hash
self.difficulty = difficulty
def hash(self):
return self.h
def __repr__(self):
return "<BHO: id:%s parent:%s difficulty:%s>" % \
(self.h, self.previous_block_hash, self.difficulty)
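# BHO is a minimal block-header stand-in: ``h`` doubles as the block hash and
# the parent defaults to ``h - 1``, so a plain range of ints forms one chain.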
def do_scramble(items, tfb, dbt):
import itertools
for c in itertools.permutations(items):
cf = ChainFinder()
load_items(cf, c)
assert cf.trees_from_bottom == tfb
assert cf.descendents_by_top == dbt
cf = ChainFinder()
for b in c:
load_items(cf, [b])
assert cf.trees_from_bottom == tfb
assert cf.descendents_by_top == dbt
def load_items(cf, bhos):
return cf.load_nodes((bh.h, bh.previous_block_hash) for bh in bhos)
def test_basics():
cf = ChainFinder()
assert cf.trees_from_bottom == {}
assert cf.descendents_by_top == {}
ITEMS = [BHO(i) for i in range(6)]
load_items(cf, [ITEMS[0]])
assert cf.trees_from_bottom == {0: [0, -1]}
assert cf.descendents_by_top == {-1: {0}}
load_items(cf, [ITEMS[1]])
assert cf.trees_from_bottom == {1: [1, 0, -1]}
assert cf.descendents_by_top == {-1: {1}}
load_items(cf, ITEMS[0:2])
assert cf.trees_from_bottom == {1: [1, 0, -1]}
assert cf.descendents_by_top == {-1: {1}}
load_items(cf, [ITEMS[4]])
assert cf.trees_from_bottom == {1: [1, 0, -1], 4: [4, 3]}
assert cf.descendents_by_top == {-1: {1}, 3: {4}}
load_items(cf, [ITEMS[3]])
assert cf.trees_from_bottom == {1: [1, 0, -1], 4: [4, 3, 2]}
assert cf.descendents_by_top == {-1: {1}, 2: {4}}
load_items(cf, [ITEMS[5]])
assert cf.trees_from_bottom == {1: [1, 0, -1], 5: [5, 4, 3, 2]}
assert cf.descendents_by_top == {-1: {1}, 2: {5}}
load_items(cf, [ITEMS[2]])
assert cf.trees_from_bottom == {5: [5, 4, 3, 2, 1, 0, -1]}
assert cf.descendents_by_top == {-1: {5}}
do_scramble(ITEMS, cf.trees_from_bottom, cf.descendents_by_top)
def test_branch():
cf = ChainFinder()
assert cf.trees_from_bottom == {}
assert cf.descendents_by_top == {}
ITEMS = [BHO(i) for i in range(7)]
B301 = BHO(301, 3, 10)
B302, B303, B304 = [BHO(i) for i in range(302, 305)]
load_items(cf, [B302])
assert cf.trees_from_bottom == {302: [302, 301]}
assert cf.descendents_by_top == {301: {302}}
load_items(cf, [B304])
assert cf.trees_from_bottom == {302: [302, 301], 304: [304, 303]}
assert cf.descendents_by_top == {301: {302}, 303: {304}}
load_items(cf, [B303])
assert cf.trees_from_bottom == {304: [304, 303, 302, 301]}
assert cf.descendents_by_top == {301: {304}}
load_items(cf, ITEMS)
assert cf.trees_from_bottom == {
6: [6, 5, 4, 3, 2, 1, 0, -1],
304: [304, 303, 302, 301]
}
assert cf.descendents_by_top == {-1: {6}, 301: {304}}
load_items(cf, [B301])
assert cf.trees_from_bottom == {
6: [6, 5, 4, 3, 2, 1, 0, -1],
304: [304, 303, 302, 301, 3, 2, 1, 0, -1]
}
assert cf.descendents_by_top == {-1: {6, 304}}
def test_0123():
I0 = BHO(0)
I1 = BHO(1)
I2 = BHO(2)
I3 = BHO(3, 1)
cf = ChainFinder()
load_items(cf, [I0, I2, I3, I1])
assert cf.trees_from_bottom == {2: [2, 1, 0, -1], 3: [3, 1, 0, -1]}
assert cf.descendents_by_top == {-1: {2, 3}}
def test_all_orphans():
I1 = BHO(1)
I2 = BHO(2)
I3 = BHO(3)
cf = ChainFinder()
load_items(cf, [I2, I3, I1])
assert cf.trees_from_bottom == {3: [3, 2, 1, 0]}
assert cf.descendents_by_top == {0: {3}}
def test_scramble():
ITEMS = [BHO(i, (i-1)//2, 10) for i in range(7)]
tfb = {
3: [3, 1, 0, -1],
4: [4, 1, 0, -1],
5: [5, 2, 0, -1],
6: [6, 2, 0, -1]
}
dbt = {-1: {3, 4, 5, 6}}
do_scramble(ITEMS, tfb, dbt)
def test_branch_switch():
cf = ChainFinder()
assert cf.trees_from_bottom == {}
assert cf.descendents_by_top == {}
ITEMS = [BHO(i) for i in range(4)]
B201 = BHO(201, 2, 10)
B202, B203, B204 = [BHO(i) for i in range(202, 205)]
items = ITEMS + [B201, B202, B203, B204]
tfb = {204: [204, 203, 202, 201, 2, 1, 0, -1], 3: [3, 2, 1, 0, -1]}
dbt = {-1: {3, 204}}
do_scramble(items, tfb, dbt)
def test_longest_chain_endpoint():
cf = ChainFinder()
ITEMS = [BHO(i) for i in range(5)]
B201 = BHO(201, 2, 110)
B202, B203, B204 = [BHO(i) for i in range(202, 205)]
def node_weight_f(h):
if h == -1:
return 0
if h == 201:
return 110
return 10
items = ITEMS + [B201, B202, B203, B204]
load_items(cf, items)
#assert cf.difficulty(0, node_weight_f) == 10
#assert cf.difficulty(1, node_weight_f) == 20
#assert cf.difficulty(2, node_weight_f) == 30
#assert cf.difficulty(3, node_weight_f) == 40
#assert cf.difficulty(4, node_weight_f) == 50
#assert cf.difficulty(201, node_weight_f) == 140
#assert cf.difficulty(202, node_weight_f) == 150
def test_find_ancestral_path():
ITEMS = [BHO(i) for i in range(5)]
B201 = BHO(201, 2, 110)
B202, B203, B204 = [BHO(i) for i in range(202, 205)]
cf = ChainFinder()
items = ITEMS + [B202, B203, B204]
load_items(cf, items)
load_items(cf, [B201])
old_chain_endpoint, new_chain_endpoint = 4, 204
old_subpath, new_subpath = cf.find_ancestral_path(old_chain_endpoint, new_chain_endpoint)
assert old_subpath == [4, 3, 2]
assert new_subpath == [204, 203, 202, 201, 2]
def test_large():
ITEMS = [BHO(i) for i in range(10000)]
cf = ChainFinder()
load_items(cf, ITEMS)
old_subpath, new_subpath = cf.find_ancestral_path(5000, 9000)
| mit |
zvezdan/pip | src/pip/_vendor/urllib3/packages/ordered_dict.py | 2040 | 8935 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
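
# Illustrative sketch (not part of the original backport): a tiny demo of the
# ordering guarantees documented above, using only the methods defined in this
# class; it runs only when this file is executed directly.
if __name__ == '__main__':
    od = OrderedDict([('one', 1), ('two', 2), ('three', 3)])
    # Iteration order matches insertion order, unlike a plain Python 2 dict.
    assert od.keys() == ['one', 'two', 'three']
    # pop() removes the key and returns its value; a default avoids KeyError.
    assert od.pop('two') == 2
    assert od.pop('missing', 'fallback') == 'fallback'
    # update() accepts dicts, iterables of pairs, and keyword arguments.
    od.update([('four', 4)], five=5)
    assert od.items() == [('one', 1), ('three', 3),
                          ('four', 4), ('five', 5)]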
| mit |
kustodian/ansible | lib/ansible/modules/cloud/azure/azure_rm_iothubconsumergroup.py | 21 | 4854 | #!/usr/bin/python
#
# Copyright (c) 2019 Yuwei Zhou, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_iothubconsumergroup
version_added: "2.9"
short_description: Manage Azure IoT hub consumer groups
description:
    - Create or delete an event hub consumer group within an Azure IoT hub.
options:
resource_group:
description:
- Name of resource group.
type: str
required: true
hub:
description:
- Name of the IoT hub.
type: str
required: true
state:
description:
            - State of the consumer group. Use C(present) to create or update a consumer group and C(absent) to delete it.
type: str
default: present
choices:
- absent
- present
event_hub:
description:
- Event hub endpoint name.
type: str
default: events
name:
description:
- Name of the consumer group.
type: str
extends_documentation_fragment:
- azure
- azure_tags
author:
- Yuwei Zhou (@yuwzho)
'''
EXAMPLES = '''
- name: Create an IoT hub consumer group
azure_rm_iothubconsumergroup:
name: test
resource_group: myResourceGroup
hub: Testing
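
# A second, illustrative task (resource names are placeholders) showing how
# the same module removes the group by setting state to absent.
- name: Remove an IoT hub consumer group
  azure_rm_iothubconsumergroup:
    name: test
    resource_group: myResourceGroup
    hub: Testing
    state: absent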
'''
RETURN = '''
id:
description:
- Resource ID of the consumer group.
returned: success
type: str
sample: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/myResourceGroup
/providers/Microsoft.Devices/IotHubs/Testing/events/ConsumerGroups/%24Default"
name:
description:
- Name of the consumer group.
sample: Testing
returned: success
type: str
''' # NOQA
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
import re
try:
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMIoTHubConsumerGroup(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
hub=dict(type='str', required=True),
event_hub=dict(type='str', default='events')
)
self.results = dict(
changed=False,
id=None
)
self.resource_group = None
self.name = None
self.state = None
self.hub = None
self.event_hub = None
super(AzureRMIoTHubConsumerGroup, self).__init__(self.module_arg_spec, supports_check_mode=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec.keys():
setattr(self, key, kwargs[key])
changed = False
cg = self.get_cg()
if not cg and self.state == 'present':
changed = True
if not self.check_mode:
cg = self.create_cg()
elif cg and self.state == 'absent':
changed = True
cg = None
if not self.check_mode:
self.delete_cg()
self.results = dict(
id=cg.id,
name=cg.name
) if cg else dict()
self.results['changed'] = changed
return self.results
def get_cg(self):
try:
return self.IoThub_client.iot_hub_resource.get_event_hub_consumer_group(self.resource_group, self.hub, self.event_hub, self.name)
except Exception:
pass
return None
def create_cg(self):
try:
return self.IoThub_client.iot_hub_resource.create_event_hub_consumer_group(self.resource_group, self.hub, self.event_hub, self.name)
except Exception as exc:
self.fail('Error when creating the consumer group {0} for IoT Hub {1} event hub {2}: {3}'.format(self.name, self.hub, self.event_hub, str(exc)))
def delete_cg(self):
try:
return self.IoThub_client.iot_hub_resource.delete_event_hub_consumer_group(self.resource_group, self.hub, self.event_hub, self.name)
except Exception as exc:
self.fail('Error when deleting the consumer group {0} for IoT Hub {1} event hub {2}: {3}'.format(self.name, self.hub, self.event_hub, str(exc)))
def main():
AzureRMIoTHubConsumerGroup()
if __name__ == '__main__':
main()
| gpl-3.0 |
pasqualguerrero/django | django/core/handlers/base.py | 234 | 13346 | from __future__ import unicode_literals
import logging
import sys
import types
import warnings
from django import http
from django.conf import settings
from django.core import signals, urlresolvers
from django.core.exceptions import (
MiddlewareNotUsed, PermissionDenied, SuspiciousOperation,
)
from django.db import connections, transaction
from django.http.multipartparser import MultiPartParserError
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
from django.views import debug
logger = logging.getLogger('django.request')
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.conditional_content_removal,
]
def __init__(self):
self._request_middleware = None
self._view_middleware = None
self._template_response_middleware = None
self._response_middleware = None
self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__ in subclasses).
"""
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
mw_class = import_string(middleware_path)
try:
mw_instance = mw_class()
except MiddlewareNotUsed as exc:
if settings.DEBUG:
if six.text_type(exc):
logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
else:
logger.debug('MiddlewareNotUsed: %r', middleware_path)
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
def make_view_atomic(self, view):
non_atomic_requests = getattr(view, '_non_atomic_requests', set())
for db in connections.all():
if (db.settings_dict['ATOMIC_REQUESTS']
and db.alias not in non_atomic_requests):
view = transaction.atomic(using=db.alias)(view)
return view
def get_exception_response(self, request, resolver, status_code, exception):
try:
callback, param_dict = resolver.resolve_error_handler(status_code)
            # Unfortunately, the result of inspect.getargspec is not reliable
            # enough, depending on how the callback is wrapped in decorators
            # (frequent for handlers). Falling back on try/except:
try:
response = callback(request, **dict(param_dict, exception=exception))
except TypeError:
warnings.warn(
"Error handlers should accept an exception parameter. Update "
"your code as this parameter will be required in Django 2.0",
RemovedInDjango20Warning, stacklevel=2
)
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
        # Set up the default URL resolver for this thread. This code is
        # outside the try/except so that we don't get a spurious "unbound
        # local variable" exception in the event an exception is raised
        # before the resolver is set.
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.get_resolver(urlconf)
# Use a flag to check if the response was rendered to prevent
# multiple renderings or to force rendering if necessary.
response_is_rendered = False
try:
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
if hasattr(request, 'urlconf'):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.get_resolver(urlconf)
resolver_match = resolver.resolve(request.path_info)
callback, callback_args, callback_kwargs = resolver_match
request.resolver_match = resolver_match
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
wrapped_callback = self.make_view_atomic(callback)
try:
response = wrapped_callback(request, *callback_args, **callback_kwargs)
except Exception as e:
response = self.process_exception_by_middleware(e, request)
# Complain if the view returned None (a common error).
if response is None:
if isinstance(callback, types.FunctionType): # FBV
view_name = callback.__name__
else: # CBV
view_name = callback.__class__.__name__ + '.__call__'
raise ValueError("The view %s.%s didn't return an HttpResponse object. It returned None instead."
% (callback.__module__, view_name))
# If the response supports deferred rendering, apply template
# response middleware and then render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
# Complain if the template response middleware returned None (a common error).
if response is None:
raise ValueError(
"%s.process_template_response didn't return an "
"HttpResponse object. It returned None instead."
% (middleware_method.__self__.__class__.__name__))
try:
response = response.render()
except Exception as e:
response = self.process_exception_by_middleware(e, request)
response_is_rendered = True
except http.Http404 as exc:
logger.warning('Not Found: %s', request.path,
extra={
'status_code': 404,
'request': request
})
if settings.DEBUG:
response = debug.technical_404_response(request, exc)
else:
response = self.get_exception_response(request, resolver, 404, exc)
except PermissionDenied as exc:
logger.warning(
'Forbidden (Permission denied): %s', request.path,
extra={
'status_code': 403,
'request': request
})
response = self.get_exception_response(request, resolver, 403, exc)
except MultiPartParserError as exc:
logger.warning(
'Bad request (Unable to parse request body): %s', request.path,
extra={
'status_code': 400,
'request': request
})
response = self.get_exception_response(request, resolver, 400, exc)
except SuspiciousOperation as exc:
# The request logger receives events for any problematic request
# The security logger receives events for all SuspiciousOperations
security_logger = logging.getLogger('django.security.%s' %
exc.__class__.__name__)
security_logger.error(
force_text(exc),
extra={
'status_code': 400,
'request': request
})
if settings.DEBUG:
return debug.technical_500_response(request, *sys.exc_info(), status_code=400)
response = self.get_exception_response(request, resolver, 400, exc)
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else.
# Get the exception info now, in case another exception is thrown later.
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
# Complain if the response middleware returned None (a common error).
if response is None:
raise ValueError(
"%s.process_response didn't return an "
"HttpResponse object. It returned None instead."
% (middleware_method.__self__.__class__.__name__))
response = self.apply_response_fixes(request, response)
except: # Any exception should be gathered and handled
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
response._closable_objects.append(request)
# If the exception handler returns a TemplateResponse that has not
# been rendered, force it to be rendered.
if not response_is_rendered and callable(getattr(response, 'render', None)):
response = response.render()
return response
def process_exception_by_middleware(self, exception, request):
"""
        Pass the exception to the exception middleware. If no middleware
        returns a response for this exception, raise it.
"""
for middleware_method in self._exception_middleware:
response = middleware_method(request, exception)
if response:
return response
raise
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
logger.error('Internal Server Error: %s', request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request': request
}
)
if settings.DEBUG:
return debug.technical_500_response(request, *exc_info)
# If Http500 handler is not installed, re-raise last exception
if resolver.urlconf_module is None:
six.reraise(*exc_info)
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve_error_handler(500)
return callback(request, **param_dict)
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
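
# Illustrative sketch (not part of Django): a minimal middleware class showing
# the hooks that load_middleware() above picks up. process_request and
# process_view callbacks run in MIDDLEWARE_CLASSES order, while the response,
# template-response and exception callbacks are inserted at position 0 and
# therefore run in reverse order.
class _ExampleNoopMiddleware(object):
    def process_request(self, request):
        # Returning an HttpResponse here would short-circuit view resolution
        # in get_response() above; returning None continues normal handling.
        return None

    def process_response(self, request, response):
        # Response middleware must always return a response object, or
        # get_response() raises ValueError.
        return response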
| bsd-3-clause |
miptliot/edx-platform | common/djangoapps/xblock_django/api.py | 40 | 2336 | """
API methods related to xblock state.
"""
from xblock_django.models import XBlockConfiguration, XBlockStudioConfiguration
def deprecated_xblocks():
"""
Return the QuerySet of deprecated XBlock types. Note that this method is independent of
`XBlockStudioConfigurationFlag` and `XBlockStudioConfiguration`.
"""
return XBlockConfiguration.objects.current_set().filter(deprecated=True)
def disabled_xblocks():
"""
Return the QuerySet of disabled XBlock types (which should not render in the LMS).
Note that this method is independent of `XBlockStudioConfigurationFlag` and `XBlockStudioConfiguration`.
"""
return XBlockConfiguration.objects.current_set().filter(enabled=False)
def authorable_xblocks(allow_unsupported=False, name=None):
"""
This method returns the QuerySet of XBlocks that can be created in Studio (by default, only fully supported
and provisionally supported XBlocks), as stored in `XBlockStudioConfiguration`.
Note that this method does NOT check the value `XBlockStudioConfigurationFlag`, nor does it take into account
fully disabled xblocks (as returned by `disabled_xblocks`) or deprecated xblocks
(as returned by `deprecated_xblocks`).
Arguments:
allow_unsupported (bool): If `True`, enabled but unsupported XBlocks will also be returned.
Note that unsupported XBlocks are not recommended for use in courses due to non-compliance
with one or more of the base requirements, such as testing, accessibility, internationalization,
and documentation. Default value is `False`.
name (str): If provided, filters the returned XBlocks to those with the provided name. This is
useful for XBlocks with lots of template types.
Returns:
QuerySet: Returns authorable XBlocks, taking into account `support_level`, `enabled` and `name`
(if specified) as specified by `XBlockStudioConfiguration`. Does not take into account whether or not
`XBlockStudioConfigurationFlag` is enabled.
"""
blocks = XBlockStudioConfiguration.objects.current_set().filter(enabled=True)
if not allow_unsupported:
blocks = blocks.exclude(support_level=XBlockStudioConfiguration.UNSUPPORTED)
if name:
blocks = blocks.filter(name=name)
return blocks
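
def example_authorable_names(name=None):
    """
    Illustrative helper (not part of the edX API): list the names of XBlocks
    a course author could add in Studio, including enabled-but-unsupported
    ones. Assumes Django settings are configured so the queryset can be
    evaluated.
    """
    blocks = authorable_xblocks(allow_unsupported=True, name=name)
    return sorted({block.name for block in blocks})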
| agpl-3.0 |
shubhamdhama/zulip | zproject/default_settings.py | 1 | 16423 | from typing import TYPE_CHECKING, Any, Dict, List, Optional
if TYPE_CHECKING:
from django_auth_ldap.config import LDAPSearch
from typing_extensions import TypedDict
from .config import DEVELOPMENT, PRODUCTION, get_secret
if PRODUCTION:
from .prod_settings import EXTERNAL_HOST, ZULIP_ADMINISTRATOR
else:
from .dev_settings import EXTERNAL_HOST, ZULIP_ADMINISTRATOR
DEBUG = DEVELOPMENT
# These settings are intended for the server admin to set. We document them in
# prod_settings_template.py, and in the initial /etc/zulip/settings.py on a new
# install of the Zulip server.
# Extra HTTP "Host" values to allow (standard ones added in settings.py)
ALLOWED_HOSTS: List[str] = []
# Basic email settings
NOREPLY_EMAIL_ADDRESS = "noreply@" + EXTERNAL_HOST.split(":")[0]
ADD_TOKENS_TO_NOREPLY_ADDRESS = True
TOKENIZED_NOREPLY_EMAIL_ADDRESS = "noreply-{token}@" + EXTERNAL_HOST.split(":")[0]
PHYSICAL_ADDRESS = ''
FAKE_EMAIL_DOMAIN = EXTERNAL_HOST.split(":")[0]
# SMTP settings
EMAIL_HOST: Optional[str] = None
# Other settings, like EMAIL_HOST_USER, EMAIL_PORT, and EMAIL_USE_TLS,
# we leave up to Django's defaults.
# LDAP auth
AUTH_LDAP_SERVER_URI = ""
LDAP_EMAIL_ATTR: Optional[str] = None
AUTH_LDAP_USERNAME_ATTR: Optional[str] = None
AUTH_LDAP_REVERSE_EMAIL_SEARCH: Optional["LDAPSearch"] = None
# AUTH_LDAP_CONNECTION_OPTIONS: we set ldap.OPT_REFERRALS in settings.py if unset.
AUTH_LDAP_CONNECTION_OPTIONS: Dict[int, object] = {}
# Disable django-auth-ldap caching, to prevent problems with OU changes.
AUTH_LDAP_CACHE_TIMEOUT = 0
# Disable syncing user on each login; Using sync_ldap_user_data cron is recommended.
AUTH_LDAP_ALWAYS_UPDATE_USER = False
# Development-only settings for fake LDAP authentication; used to
# support local development of LDAP auth without an LDAP server.
# Detailed docs in zproject/dev_settings.py.
FAKE_LDAP_MODE: Optional[str] = None
FAKE_LDAP_NUM_USERS = 8
# Social auth; we support providing values for some of these
# settings in zulip-secrets.conf instead of settings.py in development.
SOCIAL_AUTH_GITHUB_KEY = get_secret('social_auth_github_key', development_only=True)
SOCIAL_AUTH_GITHUB_ORG_NAME: Optional[str] = None
SOCIAL_AUTH_GITHUB_TEAM_ID: Optional[str] = None
SOCIAL_AUTH_GITLAB_KEY = get_secret('social_auth_gitlab_key', development_only=True)
SOCIAL_AUTH_SUBDOMAIN: Optional[str] = None
SOCIAL_AUTH_AZUREAD_OAUTH2_SECRET = get_secret('azure_oauth2_secret')
SOCIAL_AUTH_GOOGLE_KEY = get_secret('social_auth_google_key', development_only=True)
# SAML:
SOCIAL_AUTH_SAML_SP_ENTITY_ID: Optional[str] = None
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = ''
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = ''
SOCIAL_AUTH_SAML_ORG_INFO: Optional[Dict[str, Dict[str, str]]] = None
SOCIAL_AUTH_SAML_TECHNICAL_CONTACT: Optional[Dict[str, str]] = None
SOCIAL_AUTH_SAML_SUPPORT_CONTACT: Optional[Dict[str, str]] = None
SOCIAL_AUTH_SAML_ENABLED_IDPS: Dict[str, Dict[str, str]] = {}
SOCIAL_AUTH_SAML_SECURITY_CONFIG: Dict[str, Any] = {}
# Set this to True to enforce that any configured IdP needs to specify
# the limit_to_subdomains setting to be considered valid:
SAML_REQUIRE_LIMIT_TO_SUBDOMAINS = False
# Historical name for SOCIAL_AUTH_GITHUB_KEY; still allowed in production.
GOOGLE_OAUTH2_CLIENT_ID: Optional[str] = None
# Apple:
SOCIAL_AUTH_APPLE_SERVICES_ID = get_secret('social_auth_apple_services_id', development_only=True)
SOCIAL_AUTH_APPLE_BUNDLE_ID = get_secret('social_auth_apple_bundle_id', development_only=True)
SOCIAL_AUTH_APPLE_KEY = get_secret('social_auth_apple_key', development_only=True)
SOCIAL_AUTH_APPLE_TEAM = get_secret('social_auth_apple_team', development_only=True)
SOCIAL_AUTH_APPLE_SCOPE = ['name', 'email']
SOCIAL_AUTH_APPLE_EMAIL_AS_USERNAME = True
# Other auth
SSO_APPEND_DOMAIN: Optional[str] = None
VIDEO_ZOOM_CLIENT_ID = get_secret('video_zoom_client_id', development_only=True)
VIDEO_ZOOM_CLIENT_SECRET = get_secret('video_zoom_client_secret')
# Email gateway
EMAIL_GATEWAY_PATTERN = ''
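# An illustrative value (the domain is a placeholder); %s is replaced with a
# stream-specific address token:
# EMAIL_GATEWAY_PATTERN = '%[email protected]'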
EMAIL_GATEWAY_LOGIN: Optional[str] = None
EMAIL_GATEWAY_IMAP_SERVER: Optional[str] = None
EMAIL_GATEWAY_IMAP_PORT: Optional[int] = None
EMAIL_GATEWAY_IMAP_FOLDER: Optional[str] = None
# Not documented in /etc/zulip/settings.py, since it's rarely needed.
EMAIL_GATEWAY_EXTRA_PATTERN_HACK: Optional[str] = None
# Error reporting
ERROR_REPORTING = True
BROWSER_ERROR_REPORTING = False
LOGGING_SHOW_MODULE = False
LOGGING_SHOW_PID = False
# File uploads and avatars
DEFAULT_AVATAR_URI = '/static/images/default-avatar.png'
DEFAULT_LOGO_URI = '/static/images/logo/zulip-org-logo.png'
S3_AVATAR_BUCKET = ''
S3_AUTH_UPLOADS_BUCKET = ''
S3_REGION = ''
LOCAL_UPLOADS_DIR: Optional[str] = None
MAX_FILE_UPLOAD_SIZE = 25
# Jitsi Meet video call integration; set to None to disable integration.
JITSI_SERVER_URL = 'https://meet.jit.si/'
# Max state storage per user
# TODO: Add this to zproject/prod_settings_template.py once stateful bots are fully functional.
USER_STATE_SIZE_LIMIT = 10000000
# Max size of a single configuration entry of an embedded bot.
BOT_CONFIG_SIZE_LIMIT = 10000
# External service configuration
CAMO_URI = ''
MEMCACHED_LOCATION = '127.0.0.1:11211'
MEMCACHED_USERNAME = None if get_secret("memcached_password") is None else "zulip"
RABBITMQ_HOST = '127.0.0.1'
RABBITMQ_USERNAME = 'zulip'
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REMOTE_POSTGRES_HOST = ''
REMOTE_POSTGRES_PORT = ''
REMOTE_POSTGRES_SSLMODE = ''
THUMBOR_URL = ''
THUMBOR_SERVES_CAMO = False
THUMBNAIL_IMAGES = False
SENDFILE_BACKEND: Optional[str] = None
# ToS/Privacy templates
PRIVACY_POLICY: Optional[str] = None
TERMS_OF_SERVICE: Optional[str] = None
# Security
ENABLE_FILE_LINKS = False
ENABLE_GRAVATAR = True
INLINE_IMAGE_PREVIEW = True
INLINE_URL_EMBED_PREVIEW = True
NAME_CHANGES_DISABLED = False
AVATAR_CHANGES_DISABLED = False
PASSWORD_MIN_LENGTH = 6
PASSWORD_MIN_GUESSES = 10000
PUSH_NOTIFICATION_BOUNCER_URL: Optional[str] = None
PUSH_NOTIFICATION_REDACT_CONTENT = False
SUBMIT_USAGE_STATISTICS = True
RATE_LIMITING = True
RATE_LIMITING_AUTHENTICATE = True
SEND_LOGIN_EMAILS = True
EMBEDDED_BOTS_ENABLED = False
# Two Factor Authentication is not yet implementation-complete
TWO_FACTOR_AUTHENTICATION_ENABLED = False
# This is used to send all hotspots for convenient manual testing
# in development mode.
ALWAYS_SEND_ALL_HOTSPOTS = False
# In-development search pills feature.
SEARCH_PILLS_ENABLED = False
# We log emails in development environment for accessing
# them easily through /emails page
DEVELOPMENT_LOG_EMAILS = DEVELOPMENT
# These settings are not documented in prod_settings_template.py.
# They should either be documented here, or documented there.
#
# Settings that it makes sense to document here instead of in
# prod_settings_template.py are those that
# * don't make sense to change in production, but rather are intended
# for dev and test environments; or
# * don't make sense to change on a typical production server with
# one or a handful of realms, though they might on an installation
# like Zulip Cloud or to work around a problem on another server.
NOTIFICATION_BOT = '[email protected]'
EMAIL_GATEWAY_BOT = '[email protected]'
NAGIOS_SEND_BOT = '[email protected]'
NAGIOS_RECEIVE_BOT = '[email protected]'
WELCOME_BOT = '[email protected]'
REMINDER_BOT = '[email protected]'
# The following bots are optional system bots not enabled by
# default. The default ones are defined in INTERNAL_BOTS, in settings.py.
# ERROR_BOT sends Django exceptions to an "errors" stream in the
# system realm.
ERROR_BOT: Optional[str] = None
# These are extra bot users for our end-to-end Nagios message
# sending tests.
NAGIOS_STAGING_SEND_BOT = '[email protected]' if PRODUCTION else None
NAGIOS_STAGING_RECEIVE_BOT = '[email protected]' if PRODUCTION else None
# SYSTEM_BOT_REALM would be a constant always set to 'zulip',
# except that it has a different value on Zulip Cloud. We will likely
# do a migration and eliminate this parameter in the future.
SYSTEM_BOT_REALM = 'zulipinternal'
# Structurally, we will probably eventually merge
# analytics into part of the main server, rather
# than a separate app.
EXTRA_INSTALLED_APPS = ['analytics']
# Default GOOGLE_CLIENT_ID to the value needed for Android auth to work
GOOGLE_CLIENT_ID = '835904834568-77mtr5mtmpgspj9b051del9i9r5t4g4n.apps.googleusercontent.com'
# Legacy event logs configuration. Our plans include removing
# log_event entirely in favor of RealmAuditLog, at which point we
# can remove this setting.
EVENT_LOGS_ENABLED = False
# Used to construct URLs to point to the Zulip server. Since we
# only support HTTPS in production, this is just for development.
EXTERNAL_URI_SCHEME = "https://"
# Whether anyone can create a new organization on the Zulip server.
OPEN_REALM_CREATION = False
# Setting for where the system bot users are. Likely has no
# purpose now that the REALMS_HAVE_SUBDOMAINS migration is finished.
SYSTEM_ONLY_REALMS = {"zulip"}
# Alternate hostnames to serve particular realms on, in addition to
# their usual subdomains. Keys are realm string_ids (aka subdomains),
# and values are alternate hosts.
# The values will also be added to ALLOWED_HOSTS.
REALM_HOSTS: Dict[str, str] = {}
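# An illustrative entry (subdomain and hostname are placeholders):
# REALM_HOSTS = {'mysubdomain': 'chat.example.com'}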
# Whether the server is using the Pgroonga full-text search
# backend. Plan is to turn this on for everyone after further
# testing.
USING_PGROONGA = False
# How Django should send emails. Set for most contexts in settings.py, but
# available for sysadmin override in unusual cases.
EMAIL_BACKEND: Optional[str] = None
# Whether to give admins a warning in the web app that email isn't set up.
# Set in settings.py when email isn't configured.
WARN_NO_EMAIL = False
# Whether to keep extra frontend stack trace data.
# TODO: Investigate whether this should be removed and set one way or other.
SAVE_FRONTEND_STACKTRACES = False
# If True, disable rate-limiting and other filters on sending error messages
# to admins, and enable logging on the error-reporting itself. Useful
# mainly in development.
DEBUG_ERROR_REPORTING = False
# Whether to flush memcached after data migrations. Because of
# how we do deployments in a way that avoids reusing memcached,
# this is disabled in production, but we need it in development.
POST_MIGRATION_CACHE_FLUSHING = False
# Settings for APNS. Only needed on push.zulipchat.com or if
# rebuilding the mobile app with a different push notifications
# server.
APNS_CERT_FILE: Optional[str] = None
APNS_SANDBOX = True
APNS_TOPIC = 'org.zulip.Zulip'
ZULIP_IOS_APP_ID = 'org.zulip.Zulip'
# Max number of "remove notification" FCM/GCM messages to send separately
# in one burst; the rest are batched. Older clients ignore the batched
# portion, so only receive this many removals. Lower values mitigate
# server congestion and client battery use. To batch unconditionally,
# set to 1.
MAX_UNBATCHED_REMOVE_NOTIFICATIONS = 10
# Limits related to the size of file uploads; the last few are in MB.
DATA_UPLOAD_MAX_MEMORY_SIZE = 25 * 1024 * 1024
MAX_AVATAR_FILE_SIZE = 5
MAX_ICON_FILE_SIZE = 5
MAX_LOGO_FILE_SIZE = 5
MAX_EMOJI_FILE_SIZE = 5
# Limits to help prevent spam, in particular by sending invitations.
#
# A non-admin user who's joined an open realm this recently can't invite at all.
INVITES_MIN_USER_AGE_DAYS = 3
# Default for a realm's `max_invites`; which applies per day,
# and only applies if OPEN_REALM_CREATION is true.
INVITES_DEFAULT_REALM_DAILY_MAX = 100
# Global rate-limit (list of pairs (days, max)) on invites from new realms.
# Only applies if OPEN_REALM_CREATION is true.
INVITES_NEW_REALM_LIMIT_DAYS = [(1, 100)]
# Definition of a new realm for INVITES_NEW_REALM_LIMIT.
INVITES_NEW_REALM_DAYS = 7
# Controls for which links are published in portico footers/headers/etc.
REGISTER_LINK_DISABLED: Optional[bool] = None
LOGIN_LINK_DISABLED = False
FIND_TEAM_LINK_DISABLED = True
# Controls whether the server should run certain jobs like deliver_email or
# deliver_scheduled_messages. In the long term, this setting is meant for
# handling jobs for which we don't have a means of establishing a locking
# mechanism that works with multiple servers running these jobs.
# TODO: We should rename this setting so that it reflects its purpose accurately.
EMAIL_DELIVERER_DISABLED = False
# What domains to treat like the root domain
# "auth" is by default a reserved subdomain for the use by python-social-auth.
ROOT_SUBDOMAIN_ALIASES = ["www", "auth"]
# Whether the root domain is a landing page or can host a realm.
ROOT_DOMAIN_LANDING_PAGE = False
# If using the Zephyr mirroring supervisord configuration, the
# hostname to connect to in order to transfer credentials from webathena.
PERSONAL_ZMIRROR_SERVER: Optional[str] = None
# When security-relevant links in emails expire.
CONFIRMATION_LINK_DEFAULT_VALIDITY_DAYS = 1
INVITATION_LINK_VALIDITY_DAYS = 10
REALM_CREATION_LINK_VALIDITY_DAYS = 7
# Version number for ToS. Change this if you want to force every
# user to click through to re-accept terms of service before using
# Zulip again on the web.
TOS_VERSION: Optional[str] = None
# Template to use when bumping TOS_VERSION to explain situation.
FIRST_TIME_TOS_TEMPLATE: Optional[str] = None
# Hostname used for Zulip's statsd logging integration.
STATSD_HOST = ''
# Configuration for JWT auth.
if TYPE_CHECKING:
class JwtAuthKey(TypedDict):
key: str
# See https://pyjwt.readthedocs.io/en/latest/algorithms.html for a list
# of supported algorithms.
algorithms: List[str]
JWT_AUTH_KEYS: Dict[str, "JwtAuthKey"] = {}
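# An illustrative entry, keyed by realm subdomain (values are placeholders):
# JWT_AUTH_KEYS = {'zulip': {'key': '<shared secret>', 'algorithms': ['HS256']}}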
# https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-SERVER_EMAIL
# Django setting for what from address to use in error emails.
SERVER_EMAIL = ZULIP_ADMINISTRATOR
# Django setting for who receives error emails.
ADMINS = (("Zulip Administrator", ZULIP_ADMINISTRATOR),)
# From address for welcome emails.
WELCOME_EMAIL_SENDER: Optional[Dict[str, str]] = None
# Whether we should use users' own email addresses as the from
# address when sending missed-message emails. Off by default
# because some transactional email providers reject sending such
# emails since they can look like spam.
SEND_MISSED_MESSAGE_EMAILS_AS_USER = False
# Whether to send periodic digests of activity.
SEND_DIGEST_EMAILS = True
# Used to change the Zulip logo in portico pages.
CUSTOM_LOGO_URL: Optional[str] = None
# Random salt used when deterministically generating passwords in
# development.
INITIAL_PASSWORD_SALT: Optional[str] = None
# Settings configuring the special instrumentation of the send_event
# code path used in generating API documentation for /events.
LOG_API_EVENT_TYPES = False
# Used to control whether certain management commands are run on
# the server.
# TODO: Replace this with a smarter "run on only one server" system.
STAGING = False
# Configuration option for our email/Zulip error reporting.
STAGING_ERROR_NOTIFICATIONS = False
# How long to wait before presence should treat a user as offline.
# TODO: Figure out why this is different from the corresponding
# value in static/js/presence.js. Also, probably move it out of
# default_settings, since it likely isn't usefully user-configurable.
OFFLINE_THRESHOLD_SECS = 5 * 60
# How many days deleted messages data should be kept before being
# permanently deleted.
ARCHIVED_DATA_VACUUMING_DELAY_DAYS = 7
# Enables billing pages and plan-based feature gates. If False, all features
# are available to all realms.
BILLING_ENABLED = False
FREE_TRIAL_DAYS = get_secret('free_trial_days', None)
# Custom message (supports HTML) to be shown in the navbar of landing pages. Used mainly for
# making announcements.
LANDING_PAGE_NAVBAR_MESSAGE: Optional[str] = None
# Automatically catch-up soft deactivated users when running the
# `soft-deactivate-users` cron. Turn this off if the server has 10Ks of
# users, and you would like to save some disk space. Soft-deactivated
# returning users would still be caught-up normally.
AUTO_CATCH_UP_SOFT_DEACTIVATED_USERS = True
# Enables Google Analytics on selected portico pages.
GOOGLE_ANALYTICS_ID: Optional[str] = None
# This is overridden by dev_settings.py for droplets.
IS_DEV_DROPLET = False
# Used by puppet/zulip_ops/files/cron.d/check_send_receive_time.
NAGIOS_BOT_HOST = EXTERNAL_HOST
# Automatically deactivate users not found by the AUTH_LDAP_USER_SEARCH query.
LDAP_DEACTIVATE_NON_MATCHING_USERS: Optional[bool] = None
| apache-2.0 |
cctaylor/googleads-python-lib | examples/adwords/v201409/advanced_operations/add_ad_customizer.py | 1 | 8099 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds an ad customizer feed.
Associates the feed with customer and adds an ad that
uses the feed to populate dynamic data.
Tags: CustomerFeedService.mutate, FeedItemService.mutate
Tags: FeedMappingService.mutate, FeedService.mutate
Tags: AdGroupAdService.mutate
"""
__author__ = ('[email protected] (Mark Saniscalchi)',
'[email protected] (Yufeng Guo)')
# Import appropriate classes from the client library.
from googleads import adwords
# See the Placeholder reference page for a list of all the placeholder types
# and fields:
# https://developers.google.com/adwords/api/docs/appendix/placeholders
PLACEHOLDER_AD_CUSTOMIZER = '10'
PLACEHOLDER_FIELD_INTEGER = '1'
PLACEHOLDER_FIELD_FLOAT = '2'
PLACEHOLDER_FIELD_PRICE = '3'
PLACEHOLDER_FIELD_DATE = '4'
PLACEHOLDER_FIELD_STRING = '5'
ADGROUPS = [
'INSERT_ADGROUP_ID_1_HERE',
'INSERT_ADGROUP_ID_2_HERE'
]
FEEDNAME = 'INSERT_FEED_NAME_HERE'
def main(client, adgroups):
# Initialize appropriate services.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201409')
customer_feed_service = client.GetService(
'CustomerFeedService', version='v201409')
feed_item_service = client.GetService('FeedItemService', version='v201409')
feed_mapping_service = client.GetService(
'FeedMappingService', version='v201409')
feed_service = client.GetService('FeedService', version='v201409')
# First, create a customizer feed. One feed per account can be used for all
# ads.
customizer_feed = {
'name': FEEDNAME,
'attributes': [
{'type': 'STRING', 'name': 'Name'},
{'type': 'STRING', 'name': 'Price'},
{'type': 'DATE_TIME', 'name': 'Date'}
]
}
feed_service_operation = {
'operator': 'ADD',
'operand': customizer_feed
}
response = feed_service.mutate([feed_service_operation])
if response and 'value' in response:
feed = response['value'][0]
feed_data = {
'feedId': feed['id'],
'nameId': feed['attributes'][0]['id'],
'priceId': feed['attributes'][1]['id'],
'dateId': feed['attributes'][2]['id']
}
print ('Feed with name \'%s\' and ID %s was added with:'
           '\tName attribute ID %s and price attribute ID %s and date'
           ' attribute ID %s') % (feed['name'], feed['id'], feed_data['nameId'],
feed_data['priceId'], feed_data['dateId'])
else:
raise Exception('No feeds were added')
# Creating feed mapping to map the fields with customizer IDs.
feed_mapping = {
'placeholderType': PLACEHOLDER_AD_CUSTOMIZER,
'feedId': feed_data['feedId'],
'attributeFieldMappings': [
{
'feedAttributeId': feed_data['nameId'],
'fieldId': PLACEHOLDER_FIELD_STRING
},
{
'feedAttributeId': feed_data['priceId'],
'fieldId': PLACEHOLDER_FIELD_PRICE
},
{
'feedAttributeId': feed_data['dateId'],
'fieldId': PLACEHOLDER_FIELD_DATE
}
]
}
feed_mapping_operation = {
'operator': 'ADD',
'operand': feed_mapping
}
response = feed_mapping_service.mutate([feed_mapping_operation])
if response and 'value' in response:
feed_mapping = response['value'][0]
print ('Feed mapping with ID %s and placeholder type %s was saved for feed'
' with ID %s.') % (feed_mapping['feedMappingId'],
feed_mapping['placeholderType'],
feed_mapping['feedId'])
else:
raise Exception('No feed mappings were added.')
# Now adding feed items -- the values we'd like to place.
items_data = [
{
'name': 'Mars',
'price': '$1234.56',
'date': '20140601 000000',
'adGroupId': adgroups[0]
},
{
'name': 'Venus',
'price': '$1450.00',
'date': '20140615 120000',
'adGroupId': adgroups[1]
}
]
feed_items = [{'feedId': feed_data['feedId'],
'adGroupTargeting': {
'TargetingAdGroupId': item['adGroupId']
},
'attributeValues': [
{
'feedAttributeId': feed_data['nameId'],
'stringValue': item['name']
},
{
'feedAttributeId': feed_data['priceId'],
'stringValue': item['price']
},
{
'feedAttributeId': feed_data['dateId'],
'stringValue': item['date']
}
]} for item in items_data]
feed_item_operations = [{
'operator': 'ADD',
'operand': feed_item
} for feed_item in feed_items]
response = feed_item_service.mutate(feed_item_operations)
if response and 'value' in response:
for feed_item in response['value']:
print 'Feed item with ID %s was added.' % feed_item['feedItemId']
else:
raise Exception('No feed items were added.')
# Finally, creating a customer (account-level) feed with a matching function
# that determines when to use this feed. For this case we use the "IDENTITY"
# matching function that is always 'true' just to associate this feed with
# the customer. The targeting is done within the feed items using the
# :campaign_targeting, :ad_group_targeting, or :keyword_targeting attributes.
matching_function = {
'operator': 'IDENTITY',
'lhsOperand': [
{
'xsi_type': 'ConstantOperand',
'type': 'BOOLEAN',
'booleanValue': 'true'
}
]
}
customer_feed = {
'feedId': feed_data['feedId'],
'matchingFunction': matching_function,
'placeholderTypes': [PLACEHOLDER_AD_CUSTOMIZER]
}
customer_feed_operation = {
'operator': 'ADD',
'operand': customer_feed
}
response = customer_feed_service.mutate([customer_feed_operation])
if response and 'value' in response:
feed = response['value'][0]
print 'Customer feed with ID %s was added.' % feed['feedId']
else:
raise Exception('No customer feeds were added.')
# All set! We can now create ads with customizations.
text_ad = {
'xsi_type': 'TextAd',
'headline': 'Luxury Cruise to {=%s.Name}' % FEEDNAME,
'description1': 'Only {=%s.Price}' % FEEDNAME,
'description2': 'Offer ends in {=countdown(%s.Date)}!' % FEEDNAME,
'finalUrls': ['http://www.example.com'],
'displayUrl': 'www.example.com'
}
# We add the same ad to both ad groups. When they serve, they will show
# different values, since they match different feed items.
operations = [{
'operator': 'ADD',
'operand': {
'adGroupId': adgroup,
'ad': text_ad
}
} for adgroup in adgroups]
  response = ad_group_ad_service.mutate(operations)
if response and 'value' in response:
for ad in response['value']:
print ('\tCreated an ad with ID \'%s\', type \'%s\', and status \'%s\'.'
% (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
else:
raise Exception('No ads were added.')
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUPS)
| apache-2.0 |
valkjsaaa/sl4a | python/src/Lib/test/string_tests.py | 55 | 59642 | """
Common tests shared by test_str, test_unicode, test_userstring and test_string.
"""
import unittest, string, sys, struct
from test import test_support
from UserList import UserList
class Sequence:
def __init__(self, seq='wxyz'): self.seq = seq
def __len__(self): return len(self.seq)
def __getitem__(self, i): return self.seq[i]
class BadSeq1(Sequence):
def __init__(self): self.seq = [7, 'hello', 123L]
class BadSeq2(Sequence):
def __init__(self): self.seq = ['a', 'b', 'c']
def __len__(self): return 8
class CommonTest(unittest.TestCase):
    # This testcase contains tests that can be used in all
    # stringlike classes. Currently these are str, unicode,
    # UserString and the string module.
# The type to be tested
    # Change in subclasses to change the behaviour of fixtype()
type2test = None
# All tests pass their arguments to the testing methods
    # as str objects. fixtype() can be used to propagate
    # these arguments to the appropriate type.
def fixtype(self, obj):
if isinstance(obj, str):
return self.__class__.type2test(obj)
elif isinstance(obj, list):
return [self.fixtype(x) for x in obj]
elif isinstance(obj, tuple):
return tuple([self.fixtype(x) for x in obj])
elif isinstance(obj, dict):
return dict([
(self.fixtype(key), self.fixtype(value))
for (key, value) in obj.iteritems()
])
else:
return obj
# check that object.method(*args) returns result
def checkequal(self, result, object, methodname, *args):
result = self.fixtype(result)
object = self.fixtype(object)
args = self.fixtype(args)
realresult = getattr(object, methodname)(*args)
self.assertEqual(
result,
realresult
)
# if the original is returned make sure that
# this doesn't happen with subclasses
if object == realresult:
class subtype(self.__class__.type2test):
pass
object = subtype(object)
realresult = getattr(object, methodname)(*args)
self.assert_(object is not realresult)
# check that object.method(*args) raises exc
def checkraises(self, exc, object, methodname, *args):
object = self.fixtype(object)
args = self.fixtype(args)
self.assertRaises(
exc,
getattr(object, methodname),
*args
)
# call object.method(*args) without any checks
def checkcall(self, object, methodname, *args):
object = self.fixtype(object)
args = self.fixtype(args)
getattr(object, methodname)(*args)
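
    # For example (illustrative), in a subclass that sets type2test = str,
    #   self.checkequal('Hello ', 'hello ', 'capitalize')
    # asserts that str('hello ').capitalize() == 'Hello '.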
def test_hash(self):
# SF bug 1054139: += optimization was not invalidating cached hash value
a = self.type2test('DNSSEC')
b = self.type2test('')
for c in a:
b += c
hash(b)
self.assertEqual(hash(a), hash(b))
def test_capitalize(self):
self.checkequal(' hello ', ' hello ', 'capitalize')
self.checkequal('Hello ', 'Hello ','capitalize')
self.checkequal('Hello ', 'hello ','capitalize')
self.checkequal('Aaaa', 'aaaa', 'capitalize')
self.checkequal('Aaaa', 'AaAa', 'capitalize')
self.checkraises(TypeError, 'hello', 'capitalize', 42)
def test_count(self):
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(2, 'aaa', 'count', 'a', 1)
self.checkequal(0, 'aaa', 'count', 'a', 10)
self.checkequal(1, 'aaa', 'count', 'a', -1)
self.checkequal(3, 'aaa', 'count', 'a', -10)
self.checkequal(1, 'aaa', 'count', 'a', 0, 1)
self.checkequal(3, 'aaa', 'count', 'a', 0, 10)
self.checkequal(2, 'aaa', 'count', 'a', 0, -1)
self.checkequal(0, 'aaa', 'count', 'a', 0, -10)
self.checkequal(3, 'aaa', 'count', '', 1)
self.checkequal(1, 'aaa', 'count', '', 3)
self.checkequal(0, 'aaa', 'count', '', 10)
self.checkequal(2, 'aaa', 'count', '', -1)
self.checkequal(4, 'aaa', 'count', '', -10)
self.checkequal(1, '', 'count', '')
self.checkequal(0, '', 'count', '', 1, 1)
self.checkequal(0, '', 'count', '', sys.maxint, 0)
self.checkequal(0, '', 'count', 'xx')
self.checkequal(0, '', 'count', 'xx', 1, 1)
self.checkequal(0, '', 'count', 'xx', sys.maxint, 0)
self.checkraises(TypeError, 'hello', 'count')
self.checkraises(TypeError, 'hello', 'count', 42)
# For a variety of combinations,
# verify that str.count() matches an equivalent function
# replacing all occurrences and then differencing the string lengths
charset = ['', 'a', 'b']
digits = 7
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
n = len(i)
for j in teststrings:
r1 = i.count(j)
if j:
r2, rem = divmod(n - len(i.replace(j, '')), len(j))
else:
r2, rem = len(i)+1, 0
if rem or r1 != r2:
self.assertEqual(rem, 0, '%s != 0 for %s' % (rem, i))
self.assertEqual(r1, r2, '%s != %s for %s' % (r1, r2, i))
def test_find(self):
self.checkequal(0, 'abcdefghiabc', 'find', 'abc')
self.checkequal(9, 'abcdefghiabc', 'find', 'abc', 1)
self.checkequal(-1, 'abcdefghiabc', 'find', 'def', 4)
self.checkequal(0, 'abc', 'find', '', 0)
self.checkequal(3, 'abc', 'find', '', 3)
self.checkequal(-1, 'abc', 'find', '', 4)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'find', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'find')
self.checkraises(TypeError, 'hello', 'find', 42)
self.checkequal(0, '', 'find', '')
self.checkequal(-1, '', 'find', '', 1, 1)
self.checkequal(-1, '', 'find', '', sys.maxint, 0)
self.checkequal(-1, '', 'find', 'xx')
self.checkequal(-1, '', 'find', 'xx', 1, 1)
self.checkequal(-1, '', 'find', 'xx', sys.maxint, 0)
# For a variety of combinations,
# verify that str.find() matches __contains__
# and that the found substring is really at that location
charset = ['', 'a', 'b', 'c']
digits = 5
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
for j in teststrings:
loc = i.find(j)
r1 = (loc != -1)
r2 = j in i
if r1 != r2:
self.assertEqual(r1, r2)
if loc != -1:
self.assertEqual(i[loc:loc+len(j)], j)
def test_rfind(self):
self.checkequal(9, 'abcdefghiabc', 'rfind', 'abc')
self.checkequal(12, 'abcdefghiabc', 'rfind', '')
self.checkequal(0, 'abcdefghiabc', 'rfind', 'abcd')
self.checkequal(-1, 'abcdefghiabc', 'rfind', 'abcz')
self.checkequal(3, 'abc', 'rfind', '', 0)
self.checkequal(3, 'abc', 'rfind', '', 3)
self.checkequal(-1, 'abc', 'rfind', '', 4)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'rfind', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'rfind', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'rfind')
self.checkraises(TypeError, 'hello', 'rfind', 42)
def test_index(self):
self.checkequal(0, 'abcdefghiabc', 'index', '')
self.checkequal(3, 'abcdefghiabc', 'index', 'def')
self.checkequal(0, 'abcdefghiabc', 'index', 'abc')
self.checkequal(9, 'abcdefghiabc', 'index', 'abc', 1)
self.checkraises(ValueError, 'abcdefghiabc', 'index', 'hib')
self.checkraises(ValueError, 'abcdefghiab', 'index', 'abc', 1)
self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', 8)
self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', -1)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4)
self.checkraises(ValueError, 'rrarrrrrrrrra', 'index', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'index')
self.checkraises(TypeError, 'hello', 'index', 42)
def test_rindex(self):
self.checkequal(12, 'abcdefghiabc', 'rindex', '')
self.checkequal(3, 'abcdefghiabc', 'rindex', 'def')
self.checkequal(9, 'abcdefghiabc', 'rindex', 'abc')
self.checkequal(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)
self.checkraises(ValueError, 'abcdefghiabc', 'rindex', 'hib')
self.checkraises(ValueError, 'defghiabc', 'rindex', 'def', 1)
self.checkraises(ValueError, 'defghiabc', 'rindex', 'abc', 0, -1)
self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, 8)
self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, -1)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4)
self.checkraises(ValueError, 'rrarrrrrrrrra', 'rindex', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'rindex', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'rindex')
self.checkraises(TypeError, 'hello', 'rindex', 42)
def test_lower(self):
self.checkequal('hello', 'HeLLo', 'lower')
self.checkequal('hello', 'hello', 'lower')
self.checkraises(TypeError, 'hello', 'lower', 42)
def test_upper(self):
self.checkequal('HELLO', 'HeLLo', 'upper')
self.checkequal('HELLO', 'HELLO', 'upper')
self.checkraises(TypeError, 'hello', 'upper', 42)
def test_expandtabs(self):
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 4)
self.checkequal('abc\r\nab def\ng hi', 'abc\r\nab\tdef\ng\thi', 'expandtabs', 4)
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
self.checkequal('abc\r\nab\r\ndef\ng\r\nhi', 'abc\r\nab\r\ndef\ng\r\nhi', 'expandtabs', 4)
self.checkequal(' a\n b', ' \ta\n\tb', 'expandtabs', 1)
self.checkraises(TypeError, 'hello', 'expandtabs', 42, 42)
# This test is only valid when sizeof(int) == sizeof(void*) == 4.
if sys.maxint < (1 << 32) and struct.calcsize('P') == 4:
self.checkraises(OverflowError,
'\ta\n\tb', 'expandtabs', sys.maxint)
def test_split(self):
self.checkequal(['this', 'is', 'the', 'split', 'function'],
'this is the split function', 'split')
# by whitespace
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'split')
self.checkequal(['a', 'b c d'], 'a b c d', 'split', None, 1)
self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None,
sys.maxint-1)
self.checkequal(['a b c d'], 'a b c d', 'split', None, 0)
self.checkequal(['a b c d'], ' a b c d', 'split', None, 0)
self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
self.checkequal([], ' ', 'split')
self.checkequal(['a'], ' a ', 'split')
self.checkequal(['a', 'b'], ' a b ', 'split')
self.checkequal(['a', 'b '], ' a b ', 'split', None, 1)
self.checkequal(['a', 'b c '], ' a b c ', 'split', None, 1)
self.checkequal(['a', 'b', 'c '], ' a b c ', 'split', None, 2)
self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'split')
aaa = ' a '*20
self.checkequal(['a']*20, aaa, 'split')
self.checkequal(['a'] + [aaa[4:]], aaa, 'split', None, 1)
self.checkequal(['a']*19 + ['a '], aaa, 'split', None, 19)
# by a char
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|')
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
self.checkequal(['a', 'b|c|d'], 'a|b|c|d', 'split', '|', 1)
self.checkequal(['a', 'b', 'c|d'], 'a|b|c|d', 'split', '|', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|',
sys.maxint-2)
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
self.checkequal(['a', '', 'b||c||d'], 'a||b||c||d', 'split', '|', 2)
self.checkequal(['endcase ', ''], 'endcase |', 'split', '|')
self.checkequal(['', ' startcase'], '| startcase', 'split', '|')
self.checkequal(['', 'bothcase', ''], '|bothcase|', 'split', '|')
self.checkequal(['a', '', 'b\x00c\x00d'], 'a\x00\x00b\x00c\x00d', 'split', '\x00', 2)
self.checkequal(['a']*20, ('a|'*20)[:-1], 'split', '|')
self.checkequal(['a']*15 +['a|a|a|a|a'],
('a|'*20)[:-1], 'split', '|', 15)
# by string
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
self.checkequal(['a', 'b//c//d'], 'a//b//c//d', 'split', '//', 1)
self.checkequal(['a', 'b', 'c//d'], 'a//b//c//d', 'split', '//', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//',
sys.maxint-10)
self.checkequal(['a//b//c//d'], 'a//b//c//d', 'split', '//', 0)
self.checkequal(['a', '', 'b////c////d'], 'a////b////c////d', 'split', '//', 2)
self.checkequal(['endcase ', ''], 'endcase test', 'split', 'test')
self.checkequal(['', ' begincase'], 'test begincase', 'split', 'test')
self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
'split', 'test')
self.checkequal(['a', 'bc'], 'abbbc', 'split', 'bb')
self.checkequal(['', ''], 'aaa', 'split', 'aaa')
self.checkequal(['aaa'], 'aaa', 'split', 'aaa', 0)
self.checkequal(['ab', 'ab'], 'abbaab', 'split', 'ba')
self.checkequal(['aaaa'], 'aaaa', 'split', 'aab')
self.checkequal([''], '', 'split', 'aaa')
self.checkequal(['aa'], 'aa', 'split', 'aaa')
self.checkequal(['A', 'bobb'], 'Abbobbbobb', 'split', 'bbobb')
self.checkequal(['A', 'B', ''], 'AbbobbBbbobb', 'split', 'bbobb')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH', 19)
self.checkequal(['a']*18 + ['aBLAHa'], ('aBLAH'*20)[:-4],
'split', 'BLAH', 18)
# mixed use of str and unicode
self.checkequal([u'a', u'b', u'c d'], 'a b c d', 'split', u' ', 2)
# argument type
self.checkraises(TypeError, 'hello', 'split', 42, 42, 42)
# null case
self.checkraises(ValueError, 'hello', 'split', '')
self.checkraises(ValueError, 'hello', 'split', '', 0)
def test_rsplit(self):
self.checkequal(['this', 'is', 'the', 'rsplit', 'function'],
'this is the rsplit function', 'rsplit')
# by whitespace
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'rsplit')
self.checkequal(['a b c', 'd'], 'a b c d', 'rsplit', None, 1)
self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None,
sys.maxint-20)
self.checkequal(['a b c d'], 'a b c d', 'rsplit', None, 0)
self.checkequal(['a b c d'], 'a b c d ', 'rsplit', None, 0)
self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
self.checkequal([], ' ', 'rsplit')
self.checkequal(['a'], ' a ', 'rsplit')
self.checkequal(['a', 'b'], ' a b ', 'rsplit')
self.checkequal([' a', 'b'], ' a b ', 'rsplit', None, 1)
self.checkequal([' a b','c'], ' a b c ', 'rsplit',
None, 1)
self.checkequal([' a', 'b', 'c'], ' a b c ', 'rsplit',
None, 2)
self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'rsplit', None, 88)
aaa = ' a '*20
self.checkequal(['a']*20, aaa, 'rsplit')
self.checkequal([aaa[:-4]] + ['a'], aaa, 'rsplit', None, 1)
self.checkequal([' a a'] + ['a']*18, aaa, 'rsplit', None, 18)
# by a char
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|')
self.checkequal(['a|b|c', 'd'], 'a|b|c|d', 'rsplit', '|', 1)
self.checkequal(['a|b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|',
sys.maxint-100)
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'rsplit', '|', 0)
self.checkequal(['a||b||c', '', 'd'], 'a||b||c||d', 'rsplit', '|', 2)
self.checkequal(['', ' begincase'], '| begincase', 'rsplit', '|')
self.checkequal(['endcase ', ''], 'endcase |', 'rsplit', '|')
self.checkequal(['', 'bothcase', ''], '|bothcase|', 'rsplit', '|')
self.checkequal(['a\x00\x00b', 'c', 'd'], 'a\x00\x00b\x00c\x00d', 'rsplit', '\x00', 2)
self.checkequal(['a']*20, ('a|'*20)[:-1], 'rsplit', '|')
self.checkequal(['a|a|a|a|a']+['a']*15,
('a|'*20)[:-1], 'rsplit', '|', 15)
# by string
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//')
self.checkequal(['a//b//c', 'd'], 'a//b//c//d', 'rsplit', '//', 1)
self.checkequal(['a//b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//',
sys.maxint-5)
self.checkequal(['a//b//c//d'], 'a//b//c//d', 'rsplit', '//', 0)
self.checkequal(['a////b////c', '', 'd'], 'a////b////c////d', 'rsplit', '//', 2)
self.checkequal(['', ' begincase'], 'test begincase', 'rsplit', 'test')
self.checkequal(['endcase ', ''], 'endcase test', 'rsplit', 'test')
self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
'rsplit', 'test')
self.checkequal(['ab', 'c'], 'abbbc', 'rsplit', 'bb')
self.checkequal(['', ''], 'aaa', 'rsplit', 'aaa')
self.checkequal(['aaa'], 'aaa', 'rsplit', 'aaa', 0)
self.checkequal(['ab', 'ab'], 'abbaab', 'rsplit', 'ba')
self.checkequal(['aaaa'], 'aaaa', 'rsplit', 'aab')
self.checkequal([''], '', 'rsplit', 'aaa')
self.checkequal(['aa'], 'aa', 'rsplit', 'aaa')
self.checkequal(['bbob', 'A'], 'bbobbbobbA', 'rsplit', 'bbobb')
self.checkequal(['', 'B', 'A'], 'bbobbBbbobbA', 'rsplit', 'bbobb')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH', 19)
self.checkequal(['aBLAHa'] + ['a']*18, ('aBLAH'*20)[:-4],
'rsplit', 'BLAH', 18)
# mixed use of str and unicode
self.checkequal([u'a b', u'c', u'd'], 'a b c d', 'rsplit', u' ', 2)
# argument type
self.checkraises(TypeError, 'hello', 'rsplit', 42, 42, 42)
# null case
self.checkraises(ValueError, 'hello', 'rsplit', '')
self.checkraises(ValueError, 'hello', 'rsplit', '', 0)
def test_strip(self):
self.checkequal('hello', ' hello ', 'strip')
self.checkequal('hello ', ' hello ', 'lstrip')
self.checkequal(' hello', ' hello ', 'rstrip')
self.checkequal('hello', 'hello', 'strip')
# strip/lstrip/rstrip with None arg
self.checkequal('hello', ' hello ', 'strip', None)
self.checkequal('hello ', ' hello ', 'lstrip', None)
self.checkequal(' hello', ' hello ', 'rstrip', None)
self.checkequal('hello', 'hello', 'strip', None)
# strip/lstrip/rstrip with str arg
self.checkequal('hello', 'xyzzyhelloxyzzy', 'strip', 'xyz')
self.checkequal('helloxyzzy', 'xyzzyhelloxyzzy', 'lstrip', 'xyz')
self.checkequal('xyzzyhello', 'xyzzyhelloxyzzy', 'rstrip', 'xyz')
self.checkequal('hello', 'hello', 'strip', 'xyz')
# strip/lstrip/rstrip with unicode arg
if test_support.have_unicode:
self.checkequal(unicode('hello', 'ascii'), 'xyzzyhelloxyzzy',
'strip', unicode('xyz', 'ascii'))
self.checkequal(unicode('helloxyzzy', 'ascii'), 'xyzzyhelloxyzzy',
'lstrip', unicode('xyz', 'ascii'))
self.checkequal(unicode('xyzzyhello', 'ascii'), 'xyzzyhelloxyzzy',
'rstrip', unicode('xyz', 'ascii'))
# XXX
#self.checkequal(unicode('hello', 'ascii'), 'hello',
# 'strip', unicode('xyz', 'ascii'))
self.checkraises(TypeError, 'hello', 'strip', 42, 42)
self.checkraises(TypeError, 'hello', 'lstrip', 42, 42)
self.checkraises(TypeError, 'hello', 'rstrip', 42, 42)
def test_ljust(self):
self.checkequal('abc ', 'abc', 'ljust', 10)
self.checkequal('abc ', 'abc', 'ljust', 6)
self.checkequal('abc', 'abc', 'ljust', 3)
self.checkequal('abc', 'abc', 'ljust', 2)
self.checkequal('abc*******', 'abc', 'ljust', 10, '*')
self.checkraises(TypeError, 'abc', 'ljust')
def test_rjust(self):
self.checkequal(' abc', 'abc', 'rjust', 10)
self.checkequal(' abc', 'abc', 'rjust', 6)
self.checkequal('abc', 'abc', 'rjust', 3)
self.checkequal('abc', 'abc', 'rjust', 2)
self.checkequal('*******abc', 'abc', 'rjust', 10, '*')
self.checkraises(TypeError, 'abc', 'rjust')
def test_center(self):
self.checkequal(' abc ', 'abc', 'center', 10)
self.checkequal(' abc ', 'abc', 'center', 6)
self.checkequal('abc', 'abc', 'center', 3)
self.checkequal('abc', 'abc', 'center', 2)
self.checkequal('***abc****', 'abc', 'center', 10, '*')
self.checkraises(TypeError, 'abc', 'center')
def test_swapcase(self):
self.checkequal('hEllO CoMPuTErS', 'HeLLo cOmpUteRs', 'swapcase')
self.checkraises(TypeError, 'hello', 'swapcase', 42)
def test_replace(self):
EQ = self.checkequal
# Operations on the empty string
EQ("", "", "replace", "", "")
EQ("A", "", "replace", "", "A")
EQ("", "", "replace", "A", "")
EQ("", "", "replace", "A", "A")
EQ("", "", "replace", "", "", 100)
EQ("", "", "replace", "", "", sys.maxint)
# interleave (from=="", 'to' gets inserted everywhere)
EQ("A", "A", "replace", "", "")
EQ("*A*", "A", "replace", "", "*")
EQ("*1A*1", "A", "replace", "", "*1")
EQ("*-#A*-#", "A", "replace", "", "*-#")
EQ("*-A*-A*-", "AA", "replace", "", "*-")
EQ("*-A*-A*-", "AA", "replace", "", "*-", -1)
EQ("*-A*-A*-", "AA", "replace", "", "*-", sys.maxint)
EQ("*-A*-A*-", "AA", "replace", "", "*-", 4)
EQ("*-A*-A*-", "AA", "replace", "", "*-", 3)
EQ("*-A*-A", "AA", "replace", "", "*-", 2)
EQ("*-AA", "AA", "replace", "", "*-", 1)
EQ("AA", "AA", "replace", "", "*-", 0)
# single character deletion (from=="A", to=="")
EQ("", "A", "replace", "A", "")
EQ("", "AAA", "replace", "A", "")
EQ("", "AAA", "replace", "A", "", -1)
EQ("", "AAA", "replace", "A", "", sys.maxint)
EQ("", "AAA", "replace", "A", "", 4)
EQ("", "AAA", "replace", "A", "", 3)
EQ("A", "AAA", "replace", "A", "", 2)
EQ("AA", "AAA", "replace", "A", "", 1)
EQ("AAA", "AAA", "replace", "A", "", 0)
EQ("", "AAAAAAAAAA", "replace", "A", "")
EQ("BCD", "ABACADA", "replace", "A", "")
EQ("BCD", "ABACADA", "replace", "A", "", -1)
EQ("BCD", "ABACADA", "replace", "A", "", sys.maxint)
EQ("BCD", "ABACADA", "replace", "A", "", 5)
EQ("BCD", "ABACADA", "replace", "A", "", 4)
EQ("BCDA", "ABACADA", "replace", "A", "", 3)
EQ("BCADA", "ABACADA", "replace", "A", "", 2)
EQ("BACADA", "ABACADA", "replace", "A", "", 1)
EQ("ABACADA", "ABACADA", "replace", "A", "", 0)
EQ("BCD", "ABCAD", "replace", "A", "")
EQ("BCD", "ABCADAA", "replace", "A", "")
EQ("BCD", "BCD", "replace", "A", "")
EQ("*************", "*************", "replace", "A", "")
EQ("^A^", "^"+"A"*1000+"^", "replace", "A", "", 999)
# substring deletion (from=="the", to=="")
EQ("", "the", "replace", "the", "")
EQ("ater", "theater", "replace", "the", "")
EQ("", "thethe", "replace", "the", "")
EQ("", "thethethethe", "replace", "the", "")
EQ("aaaa", "theatheatheathea", "replace", "the", "")
EQ("that", "that", "replace", "the", "")
EQ("thaet", "thaet", "replace", "the", "")
EQ("here and re", "here and there", "replace", "the", "")
EQ("here and re and re", "here and there and there",
"replace", "the", "", sys.maxint)
EQ("here and re and re", "here and there and there",
"replace", "the", "", -1)
EQ("here and re and re", "here and there and there",
"replace", "the", "", 3)
EQ("here and re and re", "here and there and there",
"replace", "the", "", 2)
EQ("here and re and there", "here and there and there",
"replace", "the", "", 1)
EQ("here and there and there", "here and there and there",
"replace", "the", "", 0)
EQ("here and re and re", "here and there and there", "replace", "the", "")
EQ("abc", "abc", "replace", "the", "")
EQ("abcdefg", "abcdefg", "replace", "the", "")
# substring deletion (from=="bob", to=="")
EQ("bob", "bbobob", "replace", "bob", "")
EQ("bobXbob", "bbobobXbbobob", "replace", "bob", "")
EQ("aaaaaaa", "aaaaaaabob", "replace", "bob", "")
EQ("aaaaaaa", "aaaaaaa", "replace", "bob", "")
# single character replace in place (len(from)==len(to)==1)
EQ("Who goes there?", "Who goes there?", "replace", "o", "o")
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O")
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", sys.maxint)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", -1)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 3)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 2)
EQ("WhO goes there?", "Who goes there?", "replace", "o", "O", 1)
EQ("Who goes there?", "Who goes there?", "replace", "o", "O", 0)
EQ("Who goes there?", "Who goes there?", "replace", "a", "q")
EQ("who goes there?", "Who goes there?", "replace", "W", "w")
EQ("wwho goes there?ww", "WWho goes there?WW", "replace", "W", "w")
EQ("Who goes there!", "Who goes there?", "replace", "?", "!")
EQ("Who goes there!!", "Who goes there??", "replace", "?", "!")
EQ("Who goes there?", "Who goes there?", "replace", ".", "!")
# substring replace in place (len(from)==len(to) > 1)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**")
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", sys.maxint)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", -1)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 4)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 3)
EQ("Th** ** a tissue", "This is a tissue", "replace", "is", "**", 2)
EQ("Th** is a tissue", "This is a tissue", "replace", "is", "**", 1)
EQ("This is a tissue", "This is a tissue", "replace", "is", "**", 0)
EQ("cobob", "bobob", "replace", "bob", "cob")
EQ("cobobXcobocob", "bobobXbobobob", "replace", "bob", "cob")
EQ("bobob", "bobob", "replace", "bot", "bot")
# replace single character (len(from)==1, len(to)>1)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK")
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", -1)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", sys.maxint)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", 2)
EQ("ReyKKjavik", "Reykjavik", "replace", "k", "KK", 1)
EQ("Reykjavik", "Reykjavik", "replace", "k", "KK", 0)
EQ("A----B----C----", "A.B.C.", "replace", ".", "----")
EQ("Reykjavik", "Reykjavik", "replace", "q", "KK")
# replace substring (len(from)>1, len(to)!=len(from))
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham")
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", sys.maxint)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", -1)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", 4)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", 3)
EQ("ham, ham, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 2)
EQ("ham, spam, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 1)
EQ("spam, spam, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 0)
EQ("bobob", "bobobob", "replace", "bobob", "bob")
EQ("bobobXbobob", "bobobobXbobobob", "replace", "bobob", "bob")
EQ("BOBOBOB", "BOBOBOB", "replace", "bob", "bobby")
ba = buffer('a')
bb = buffer('b')
EQ("bbc", "abc", "replace", ba, bb)
EQ("aac", "abc", "replace", bb, ba)
#
self.checkequal('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
self.checkequal('onetwothree', 'one!two!three!', 'replace', '!', '')
self.checkequal('one@two@three!', 'one!two!three!', 'replace', '!', '@', 2)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 3)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 4)
self.checkequal('one!two!three!', 'one!two!three!', 'replace', '!', '@', 0)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@')
self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@')
self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@', 2)
self.checkequal('-a-b-c-', 'abc', 'replace', '', '-')
self.checkequal('-a-b-c', 'abc', 'replace', '', '-', 3)
self.checkequal('abc', 'abc', 'replace', '', '-', 0)
self.checkequal('', '', 'replace', '', '')
self.checkequal('abc', 'abc', 'replace', 'ab', '--', 0)
self.checkequal('abc', 'abc', 'replace', 'xy', '--')
# Next three for SF bug 422088: [OSF1 alpha] string.replace(); died with
# MemoryError due to empty result (platform malloc issue when requesting
# 0 bytes).
self.checkequal('', '123', 'replace', '123', '')
self.checkequal('', '123123', 'replace', '123', '')
self.checkequal('x', '123x123', 'replace', '123', '')
self.checkraises(TypeError, 'hello', 'replace')
self.checkraises(TypeError, 'hello', 'replace', 42)
self.checkraises(TypeError, 'hello', 'replace', 42, 'h')
self.checkraises(TypeError, 'hello', 'replace', 'h', 42)
def test_replace_overflow(self):
# Check for overflow checking on 32 bit machines
if sys.maxint != 2147483647 or struct.calcsize("P") > 4:
return
A2_16 = "A" * (2**16)
self.checkraises(OverflowError, A2_16, "replace", "", A2_16)
self.checkraises(OverflowError, A2_16, "replace", "A", A2_16)
self.checkraises(OverflowError, A2_16, "replace", "AA", A2_16+A2_16)
def test_zfill(self):
self.checkequal('123', '123', 'zfill', 2)
self.checkequal('123', '123', 'zfill', 3)
self.checkequal('0123', '123', 'zfill', 4)
self.checkequal('+123', '+123', 'zfill', 3)
self.checkequal('+123', '+123', 'zfill', 4)
self.checkequal('+0123', '+123', 'zfill', 5)
self.checkequal('-123', '-123', 'zfill', 3)
self.checkequal('-123', '-123', 'zfill', 4)
self.checkequal('-0123', '-123', 'zfill', 5)
self.checkequal('000', '', 'zfill', 3)
self.checkequal('34', '34', 'zfill', 1)
self.checkequal('0034', '34', 'zfill', 4)
self.checkraises(TypeError, '123', 'zfill')
# XXX alias for py3k forward compatibility
BaseTest = CommonTest
class MixinStrUnicodeUserStringTest:
# additional tests that only work for
# stringlike objects, i.e. str, unicode, UserString
# (but not the string module)
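    # Note on the helper convention used throughout (defined earlier in this
    # file): checkequal(expected, obj, methodname, *args) asserts that
    # fixtype(obj).methodname(*args) == expected, and checkraises asserts
    # that the call raises the given exception type.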
def test_islower(self):
self.checkequal(False, '', 'islower')
self.checkequal(True, 'a', 'islower')
self.checkequal(False, 'A', 'islower')
self.checkequal(False, '\n', 'islower')
self.checkequal(True, 'abc', 'islower')
self.checkequal(False, 'aBc', 'islower')
self.checkequal(True, 'abc\n', 'islower')
self.checkraises(TypeError, 'abc', 'islower', 42)
def test_isupper(self):
self.checkequal(False, '', 'isupper')
self.checkequal(False, 'a', 'isupper')
self.checkequal(True, 'A', 'isupper')
self.checkequal(False, '\n', 'isupper')
self.checkequal(True, 'ABC', 'isupper')
self.checkequal(False, 'AbC', 'isupper')
self.checkequal(True, 'ABC\n', 'isupper')
self.checkraises(TypeError, 'abc', 'isupper', 42)
def test_istitle(self):
self.checkequal(False, '', 'istitle')
self.checkequal(False, 'a', 'istitle')
self.checkequal(True, 'A', 'istitle')
self.checkequal(False, '\n', 'istitle')
self.checkequal(True, 'A Titlecased Line', 'istitle')
self.checkequal(True, 'A\nTitlecased Line', 'istitle')
self.checkequal(True, 'A Titlecased, Line', 'istitle')
self.checkequal(False, 'Not a capitalized String', 'istitle')
self.checkequal(False, 'Not\ta Titlecase String', 'istitle')
self.checkequal(False, 'Not--a Titlecase String', 'istitle')
self.checkequal(False, 'NOT', 'istitle')
self.checkraises(TypeError, 'abc', 'istitle', 42)
def test_isspace(self):
self.checkequal(False, '', 'isspace')
self.checkequal(False, 'a', 'isspace')
self.checkequal(True, ' ', 'isspace')
self.checkequal(True, '\t', 'isspace')
self.checkequal(True, '\r', 'isspace')
self.checkequal(True, '\n', 'isspace')
self.checkequal(True, ' \t\r\n', 'isspace')
self.checkequal(False, ' \t\r\na', 'isspace')
self.checkraises(TypeError, 'abc', 'isspace', 42)
def test_isalpha(self):
self.checkequal(False, '', 'isalpha')
self.checkequal(True, 'a', 'isalpha')
self.checkequal(True, 'A', 'isalpha')
self.checkequal(False, '\n', 'isalpha')
self.checkequal(True, 'abc', 'isalpha')
self.checkequal(False, 'aBc123', 'isalpha')
self.checkequal(False, 'abc\n', 'isalpha')
self.checkraises(TypeError, 'abc', 'isalpha', 42)
def test_isalnum(self):
self.checkequal(False, '', 'isalnum')
self.checkequal(True, 'a', 'isalnum')
self.checkequal(True, 'A', 'isalnum')
self.checkequal(False, '\n', 'isalnum')
self.checkequal(True, '123abc456', 'isalnum')
self.checkequal(True, 'a1b3c', 'isalnum')
self.checkequal(False, 'aBc000 ', 'isalnum')
self.checkequal(False, 'abc\n', 'isalnum')
self.checkraises(TypeError, 'abc', 'isalnum', 42)
def test_isdigit(self):
self.checkequal(False, '', 'isdigit')
self.checkequal(False, 'a', 'isdigit')
self.checkequal(True, '0', 'isdigit')
self.checkequal(True, '0123456789', 'isdigit')
self.checkequal(False, '0123456789a', 'isdigit')
self.checkraises(TypeError, 'abc', 'isdigit', 42)
def test_title(self):
self.checkequal(' Hello ', ' hello ', 'title')
self.checkequal('Hello ', 'hello ', 'title')
self.checkequal('Hello ', 'Hello ', 'title')
self.checkequal('Format This As Title String', "fOrMaT thIs aS titLe String", 'title')
self.checkequal('Format,This-As*Title;String', "fOrMaT,thIs-aS*titLe;String", 'title', )
self.checkequal('Getint', "getInt", 'title')
self.checkraises(TypeError, 'hello', 'title', 42)
def test_splitlines(self):
self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\rghi", 'splitlines')
self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\r\nghi", 'splitlines')
self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi", 'splitlines')
self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi\n", 'splitlines')
self.checkequal(['abc', 'def', 'ghi', ''], "abc\ndef\r\nghi\n\r", 'splitlines')
self.checkequal(['', 'abc', 'def', 'ghi', ''], "\nabc\ndef\r\nghi\n\r", 'splitlines')
self.checkequal(['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'], "\nabc\ndef\r\nghi\n\r", 'splitlines', 1)
self.checkraises(TypeError, 'abc', 'splitlines', 42, 42)
def test_startswith(self):
self.checkequal(True, 'hello', 'startswith', 'he')
self.checkequal(True, 'hello', 'startswith', 'hello')
self.checkequal(False, 'hello', 'startswith', 'hello world')
self.checkequal(True, 'hello', 'startswith', '')
self.checkequal(False, 'hello', 'startswith', 'ello')
self.checkequal(True, 'hello', 'startswith', 'ello', 1)
self.checkequal(True, 'hello', 'startswith', 'o', 4)
self.checkequal(False, 'hello', 'startswith', 'o', 5)
self.checkequal(True, 'hello', 'startswith', '', 5)
self.checkequal(False, 'hello', 'startswith', 'lo', 6)
self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3)
self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3, 7)
self.checkequal(False, 'helloworld', 'startswith', 'lowo', 3, 6)
# test negative indices
self.checkequal(True, 'hello', 'startswith', 'he', 0, -1)
self.checkequal(True, 'hello', 'startswith', 'he', -53, -1)
self.checkequal(False, 'hello', 'startswith', 'hello', 0, -1)
self.checkequal(False, 'hello', 'startswith', 'hello world', -1, -10)
self.checkequal(False, 'hello', 'startswith', 'ello', -5)
self.checkequal(True, 'hello', 'startswith', 'ello', -4)
self.checkequal(False, 'hello', 'startswith', 'o', -2)
self.checkequal(True, 'hello', 'startswith', 'o', -1)
self.checkequal(True, 'hello', 'startswith', '', -3, -3)
self.checkequal(False, 'hello', 'startswith', 'lo', -9)
self.checkraises(TypeError, 'hello', 'startswith')
self.checkraises(TypeError, 'hello', 'startswith', 42)
# test tuple arguments
self.checkequal(True, 'hello', 'startswith', ('he', 'ha'))
self.checkequal(False, 'hello', 'startswith', ('lo', 'llo'))
self.checkequal(True, 'hello', 'startswith', ('hellox', 'hello'))
self.checkequal(False, 'hello', 'startswith', ())
self.checkequal(True, 'helloworld', 'startswith', ('hellowo',
'rld', 'lowo'), 3)
self.checkequal(False, 'helloworld', 'startswith', ('hellowo', 'ello',
'rld'), 3)
self.checkequal(True, 'hello', 'startswith', ('lo', 'he'), 0, -1)
self.checkequal(False, 'hello', 'startswith', ('he', 'hel'), 0, 1)
self.checkequal(True, 'hello', 'startswith', ('he', 'hel'), 0, 2)
self.checkraises(TypeError, 'hello', 'startswith', (42,))
def test_endswith(self):
self.checkequal(True, 'hello', 'endswith', 'lo')
self.checkequal(False, 'hello', 'endswith', 'he')
self.checkequal(True, 'hello', 'endswith', '')
self.checkequal(False, 'hello', 'endswith', 'hello world')
self.checkequal(False, 'helloworld', 'endswith', 'worl')
self.checkequal(True, 'helloworld', 'endswith', 'worl', 3, 9)
self.checkequal(True, 'helloworld', 'endswith', 'world', 3, 12)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 1, 7)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 2, 7)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 3, 7)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 4, 7)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, 8)
self.checkequal(False, 'ab', 'endswith', 'ab', 0, 1)
self.checkequal(False, 'ab', 'endswith', 'ab', 0, 0)
# test negative indices
self.checkequal(True, 'hello', 'endswith', 'lo', -2)
self.checkequal(False, 'hello', 'endswith', 'he', -2)
self.checkequal(True, 'hello', 'endswith', '', -3, -3)
self.checkequal(False, 'hello', 'endswith', 'hello world', -10, -2)
self.checkequal(False, 'helloworld', 'endswith', 'worl', -6)
self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, -1)
self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, 9)
self.checkequal(True, 'helloworld', 'endswith', 'world', -7, 12)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -99, -3)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -8, -3)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -7, -3)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, -4)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', -8, -2)
self.checkraises(TypeError, 'hello', 'endswith')
self.checkraises(TypeError, 'hello', 'endswith', 42)
# test tuple arguments
self.checkequal(False, 'hello', 'endswith', ('he', 'ha'))
self.checkequal(True, 'hello', 'endswith', ('lo', 'llo'))
self.checkequal(True, 'hello', 'endswith', ('hellox', 'hello'))
self.checkequal(False, 'hello', 'endswith', ())
self.checkequal(True, 'helloworld', 'endswith', ('hellowo',
'rld', 'lowo'), 3)
self.checkequal(False, 'helloworld', 'endswith', ('hellowo', 'ello',
'rld'), 3, -1)
self.checkequal(True, 'hello', 'endswith', ('hell', 'ell'), 0, -1)
self.checkequal(False, 'hello', 'endswith', ('he', 'hel'), 0, 1)
self.checkequal(True, 'hello', 'endswith', ('he', 'hell'), 0, 4)
self.checkraises(TypeError, 'hello', 'endswith', (42,))
def test___contains__(self):
self.checkequal(True, '', '__contains__', '') # vereq('' in '', True)
self.checkequal(True, 'abc', '__contains__', '') # vereq('' in 'abc', True)
self.checkequal(False, 'abc', '__contains__', '\0') # vereq('\0' in 'abc', False)
self.checkequal(True, '\0abc', '__contains__', '\0') # vereq('\0' in '\0abc', True)
self.checkequal(True, 'abc\0', '__contains__', '\0') # vereq('\0' in 'abc\0', True)
self.checkequal(True, '\0abc', '__contains__', 'a') # vereq('a' in '\0abc', True)
self.checkequal(True, 'asdf', '__contains__', 'asdf') # vereq('asdf' in 'asdf', True)
self.checkequal(False, 'asd', '__contains__', 'asdf') # vereq('asdf' in 'asd', False)
self.checkequal(False, '', '__contains__', 'asdf') # vereq('asdf' in '', False)
def test_subscript(self):
self.checkequal(u'a', 'abc', '__getitem__', 0)
self.checkequal(u'c', 'abc', '__getitem__', -1)
self.checkequal(u'a', 'abc', '__getitem__', 0L)
self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 3))
self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 1000))
self.checkequal(u'a', 'abc', '__getitem__', slice(0, 1))
self.checkequal(u'', 'abc', '__getitem__', slice(0, 0))
self.checkraises(TypeError, 'abc', '__getitem__', 'def')
def test_slice(self):
self.checkequal('abc', 'abc', '__getslice__', 0, 1000)
self.checkequal('abc', 'abc', '__getslice__', 0, 3)
self.checkequal('ab', 'abc', '__getslice__', 0, 2)
self.checkequal('bc', 'abc', '__getslice__', 1, 3)
self.checkequal('b', 'abc', '__getslice__', 1, 2)
self.checkequal('', 'abc', '__getslice__', 2, 2)
self.checkequal('', 'abc', '__getslice__', 1000, 1000)
self.checkequal('', 'abc', '__getslice__', 2000, 1000)
self.checkequal('', 'abc', '__getslice__', 2, 1)
self.checkraises(TypeError, 'abc', '__getslice__', 'def')
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
s = string.ascii_letters + string.digits
indices = (0, None, 1, 3, 41, -1, -2, -37)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
L = list(s)[start:stop:step]
self.checkequal(u"".join(L), s, '__getitem__',
slice(start, stop, step))
def test_mul(self):
self.checkequal('', 'abc', '__mul__', -1)
self.checkequal('', 'abc', '__mul__', 0)
self.checkequal('abc', 'abc', '__mul__', 1)
self.checkequal('abcabcabc', 'abc', '__mul__', 3)
self.checkraises(TypeError, 'abc', '__mul__')
self.checkraises(TypeError, 'abc', '__mul__', '')
# XXX: on a 64-bit system, this doesn't raise an overflow error,
# but either raises a MemoryError, or succeeds (if you have 54TiB)
#self.checkraises(OverflowError, 10000*'abc', '__mul__', 2000000000)
def test_join(self):
# join now works with any sequence type
# moved here, because the argument order is
# different in string.join (see the test in
# test.test_string.StringTest.test_join)
self.checkequal('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequal('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequal('bd', '', 'join', ('', 'b', '', 'd'))
self.checkequal('ac', '', 'join', ('a', '', 'c', ''))
self.checkequal('w x y z', ' ', 'join', Sequence())
self.checkequal('abc', 'a', 'join', ('abc',))
self.checkequal('z', 'a', 'join', UserList(['z']))
if test_support.have_unicode:
self.checkequal(unicode('a.b.c'), unicode('.'), 'join', ['a', 'b', 'c'])
self.checkequal(unicode('a.b.c'), '.', 'join', [unicode('a'), 'b', 'c'])
self.checkequal(unicode('a.b.c'), '.', 'join', ['a', unicode('b'), 'c'])
self.checkequal(unicode('a.b.c'), '.', 'join', ['a', 'b', unicode('c')])
self.checkraises(TypeError, '.', 'join', ['a', unicode('b'), 3])
for i in [5, 25, 125]:
self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
['a' * i] * i)
self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
('a' * i,) * i)
self.checkraises(TypeError, ' ', 'join', BadSeq1())
self.checkequal('a b c', ' ', 'join', BadSeq2())
self.checkraises(TypeError, ' ', 'join')
self.checkraises(TypeError, ' ', 'join', 7)
self.checkraises(TypeError, ' ', 'join', Sequence([7, 'hello', 123L]))
try:
def f():
yield 4 + ""
self.fixtype(' ').join(f())
except TypeError, e:
if '+' not in str(e):
self.fail('join() ate exception message')
else:
self.fail('exception not raised')
def test_formatting(self):
self.checkequal('+hello+', '+%s+', '__mod__', 'hello')
self.checkequal('+10+', '+%d+', '__mod__', 10)
self.checkequal('a', "%c", '__mod__', "a")
self.checkequal('a', "%c", '__mod__', "a")
self.checkequal('"', "%c", '__mod__', 34)
self.checkequal('$', "%c", '__mod__', 36)
self.checkequal('10', "%d", '__mod__', 10)
self.checkequal('\x7f', "%c", '__mod__', 0x7f)
for ordinal in (-100, 0x200000):
# unicode raises ValueError, str raises OverflowError
self.checkraises((ValueError, OverflowError), '%c', '__mod__', ordinal)
longvalue = sys.maxint + 10L
slongvalue = str(longvalue)
if slongvalue[-1] in ("L","l"): slongvalue = slongvalue[:-1]
self.checkequal(' 42', '%3ld', '__mod__', 42)
self.checkequal('42', '%d', '__mod__', 42L)
self.checkequal('42', '%d', '__mod__', 42.0)
self.checkequal(slongvalue, '%d', '__mod__', longvalue)
self.checkcall('%d', '__mod__', float(longvalue))
self.checkequal('0042.00', '%07.2f', '__mod__', 42)
self.checkequal('0042.00', '%07.2F', '__mod__', 42)
self.checkraises(TypeError, 'abc', '__mod__')
self.checkraises(TypeError, '%(foo)s', '__mod__', 42)
self.checkraises(TypeError, '%s%s', '__mod__', (42,))
self.checkraises(TypeError, '%c', '__mod__', (None,))
self.checkraises(ValueError, '%(foo', '__mod__', {})
self.checkraises(TypeError, '%(foo)s %(bar)s', '__mod__', ('foo', 42))
self.checkraises(TypeError, '%d', '__mod__', "42") # not numeric
self.checkraises(TypeError, '%d', '__mod__', (42+0j)) # no int/long conversion provided
# argument names with properly nested brackets are supported
self.checkequal('bar', '%((foo))s', '__mod__', {'(foo)': 'bar'})
# 100 is a magic number in PyUnicode_Format, this forces a resize
self.checkequal(103*'a'+'x', '%sx', '__mod__', 103*'a')
self.checkraises(TypeError, '%*s', '__mod__', ('foo', 'bar'))
self.checkraises(TypeError, '%10.*f', '__mod__', ('foo', 42.))
self.checkraises(ValueError, '%10', '__mod__', (42,))
def test_floatformatting(self):
# float formatting
for prec in xrange(100):
format = '%%.%if' % prec
value = 0.01
for x in xrange(60):
value = value * 3.141592655 / 3.0 * 10.0
# The formatfloat() code in stringobject.c and
# unicodeobject.c uses a 120 byte buffer and switches from
# 'f' formatting to 'g' at precision 50, so we expect
# OverflowErrors for the ranges x < 50 and prec >= 67.
if x < 50 and prec >= 67:
self.checkraises(OverflowError, format, "__mod__", value)
else:
self.checkcall(format, "__mod__", value)
def test_inplace_rewrites(self):
# Check that strings don't copy and modify cached single-character strings
self.checkequal('a', 'A', 'lower')
self.checkequal(True, 'A', 'isupper')
self.checkequal('A', 'a', 'upper')
self.checkequal(True, 'a', 'islower')
self.checkequal('a', 'A', 'replace', 'A', 'a')
self.checkequal(True, 'A', 'isupper')
self.checkequal('A', 'a', 'capitalize')
self.checkequal(True, 'a', 'islower')
self.checkequal('A', 'a', 'swapcase')
self.checkequal(True, 'a', 'islower')
self.checkequal('A', 'a', 'title')
self.checkequal(True, 'a', 'islower')
def test_partition(self):
self.checkequal(('this is the par', 'ti', 'tion method'),
'this is the partition method', 'partition', 'ti')
# from raymond's original specification
S = 'http://www.python.org'
self.checkequal(('http', '://', 'www.python.org'), S, 'partition', '://')
self.checkequal(('http://www.python.org', '', ''), S, 'partition', '?')
self.checkequal(('', 'http://', 'www.python.org'), S, 'partition', 'http://')
self.checkequal(('http://www.python.', 'org', ''), S, 'partition', 'org')
self.checkraises(ValueError, S, 'partition', '')
self.checkraises(TypeError, S, 'partition', None)
# mixed use of str and unicode
self.assertEqual('a/b/c'.partition(u'/'), ('a', '/', 'b/c'))
def test_rpartition(self):
self.checkequal(('this is the rparti', 'ti', 'on method'),
'this is the rpartition method', 'rpartition', 'ti')
# from raymond's original specification
S = 'http://www.python.org'
self.checkequal(('http', '://', 'www.python.org'), S, 'rpartition', '://')
self.checkequal(('', '', 'http://www.python.org'), S, 'rpartition', '?')
self.checkequal(('', 'http://', 'www.python.org'), S, 'rpartition', 'http://')
self.checkequal(('http://www.python.', 'org', ''), S, 'rpartition', 'org')
self.checkraises(ValueError, S, 'rpartition', '')
self.checkraises(TypeError, S, 'rpartition', None)
# mixed use of str and unicode
self.assertEqual('a/b/c'.rpartition(u'/'), ('a/b', '/', 'c'))
class MixinStrStringUserStringTest:
# Additional tests for 8bit strings, i.e. str, UserString and
# the string module
def test_maketrans(self):
self.assertEqual(
''.join(map(chr, xrange(256))).replace('abc', 'xyz'),
string.maketrans('abc', 'xyz')
)
self.assertRaises(ValueError, string.maketrans, 'abc', 'xyzw')
def test_translate(self):
table = string.maketrans('abc', 'xyz')
self.checkequal('xyzxyz', 'xyzabcdef', 'translate', table, 'def')
table = string.maketrans('a', 'A')
self.checkequal('Abc', 'abc', 'translate', table)
self.checkequal('xyz', 'xyz', 'translate', table)
self.checkequal('yz', 'xyz', 'translate', table, 'x')
self.checkequal('yx', 'zyzzx', 'translate', None, 'z')
self.checkequal('zyzzx', 'zyzzx', 'translate', None, '')
self.checkequal('zyzzx', 'zyzzx', 'translate', None)
self.checkraises(ValueError, 'xyz', 'translate', 'too short', 'strip')
self.checkraises(ValueError, 'xyz', 'translate', 'too short')
class MixinStrUserStringTest:
# Additional tests that only work with
# 8bit compatible object, i.e. str and UserString
if test_support.have_unicode:
def test_encoding_decoding(self):
codecs = [('rot13', 'uryyb jbeyq'),
('base64', 'aGVsbG8gd29ybGQ=\n'),
('hex', '68656c6c6f20776f726c64'),
('uu', 'begin 666 <data>\n+:&5L;&\\@=V]R;&0 \n \nend\n')]
for encoding, data in codecs:
self.checkequal(data, 'hello world', 'encode', encoding)
self.checkequal('hello world', data, 'decode', encoding)
# zlib is optional, so we make the test optional too...
try:
import zlib
except ImportError:
pass
else:
data = 'x\x9c\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\x01\x00\x1a\x0b\x04]'
self.checkequal(data, 'hello world', 'encode', 'zlib')
self.checkequal('hello world', data, 'decode', 'zlib')
self.checkraises(TypeError, 'xyz', 'decode', 42)
self.checkraises(TypeError, 'xyz', 'encode', 42)
class MixinStrUnicodeTest:
# Additional tests that only work with str and unicode.
def test_bug1001011(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
# Check the optimisation still occurs for standard objects.
t = self.type2test
class subclass(t):
pass
s1 = subclass("abcd")
s2 = t().join([s1])
self.assert_(s1 is not s2)
self.assert_(type(s2) is t)
s1 = t("abcd")
s2 = t().join([s1])
self.assert_(s1 is s2)
# Should also test mixed-type join.
if t is unicode:
s1 = subclass("abcd")
s2 = "".join([s1])
self.assert_(s1 is not s2)
self.assert_(type(s2) is t)
s1 = t("abcd")
s2 = "".join([s1])
self.assert_(s1 is s2)
elif t is str:
s1 = subclass("abcd")
s2 = u"".join([s1])
self.assert_(s1 is not s2)
self.assert_(type(s2) is unicode) # promotes!
s1 = t("abcd")
s2 = u"".join([s1])
self.assert_(s1 is not s2)
self.assert_(type(s2) is unicode) # promotes!
else:
self.fail("unexpected type for MixinStrUnicodeTest %r" % t)
| apache-2.0 |
jackkiej/SickRage | lib/sqlalchemy/testing/requirements.py | 75 | 17798 | # testing/requirements.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
External dialect test suites should subclass SuiteRequirements
to provide specific inclusions/exclusions.
"""
from . import exclusions
class Requirements(object):
pass
class SuiteRequirements(Requirements):
@property
def create_table(self):
"""target platform can emit basic CreateTable DDL."""
return exclusions.open()
@property
def drop_table(self):
"""target platform can emit basic DropTable DDL."""
return exclusions.open()
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return exclusions.open()
@property
def on_update_cascade(self):
""""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.open()
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.closed()
@property
def deferrable_fks(self):
return exclusions.closed()
@property
def on_update_or_deferrable_fks(self):
# TODO: exclusions should be composable,
# somehow only_if([x, y]) isn't working here, negation/conjunctions
# getting confused.
return exclusions.only_if(
lambda: self.on_update_cascade.enabled or self.deferrable_fks.enabled
)
@property
def self_referential_foreign_keys(self):
"""Target database must support self-referential foreign keys."""
return exclusions.open()
@property
def foreign_key_ddl(self):
"""Target database must support the DDL phrases for FOREIGN KEY."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def offset(self):
"""target database can render OFFSET, or an equivalent, in a SELECT."""
return exclusions.open()
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return exclusions.closed()
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return exclusions.closed()
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return exclusions.closed()
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return exclusions.closed()
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return exclusions.closed()
@property
def window_functions(self):
"""Target database must support window functions."""
return exclusions.closed()
@property
def autoincrement_insert(self):
"""target platform generates new surrogate integer primary key values
when insert() is executed, excluding the pk column."""
return exclusions.open()
@property
def fetch_rows_post_commit(self):
"""target platform will allow cursor.fetchone() to proceed after a
COMMIT.
Typically this refers to an INSERT statement with RETURNING which
is invoked within "autocommit". If the row can be returned
after the autocommit, then this rule can be open.
"""
return exclusions.open()
@property
def empty_inserts(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent."""
return exclusions.only_if(
lambda config: config.db.dialect.supports_empty_insert or \
config.db.dialect.supports_default_values,
"empty inserts not supported"
)
@property
def insert_from_select(self):
"""target platform supports INSERT from a SELECT."""
return exclusions.open()
@property
def returning(self):
"""target platform supports RETURNING."""
return exclusions.only_if(
lambda config: config.db.dialect.implicit_returning,
"'returning' not supported by database"
)
@property
def duplicate_names_in_cursor_description(self):
"""target platform supports a SELECT statement that has
the same name repeated more than once in the columns list."""
return exclusions.open()
@property
def denormalized_names(self):
"""Target database must have 'denormalized', i.e.
UPPERCASE as case insensitive names."""
return exclusions.skip_if(
lambda config: not config.db.dialect.requires_name_normalize,
"Backend does not require denormalized names."
)
@property
def multivalues_inserts(self):
"""target database must support multiple VALUES clauses in an
INSERT statement."""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_multivalues_insert,
"Backend does not support multirow inserts."
)
@property
def implements_get_lastrowid(self):
""""target dialect implements the executioncontext.get_lastrowid()
method without reliance on RETURNING.
"""
return exclusions.open()
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid, or fetches
from a database-side function after an insert() construct executes,
within the get_lastrowid() method.
Only dialects that "pre-execute", or need RETURNING to get last
inserted id, would return closed/fail/skip for this.
"""
return exclusions.closed()
@property
def dbapi_lastrowid(self):
""""target platform includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return exclusions.closed()
@property
def views(self):
"""Target database must support VIEWs."""
return exclusions.closed()
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.closed()
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences
], "no sequence support")
@property
def sequences_optional(self):
"""Target database supports sequences, but also optionally
as a means of generating new PK values."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences and \
config.db.dialect.sequences_optional
], "no sequence support, or sequences not optional")
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def table_reflection(self):
return exclusions.open()
@property
def view_column_reflection(self):
"""target database must support retrieval of the columns in a view,
similarly to how a table is inspected.
This does not include the full CREATE VIEW definition.
"""
return self.views
@property
def view_reflection(self):
"""target database must support inspection of the full CREATE VIEW definition.
"""
return self.views
@property
def schema_reflection(self):
return self.schemas
@property
def primary_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def index_reflection(self):
return exclusions.open()
@property
def unique_constraint_reflection(self):
"""target dialect supports reflection of unique constraints"""
return exclusions.open()
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return exclusions.open()
@property
def unicode_data(self):
"""Target database/dialect must support Python unicode objects with
non-ASCII characters represented, delivered as bound parameters
as well as in result rows.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol names."""
return exclusions.closed()
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return exclusions.closed()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.open()
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
return exclusions.open()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.open()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.open()
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return exclusions.open()
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
return exclusions.open()
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return exclusions.closed()
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return exclusions.closed()
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return exclusions.closed()
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type.
"""
return exclusions.open()
@property
def floats_to_four_decimals(self):
"""target backend can return a floating-point number with four
significant digits (such as 15.7563) accurately
(i.e. without FP inaccuracies, such as 15.75629997253418).
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
"""target backend doesn't crash when you try to select a NUMERIC
value that has a value of NULL.
Added to support Pyodbc bug #351.
"""
return exclusions.open()
@property
def text_type(self):
"""Target database must support an unbounded Text() "
"type such as TEXT or CLOB"""
return exclusions.open()
@property
def empty_strings_varchar(self):
"""target database can persist/return an empty string with a
varchar.
"""
return exclusions.open()
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return exclusions.open()
@property
def savepoints(self):
"""Target database must support savepoints."""
return exclusions.closed()
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
return exclusions.closed()
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return exclusions.closed()
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE where the same table is present in a
subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as:
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return exclusions.open()
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return exclusions.closed()
@property
def percent_schema_names(self):
"""target backend supports weird identifiers with percent signs
in them, e.g. 'some % column'.
this is a very weird use case but often has problems because of
DBAPIs that use python formatting. It's not a critical use
case either.
"""
return exclusions.closed()
@property
def order_by_label_with_expression(self):
"""target backend supports ORDER BY a column label within an
expression.
Basically this::
select data as foo from test order by foo || 'bar'
Lots of databases including Postgresql don't support this,
so this is off by default.
"""
return exclusions.closed()
@property
def unicode_connections(self):
"""Target driver must support non-ASCII characters being passed at all."""
return exclusions.open()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return exclusions.open()
@property
def ad_hoc_engines(self):
"""Test environment must allow ad-hoc engine/connection creation.
DBs that scale poorly for many connections, even when closed, i.e.
Oracle, may use the "--low-connections" option which flags this requirement
as not present.
"""
return exclusions.skip_if(lambda config: config.options.low_connections)
def _has_mysql_on_windows(self, config):
return False
def _has_mysql_fully_case_sensitive(self, config):
return False
@property
def sqlite(self):
return exclusions.skip_if(lambda: not self._has_sqlite())
@property
def cextensions(self):
return exclusions.skip_if(
lambda: not self._has_cextensions(), "C extensions not installed"
)
def _has_sqlite(self):
from sqlalchemy import create_engine
try:
create_engine('sqlite://')
return True
except ImportError:
return False
def _has_cextensions(self):
try:
from sqlalchemy import cresultproxy, cprocessors
return True
except ImportError:
return False
| gpl-3.0 |
mcardillo55/django | django/contrib/admin/actions.py | 395 | 3316 | """
Built-in, globally-available admin actions.
"""
from django.contrib import messages
from django.contrib.admin import helpers
from django.contrib.admin.utils import get_deleted_objects, model_ngettext
from django.core.exceptions import PermissionDenied
from django.db import router
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _, ugettext_lazy
def delete_selected(modeladmin, request, queryset):
"""
Default action which deletes the selected objects.
    This action first displays a confirmation page which shows all the
    deletable objects or, if the user lacks delete permission for one of the
    related children (foreign keys), a "permission denied" message.
Next, it deletes all selected objects and redirects back to the change list.
"""
opts = modeladmin.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not modeladmin.has_delete_permission(request):
raise PermissionDenied
using = router.db_for_write(modeladmin.model)
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, model_count, perms_needed, protected = get_deleted_objects(
queryset, opts, request.user, modeladmin.admin_site, using)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if request.POST.get('post'):
if perms_needed:
raise PermissionDenied
n = queryset.count()
if n:
for obj in queryset:
obj_display = force_text(obj)
modeladmin.log_deletion(request, obj, obj_display)
queryset.delete()
modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(modeladmin.opts, n)
}, messages.SUCCESS)
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_text(opts.verbose_name)
else:
objects_name = force_text(opts.verbose_name_plural)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = dict(
modeladmin.admin_site.each_context(request),
title=title,
objects_name=objects_name,
deletable_objects=[deletable_objects],
model_count=dict(model_count).items(),
queryset=queryset,
perms_lacking=perms_needed,
protected=protected,
opts=opts,
action_checkbox_name=helpers.ACTION_CHECKBOX_NAME,
)
request.current_app = modeladmin.admin_site.name
# Display the confirmation page
return TemplateResponse(request, modeladmin.delete_selected_confirmation_template or [
"admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.model_name),
"admin/%s/delete_selected_confirmation.html" % app_label,
"admin/delete_selected_confirmation.html"
], context)
delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
| bsd-3-clause |
minhtuancn/odoo | openerp/workflow/workitem.py | 294 | 14389 |
##############################################################################
#
# OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# TODO:
# cr.execute('delete from wkf_triggers where model=%s and res_id=%s', (res_type,res_id))
#
import logging
import instance
from openerp.workflow.helpers import Session
from openerp.workflow.helpers import Record
from openerp.workflow.helpers import WorkflowActivity
logger = logging.getLogger(__name__)
import openerp
from openerp.tools.safe_eval import safe_eval as eval
class Environment(dict):
"""
Dictionary class used as an environment to evaluate workflow code (such as
the condition on transitions).
    This environment provides symbols for cr, uid, id, model name, model
    instance, column names, and all the attributes of the record (the one
    obtained by browsing the provided ID).
"""
def __init__(self, session, record):
self.cr = session.cr
self.uid = session.uid
self.model = record.model
self.id = record.id
self.ids = [record.id]
self.obj = openerp.registry(self.cr.dbname)[self.model]
def __getitem__(self, key):
records = self.obj.browse(self.cr, self.uid, self.ids)
if hasattr(records, key):
return getattr(records, key)
else:
return super(Environment, self).__getitem__(key)
class WorkflowItem(object):
def __init__(self, session, record, work_item_values):
assert isinstance(session, Session)
assert isinstance(record, Record)
self.session = session
self.record = record
if not work_item_values:
work_item_values = {}
assert isinstance(work_item_values, dict)
self.workitem = work_item_values
@classmethod
def create(cls, session, record, activity, instance_id, stack):
assert isinstance(session, Session)
assert isinstance(record, Record)
assert isinstance(activity, dict)
assert isinstance(instance_id, (long, int))
assert isinstance(stack, list)
cr = session.cr
cr.execute("select nextval('wkf_workitem_id_seq')")
id_new = cr.fetchone()[0]
cr.execute("insert into wkf_workitem (id,act_id,inst_id,state) values (%s,%s,%s,'active')", (id_new, activity['id'], instance_id))
cr.execute('select * from wkf_workitem where id=%s',(id_new,))
work_item_values = cr.dictfetchone()
logger.info('Created workflow item in activity %s',
activity['id'],
extra={'ident': (session.uid, record.model, record.id)})
workflow_item = WorkflowItem(session, record, work_item_values)
workflow_item.process(stack=stack)
@classmethod
def create_all(cls, session, record, activities, instance_id, stack):
assert isinstance(activities, list)
for activity in activities:
cls.create(session, record, activity, instance_id, stack)
def process(self, signal=None, force_running=False, stack=None):
assert isinstance(force_running, bool)
assert stack is not None
cr = self.session.cr
cr.execute('select * from wkf_activity where id=%s', (self.workitem['act_id'],))
activity = cr.dictfetchone()
triggers = False
if self.workitem['state'] == 'active':
triggers = True
if not self._execute(activity, stack):
return False
if force_running or self.workitem['state'] == 'complete':
ok = self._split_test(activity['split_mode'], signal, stack)
triggers = triggers and not ok
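        # Register cross-record triggers: each transition with a trigger_model
        # gets a wkf_triggers row per record returned by trigger_expr_id, so a
        # later write on those records can re-evaluate this workitem.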
if triggers:
cr.execute('select * from wkf_transition where act_from=%s ORDER BY sequence,id', (self.workitem['act_id'],))
for trans in cr.dictfetchall():
if trans['trigger_model']:
ids = self.wkf_expr_eval_expr(trans['trigger_expr_id'])
for res_id in ids:
cr.execute('select nextval(\'wkf_triggers_id_seq\')')
id =cr.fetchone()[0]
cr.execute('insert into wkf_triggers (model,res_id,instance_id,workitem_id,id) values (%s,%s,%s,%s,%s)', (trans['trigger_model'],res_id, self.workitem['inst_id'], self.workitem['id'], id))
return True
def _execute(self, activity, stack):
"""Send a signal to parenrt workflow (signal: subflow.signal_name)"""
result = True
cr = self.session.cr
signal_todo = []
if (self.workitem['state']=='active') and activity['signal_send']:
cr.execute("select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)", (self.workitem['inst_id'],))
for instance_id, model_name, record_id in cr.fetchall():
record = Record(model_name, record_id)
signal_todo.append((instance_id, record, activity['signal_send']))
if activity['kind'] == WorkflowActivity.KIND_DUMMY:
if self.workitem['state']=='active':
self._state_set(activity, 'complete')
if activity['action_id']:
res2 = self.wkf_expr_execute_action(activity)
if res2:
stack.append(res2)
result=res2
elif activity['kind'] == WorkflowActivity.KIND_FUNCTION:
if self.workitem['state']=='active':
self._state_set(activity, 'running')
returned_action = self.wkf_expr_execute(activity)
if type(returned_action) in (dict,):
stack.append(returned_action)
if activity['action_id']:
res2 = self.wkf_expr_execute_action(activity)
# A client action has been returned
if res2:
stack.append(res2)
result=res2
self._state_set(activity, 'complete')
elif activity['kind'] == WorkflowActivity.KIND_STOPALL:
if self.workitem['state']=='active':
self._state_set(activity, 'running')
cr.execute('delete from wkf_workitem where inst_id=%s and id<>%s', (self.workitem['inst_id'], self.workitem['id']))
if activity['action']:
self.wkf_expr_execute(activity)
self._state_set(activity, 'complete')
elif activity['kind'] == WorkflowActivity.KIND_SUBFLOW:
if self.workitem['state']=='active':
self._state_set(activity, 'running')
if activity.get('action', False):
id_new = self.wkf_expr_execute(activity)
if not id_new:
cr.execute('delete from wkf_workitem where id=%s', (self.workitem['id'],))
return False
assert type(id_new)==type(1) or type(id_new)==type(1L), 'Wrong return value: '+str(id_new)+' '+str(type(id_new))
cr.execute('select id from wkf_instance where res_id=%s and wkf_id=%s', (id_new, activity['subflow_id']))
id_new = cr.fetchone()[0]
else:
inst = instance.WorkflowInstance(self.session, self.record)
id_new = inst.create(activity['subflow_id'])
cr.execute('update wkf_workitem set subflow_id=%s where id=%s', (id_new, self.workitem['id']))
self.workitem['subflow_id'] = id_new
if self.workitem['state']=='running':
cr.execute("select state from wkf_instance where id=%s", (self.workitem['subflow_id'],))
state = cr.fetchone()[0]
if state=='complete':
self._state_set(activity, 'complete')
for instance_id, record, signal_send in signal_todo:
wi = instance.WorkflowInstance(self.session, record, {'id': instance_id})
wi.validate(signal_send, force_running=True)
return result
def _state_set(self, activity, state):
self.session.cr.execute('update wkf_workitem set state=%s where id=%s', (state, self.workitem['id']))
self.workitem['state'] = state
logger.info('Changed state of work item %s to "%s" in activity %s',
self.workitem['id'], state, activity['id'],
extra={'ident': (self.session.uid, self.record.model, self.record.id)})
def _split_test(self, split_mode, signal, stack):
cr = self.session.cr
cr.execute('select * from wkf_transition where act_from=%s ORDER BY sequence,id', (self.workitem['act_id'],))
test = False
transitions = []
alltrans = cr.dictfetchall()
if split_mode in ('XOR', 'OR'):
for transition in alltrans:
if self.wkf_expr_check(transition,signal):
test = True
transitions.append((transition['id'], self.workitem['inst_id']))
if split_mode=='XOR':
break
else:
test = True
for transition in alltrans:
if not self.wkf_expr_check(transition, signal):
test = False
break
cr.execute('select count(*) from wkf_witm_trans where trans_id=%s and inst_id=%s', (transition['id'], self.workitem['inst_id']))
if not cr.fetchone()[0]:
transitions.append((transition['id'], self.workitem['inst_id']))
if test and transitions:
cr.executemany('insert into wkf_witm_trans (trans_id,inst_id) values (%s,%s)', transitions)
cr.execute('delete from wkf_workitem where id=%s', (self.workitem['id'],))
for t in transitions:
self._join_test(t[0], t[1], stack)
return True
return False
def _join_test(self, trans_id, inst_id, stack):
cr = self.session.cr
cr.execute('select * from wkf_activity where id=(select act_to from wkf_transition where id=%s)', (trans_id,))
activity = cr.dictfetchone()
if activity['join_mode']=='XOR':
WorkflowItem.create(self.session, self.record, activity, inst_id, stack=stack)
cr.execute('delete from wkf_witm_trans where inst_id=%s and trans_id=%s', (inst_id,trans_id))
else:
cr.execute('select id from wkf_transition where act_to=%s ORDER BY sequence,id', (activity['id'],))
trans_ids = cr.fetchall()
ok = True
for (id,) in trans_ids:
cr.execute('select count(*) from wkf_witm_trans where trans_id=%s and inst_id=%s', (id,inst_id))
res = cr.fetchone()[0]
if not res:
ok = False
break
if ok:
for (id,) in trans_ids:
cr.execute('delete from wkf_witm_trans where trans_id=%s and inst_id=%s', (id,inst_id))
WorkflowItem.create(self.session, self.record, activity, inst_id, stack=stack)
def wkf_expr_eval_expr(self, lines):
"""
Evaluate each line of ``lines`` with the ``Environment`` environment, returning
the value of the last line.
"""
assert lines, 'You used a NULL action in a workflow, use dummy node instead.'
result = False
for line in lines.split('\n'):
line = line.strip()
if not line:
continue
if line == 'True':
result = True
elif line == 'False':
result = False
else:
env = Environment(self.session, self.record)
result = eval(line, env, nocopy=True)
return result
def wkf_expr_execute_action(self, activity):
"""
Evaluate the ir.actions.server action specified in the activity.
"""
context = {
'active_model': self.record.model,
'active_id': self.record.id,
'active_ids': [self.record.id]
}
ir_actions_server = openerp.registry(self.session.cr.dbname)['ir.actions.server']
result = ir_actions_server.run(self.session.cr, self.session.uid, [activity['action_id']], context)
return result
def wkf_expr_execute(self, activity):
"""
Evaluate the action specified in the activity.
"""
return self.wkf_expr_eval_expr(activity['action'])
def wkf_expr_check(self, transition, signal):
"""
Test if a transition can be taken. The transition can be taken if:
- the signal name matches,
        - the uid is SUPERUSER_ID or the user's groups contain the transition's
          group,
        - the condition evaluates to a truthy value.
"""
if transition['signal'] and signal != transition['signal']:
return False
if self.session.uid != openerp.SUPERUSER_ID and transition['group_id']:
registry = openerp.registry(self.session.cr.dbname)
user_groups = registry['res.users'].read(self.session.cr, self.session.uid, [self.session.uid], ['groups_id'])[0]['groups_id']
if transition['group_id'] not in user_groups:
return False
return self.wkf_expr_eval_expr(transition['condition'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tersmitten/ansible | test/runner/lib/cloud/vcenter.py | 6 | 5233 | """VMware vCenter plugin for integration tests."""
from __future__ import absolute_import, print_function
import os
from lib.cloud import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from lib.util import (
find_executable,
display,
)
from lib.docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
get_docker_container_id,
)
class VcenterProvider(CloudProvider):
"""VMware vcenter/esx plugin. Sets up cloud resources for tests."""
DOCKER_SIMULATOR_NAME = 'vcenter-simulator'
def __init__(self, args):
"""
:type args: TestConfig
"""
super(VcenterProvider, self).__init__(args)
# The simulator must be pinned to a specific version to guarantee CI passes with the version used.
if os.environ.get('ANSIBLE_VCSIM_CONTAINER'):
self.image = os.environ.get('ANSIBLE_VCSIM_CONTAINER')
else:
self.image = 'quay.io/ansible/vcenter-test-container:1.5.0'
self.container_name = ''
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
docker = find_executable('docker', required=False)
if docker:
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which require the "docker" command: %s'
% (skip.rstrip('/'), ', '.join(skipped)))
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(VcenterProvider, self).setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def get_docker_run_options(self):
"""Get any additional options needed when delegating tests to a docker container.
:rtype: list[str]
"""
if self.managed:
return ['--link', self.DOCKER_SIMULATOR_NAME]
return []
def cleanup(self):
"""Clean up the cloud resource and any temporary configuration files after tests complete."""
if self.container_name:
docker_rm(self.args, self.container_name)
super(VcenterProvider, self).cleanup()
def _setup_dynamic(self):
"""Create a vcenter simulator using docker."""
container_id = get_docker_container_id()
if container_id:
display.info('Running in docker container: %s' % container_id, verbosity=1)
self.container_name = self.DOCKER_SIMULATOR_NAME
results = docker_inspect(self.args, self.container_name)
if results and not results[0].get('State', {}).get('Running'):
docker_rm(self.args, self.container_name)
results = []
if results:
display.info('Using the existing vCenter simulator docker container.', verbosity=1)
else:
display.info('Starting a new vCenter simulator docker container.', verbosity=1)
if not self.args.docker and not container_id:
# publish the simulator ports when not running inside docker
publish_ports = [
'-p', '80:80',
'-p', '443:443',
'-p', '8080:8080',
'-p', '8989:8989',
'-p', '5000:5000', # control port for flask app in simulator
]
else:
publish_ports = []
if not os.environ.get('ANSIBLE_VCSIM_CONTAINER'):
docker_pull(self.args, self.image)
docker_run(
self.args,
self.image,
['-d', '--name', self.container_name] + publish_ports,
)
if self.args.docker:
vcenter_host = self.DOCKER_SIMULATOR_NAME
elif container_id:
vcenter_host = self._get_simulator_address()
display.info('Found vCenter simulator container address: %s' % vcenter_host, verbosity=1)
else:
vcenter_host = 'localhost'
self._set_cloud_config('vcenter_host', vcenter_host)
def _get_simulator_address(self):
results = docker_inspect(self.args, self.container_name)
ipaddress = results[0]['NetworkSettings']['IPAddress']
return ipaddress
def _setup_static(self):
raise NotImplementedError()
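# Rough lifecycle sketch for this provider (illustrative only; the test runner
# drives these calls, they are not normally invoked by hand):
#   provider = VcenterProvider(args)
#   provider.filter(targets, exclude)  # skip vcenter tests if docker is absent
#   provider.setup()                   # start or reuse the simulator container
#   ...                                # run the integration tests
#   provider.cleanup()                 # remove the container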
class VcenterEnvironment(CloudEnvironment):
"""VMware vcenter/esx environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
env_vars = dict(
VCENTER_HOST=self._get_cloud_config('vcenter_host'),
)
ansible_vars = dict(
vcsim=self._get_cloud_config('vcenter_host'),
)
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
| gpl-3.0 |
oceanobservatories/mi-instrument | mi/dataset/parser/test/test_flord_l_wfp_sio.py | 7 | 3522 | #!/usr/bin/env python
"""
@package mi.dataset.parser.test
@file marine-integrations/mi/dataset/parser/test/test_flord_l_wfp_sio.py
@author Maria Lutz, Mark Worden
@brief Test code for a flord_l_wfp_sio data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import UnexpectedDataException
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.flord_l_wfp.sio.resource import RESOURCE_PATH
from mi.dataset.parser.flord_l_wfp_sio import FlordLWfpSioParser
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
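# Minimal standalone usage sketch (outside the test harness; the config dict
# and sample file mirror the ones used in the tests below):
#   with open(os.path.join(RESOURCE_PATH, 'node58p1_0.we_wfp.dat'), 'rb') as fh:
#       parser = FlordLWfpSioParser(config, fh, lambda exc: None)
#       particles = parser.get_records(1000)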
@attr('UNIT', group='mi')
class FlordLWfpSioParserUnitTestCase(ParserUnitTestCase):
"""
flord_l_wfp_sio Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self.config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.flord_l_wfp_sio',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'FlordLWfpSioDataParticle'
}
def test_parsing_of_input_file_without_decimation_factor(self):
"""
This test method will process a flord_l_wfp_sio input file that does not include a status
particle with a decimation factor.
"""
file_path = os.path.join(RESOURCE_PATH, 'node58p1_0.we_wfp.dat')
# Open the file holding the test sample data
with open(file_path, 'rb') as stream_handle:
parser = FlordLWfpSioParser(self.config, stream_handle, self.exception_callback)
# Attempt to retrieve 1000 particles
particles = parser.get_records(1000)
# We should end up with 160 particles
self.assertTrue(len(particles) == 160)
self.assert_particles(particles, 'node58p1_0.we_wfp.yml', RESOURCE_PATH)
def test_parsing_of_input_file_with_decimation_factor(self):
"""
This test method will process a flord_l_wfp_sio input file that includes a status
particle with a decimation factor.
"""
file_path = os.path.join(RESOURCE_PATH, 'node58p1_3.we_wfp.dat')
# Open the file holding the test sample data
with open(file_path, 'rb') as stream_handle:
parser = FlordLWfpSioParser(self.config, stream_handle, self.exception_callback)
# Attempt to retrieve 1000 particles
particles = parser.get_records(1000)
# We should end up with 49 particles
self.assertTrue(len(particles) == 49)
self.assert_particles(particles, 'node58p1_3.we_wfp.yml', RESOURCE_PATH)
def test_parsing_of_input_file_with_unexpected_data(self):
"""
This test method will process a flord_l_wfp_sio input file that includes unexpected data.
"""
file_path = os.path.join(RESOURCE_PATH, 'flord_l_wfp_sio_unexpected_data.dat')
# Open the file holding the test sample data
with open(file_path, 'rb') as stream_handle:
parser = FlordLWfpSioParser(self.config, stream_handle, self.exception_callback)
# Attempt to retrieve 1000 particles
particles = parser.get_records(1000)
            self.assertEqual(len(self.exception_callback_value), 3)
            for i in range(len(self.exception_callback_value)):
                self.assertTrue(isinstance(self.exception_callback_value[i], UnexpectedDataException))
# We should end up with 0 particles
self.assertTrue(len(particles) == 0)
| bsd-2-clause |
wenhuizhang/neutron | neutron/db/migration/__init__.py | 39 | 5405 | # Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import functools
from alembic import context
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection
def skip_if_offline(func):
"""Decorator for skipping migrations in offline mode."""
@functools.wraps(func)
def decorator(*args, **kwargs):
if context.is_offline_mode():
return
return func(*args, **kwargs)
return decorator
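# Usage sketch (hypothetical migration function):
#   @skip_if_offline
#   def upgrade():
#       drop_table_if_exists('legacy_table')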
def raise_if_offline(func):
"""Decorator for raising if a function is called in offline mode."""
@functools.wraps(func)
def decorator(*args, **kwargs):
if context.is_offline_mode():
raise RuntimeError(_("%s cannot be called while in offline mode") %
func.__name__)
return func(*args, **kwargs)
return decorator
@raise_if_offline
def schema_has_table(table_name):
"""Check whether the specified table exists in the current schema.
This method cannot be executed in offline mode.
"""
bind = op.get_bind()
insp = sa.engine.reflection.Inspector.from_engine(bind)
return table_name in insp.get_table_names()
@raise_if_offline
def schema_has_column(table_name, column_name):
"""Check whether the specified column exists in the current schema.
This method cannot be executed in offline mode.
"""
bind = op.get_bind()
insp = sa.engine.reflection.Inspector.from_engine(bind)
# first check that the table exists
if not schema_has_table(table_name):
return
# check whether column_name exists in table columns
return column_name in [column['name'] for column in
insp.get_columns(table_name)]
@raise_if_offline
def alter_column_if_exists(table_name, column_name, **kwargs):
"""Alter a column only if it exists in the schema."""
if schema_has_column(table_name, column_name):
op.alter_column(table_name, column_name, **kwargs)
@raise_if_offline
def drop_table_if_exists(table_name):
if schema_has_table(table_name):
op.drop_table(table_name)
@raise_if_offline
def rename_table_if_exists(old_table_name, new_table_name):
if schema_has_table(old_table_name):
op.rename_table(old_table_name, new_table_name)
def alter_enum(table, column, enum_type, nullable):
bind = op.get_bind()
engine = bind.engine
if engine.name == 'postgresql':
values = {'table': table,
'column': column,
'name': enum_type.name}
op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values)
enum_type.create(bind, checkfirst=False)
op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO "
"old_%(column)s" % values)
op.add_column(table, sa.Column(column, enum_type, nullable=nullable))
op.execute("UPDATE %(table)s SET %(column)s = "
"old_%(column)s::text::%(name)s" % values)
op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values)
op.execute("DROP TYPE old_%(name)s" % values)
else:
op.alter_column(table, column, type_=enum_type,
existing_nullable=nullable)
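# Example call from a migration script (illustrative table and enum names):
#   new_status = sa.Enum('ACTIVE', 'DOWN', name='agent_status')
#   alter_enum('agents', 'status', new_status, nullable=False)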
def create_table_if_not_exist_psql(table_name, values):
if op.get_bind().engine.dialect.server_version_info < (9, 1, 0):
op.execute("CREATE LANGUAGE plpgsql")
op.execute("CREATE OR REPLACE FUNCTION execute(TEXT) RETURNS VOID AS $$"
"BEGIN EXECUTE $1; END;"
"$$ LANGUAGE plpgsql STRICT;")
op.execute("CREATE OR REPLACE FUNCTION table_exist(TEXT) RETURNS bool as "
"$$ SELECT exists(select 1 from pg_class where relname=$1);"
"$$ language sql STRICT;")
op.execute("SELECT execute($$CREATE TABLE %(name)s %(columns)s $$) "
"WHERE NOT table_exist(%(name)r);" %
{'name': table_name,
'columns': values})
def remove_foreign_keys(table, foreign_keys):
for fk in foreign_keys:
op.drop_constraint(
name=fk['name'],
table_name=table,
type_='foreignkey'
)
def create_foreign_keys(table, foreign_keys):
for fk in foreign_keys:
op.create_foreign_key(
name=fk['name'],
source=table,
referent=fk['referred_table'],
local_cols=fk['constrained_columns'],
remote_cols=fk['referred_columns'],
ondelete='CASCADE'
)
@contextlib.contextmanager
def remove_fks_from_table(table):
try:
inspector = reflection.Inspector.from_engine(op.get_bind())
foreign_keys = inspector.get_foreign_keys(table)
remove_foreign_keys(table, foreign_keys)
yield
finally:
create_foreign_keys(table, foreign_keys)
| apache-2.0 |
ymotongpoo/restroom | 0x7d8/RSSReader/mailto.py | 1 | 2150 | # -*- coding: utf-8; encoding: utf-8; -*-;
"""
mailto.py
http://labs.unoh.net/2007/06/python_2.html
Known issue:
 - exception handling needs improvement; errors from smtplib are not caught
"""
__author__ = "ymotongpoo <[email protected]>"
__date__ = "21 Nov. 2008"
__credits__ = "0x7d8 -- programming training"
__version__ = "$Revision: 0.10"
import smtplib
from email.MIMEText import MIMEText
from email.Header import Header
from email.Utils import formatdate
class MailTo:
"""
class for sending e-mail
"""
    def __init__(self, from_addr = '', to_addr = '', subject = '', body = ''):
"""
initialization
arguments:
from_addr : 'From' address
            to_addr : 'To' address (a single address string)
            subject : subject of the e-mail
body : body of the e-mail
"""
self.from_addr = from_addr
self.to_addr = to_addr
self.subject = subject
self.body = body
def CreateMessage(self, encoding):
"""
create e-mail message including e-mail header
arguments:
encoding : mail encoding
return:
e-mail message
"""
msg = MIMEText(self.body, 'plain', encoding)
msg['Subject'] = self.subject
msg['From'] = self.from_addr
msg['To'] = self.to_addr
msg['Date'] = formatdate()
return msg
def Send(self, msg):
"""
send e-mail using normal smtp server
arguments:
msg : e-mail message created by CreateMessage()
"""
s = smtplib.SMTP()
s.sendmail(self.from_addr, [self.to_addr], msg.as_string())
s.close()
def SendViaGmail(self, msg, account, password):
"""
send e-mail using Gmail
arguments:
msg : e-mail message created by CreateMessage()
account : Gmail account
password : password for Gmail account
"""
s = smtplib.SMTP('smtp.gmail.com', 587)
s.ehlo()
s.starttls()
s.ehlo()
s.login(account, password)
s.sendmail(self.from_addr, [self.to_addr], msg.as_string())
s.close()
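# Example usage (illustrative values):
#   mail = MailTo('[email protected]', '[email protected]', u'Hello', u'Body text')
#   msg = mail.CreateMessage('utf-8')
#   mail.SendViaGmail(msg, 'gmail-account', 'app-password')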
| apache-2.0 |
abtreece/ansible | lib/ansible/utils/module_docs_fragments/asa.py | 2 | 3986 | #
# (c) 2016, Peter Sprygada <[email protected]>
# (c) 2016, Patrick Ogenstad <@ogenstad>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device. The port value will default to the well known SSH port
of 22
required: false
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. The value of I(username) is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
required: false
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. The value of I(password) is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
required: false
default: null
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. The value of I(ssh_keyfile) is the path to the
key used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
required: false
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
required: false
default: no
choices: ['yes', 'no']
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
required: false
default: none
timeout:
description:
- Specifies idle timeout for the connection. Useful if the console
freezes before continuing. For example when saving configurations.
required: false
default: 10
provider:
description:
      - Convenience method that allows all I(asa) arguments to be passed as
a dict object. All constraints (required, choices, etc) must be
met either by individual arguments or values in this dict.
required: false
default: null
context:
description:
- Specifies which context to target if you are running in the ASA in
multiple context mode. Defaults to the current context you login to.
required: false
default: null
"""
| mit |
ARG-TLQ/Red-DiscordBot | redbot/cogs/audio/audio_dataclasses.py | 3 | 25683 | import contextlib
import glob
import logging
import ntpath
import os
import posixpath
import re
from pathlib import Path, PosixPath, WindowsPath
from typing import (
AsyncIterator,
Callable,
Final,
Iterator,
MutableMapping,
Optional,
Pattern,
Tuple,
Union,
)
from urllib.parse import urlparse
import lavalink
from redbot.core.i18n import Translator
from redbot.core.utils import AsyncIter
_ = Translator("Audio", Path(__file__))
_RE_REMOVE_START: Final[Pattern] = re.compile(r"^(sc|list) ")
_RE_YOUTUBE_TIMESTAMP: Final[Pattern] = re.compile(r"[&|?]t=(\d+)s?")
_RE_YOUTUBE_INDEX: Final[Pattern] = re.compile(r"&index=(\d+)")
_RE_SPOTIFY_URL: Final[Pattern] = re.compile(r"(http[s]?://)?(open\.spotify\.com)/")
_RE_SPOTIFY_TIMESTAMP: Final[Pattern] = re.compile(r"#(\d+):(\d+)")
_RE_SOUNDCLOUD_TIMESTAMP: Final[Pattern] = re.compile(r"#t=(\d+):(\d+)s?")
_RE_TWITCH_TIMESTAMP: Final[Pattern] = re.compile(r"\?t=(\d+)h(\d+)m(\d+)s")
_PATH_SEPS: Final[Tuple[str, str]] = (posixpath.sep, ntpath.sep)
_FULLY_SUPPORTED_MUSIC_EXT: Final[Tuple[str, ...]] = (".mp3", ".flac", ".ogg")
_PARTIALLY_SUPPORTED_MUSIC_EXT: Tuple[str, ...] = (
".m3u",
".m4a",
".aac",
".ra",
".wav",
".opus",
".wma",
".ts",
".au",
# These do not work
# ".mid",
# ".mka",
# ".amr",
# ".aiff",
# ".ac3",
# ".voc",
# ".dsf",
)
_PARTIALLY_SUPPORTED_VIDEO_EXT: Tuple[str, ...] = (
".mp4",
".mov",
".flv",
".webm",
".mkv",
".wmv",
".3gp",
".m4v",
".mk3d", # https://github.com/Devoxin/lavaplayer
".mka", # https://github.com/Devoxin/lavaplayer
".mks", # https://github.com/Devoxin/lavaplayer
# These do not work
# ".vob",
# ".mts",
# ".avi",
# ".mpg",
# ".mpeg",
# ".swf",
)
_PARTIALLY_SUPPORTED_MUSIC_EXT += _PARTIALLY_SUPPORTED_VIDEO_EXT
log = logging.getLogger("red.cogs.Audio.audio_dataclasses")
class LocalPath:
"""Local tracks class.
Used to handle system dir trees in a cross system manner. The only use of this class is for
`localtracks`.
"""
_all_music_ext = _FULLY_SUPPORTED_MUSIC_EXT + _PARTIALLY_SUPPORTED_MUSIC_EXT
def __init__(self, path, localtrack_folder, **kwargs):
self._localtrack_folder = localtrack_folder
self._path = path
if isinstance(path, (Path, WindowsPath, PosixPath, LocalPath)):
path = str(path.absolute())
elif path is not None:
path = str(path)
self.cwd = Path.cwd()
_lt_folder = Path(self._localtrack_folder) if self._localtrack_folder else self.cwd
_path = Path(path) if path else self.cwd
if _lt_folder.parts[-1].lower() == "localtracks" and not kwargs.get("forced"):
self.localtrack_folder = _lt_folder
elif kwargs.get("forced"):
if _path.parts[-1].lower() == "localtracks":
self.localtrack_folder = _path
else:
self.localtrack_folder = _path / "localtracks"
else:
self.localtrack_folder = _lt_folder / "localtracks"
try:
_path = Path(path)
_path.relative_to(self.localtrack_folder)
self.path = _path
except (ValueError, TypeError):
for sep in _PATH_SEPS:
if path and path.startswith(f"localtracks{sep}{sep}"):
path = path.replace(f"localtracks{sep}{sep}", "", 1)
elif path and path.startswith(f"localtracks{sep}"):
path = path.replace(f"localtracks{sep}", "", 1)
self.path = self.localtrack_folder.joinpath(path) if path else self.localtrack_folder
try:
if self.path.is_file():
parent = self.path.parent
else:
parent = self.path
self.parent = Path(parent)
except OSError:
self.parent = None
@property
def name(self):
return str(self.path.name)
@property
def suffix(self):
return str(self.path.suffix)
def is_dir(self):
try:
return self.path.is_dir()
except OSError:
return False
def exists(self):
try:
return self.path.exists()
except OSError:
return False
def is_file(self):
try:
return self.path.is_file()
except OSError:
return False
def absolute(self):
try:
return self.path.absolute()
except OSError:
return self._path
@classmethod
def joinpath(cls, localpath, *args):
modified = cls(None, localpath)
modified.path = modified.path.joinpath(*args)
return modified
def rglob(self, pattern, folder=False) -> Iterator[str]:
if folder:
return glob.iglob(f"{glob.escape(self.path)}{os.sep}**{os.sep}", recursive=True)
else:
return glob.iglob(
f"{glob.escape(self.path)}{os.sep}**{os.sep}*{pattern}", recursive=True
)
def glob(self, pattern, folder=False) -> Iterator[str]:
if folder:
return glob.iglob(f"{glob.escape(self.path)}{os.sep}*{os.sep}", recursive=False)
else:
return glob.iglob(f"{glob.escape(self.path)}{os.sep}*{pattern}", recursive=False)
async def _multiglob(self, pattern: str, folder: bool, method: Callable):
async for rp in AsyncIter(method(pattern)):
rp_local = LocalPath(rp, self._localtrack_folder)
if (
(folder and rp_local.is_dir() and rp_local.exists())
or (not folder and rp_local.suffix in self._all_music_ext and rp_local.is_file())
and rp_local.exists()
):
yield rp_local
async def multiglob(self, *patterns, folder=False) -> AsyncIterator["LocalPath"]:
async for p in AsyncIter(patterns):
async for path in self._multiglob(p, folder, self.glob):
yield path
async def multirglob(self, *patterns, folder=False) -> AsyncIterator["LocalPath"]:
async for p in AsyncIter(patterns):
async for path in self._multiglob(p, folder, self.rglob):
yield path
def __str__(self):
return self.to_string()
def __repr__(self):
return str(self)
def to_string(self):
try:
return str(self.path.absolute())
except OSError:
return str(self._path)
def to_string_user(self, arg: str = None):
string = str(self.absolute()).replace(
(str(self.localtrack_folder.absolute()) + os.sep) if arg is None else arg, ""
)
chunked = False
while len(string) > 145 and os.sep in string:
string = string.split(os.sep, 1)[-1]
chunked = True
if chunked:
string = f"...{os.sep}{string}"
return string
async def tracks_in_tree(self):
tracks = []
async for track in self.multirglob(*[f"{ext}" for ext in self._all_music_ext]):
with contextlib.suppress(ValueError):
if track.path.parent != self.localtrack_folder and track.path.relative_to(
self.path
):
tracks.append(Query.process_input(track, self._localtrack_folder))
return sorted(tracks, key=lambda x: x.to_string_user().lower())
async def subfolders_in_tree(self):
return_folders = []
async for f in self.multirglob("", folder=True):
with contextlib.suppress(ValueError):
if (
f not in return_folders
and f.is_dir()
and f.path != self.localtrack_folder
and f.path.relative_to(self.path)
):
return_folders.append(f)
return sorted(return_folders, key=lambda x: x.to_string_user().lower())
async def tracks_in_folder(self):
tracks = []
async for track in self.multiglob(*[f"{ext}" for ext in self._all_music_ext]):
with contextlib.suppress(ValueError):
if track.path.parent != self.localtrack_folder and track.path.relative_to(
self.path
):
tracks.append(Query.process_input(track, self._localtrack_folder))
return sorted(tracks, key=lambda x: x.to_string_user().lower())
async def subfolders(self):
return_folders = []
async for f in self.multiglob("", folder=True):
with contextlib.suppress(ValueError):
if (
f not in return_folders
and f.path != self.localtrack_folder
and f.path.relative_to(self.path)
):
return_folders.append(f)
return sorted(return_folders, key=lambda x: x.to_string_user().lower())
def __eq__(self, other):
if isinstance(other, LocalPath):
return self.path._cparts == other.path._cparts
elif isinstance(other, Path):
            return self.path._cparts == other._cparts
return NotImplemented
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(tuple(self.path._cparts))
return self._hash
def __lt__(self, other):
if isinstance(other, LocalPath):
return self.path._cparts < other.path._cparts
elif isinstance(other, Path):
            return self.path._cparts < other._cparts
return NotImplemented
def __le__(self, other):
if isinstance(other, LocalPath):
return self.path._cparts <= other.path._cparts
elif isinstance(other, Path):
            return self.path._cparts <= other._cparts
return NotImplemented
def __gt__(self, other):
if isinstance(other, LocalPath):
return self.path._cparts > other.path._cparts
elif isinstance(other, Path):
            return self.path._cparts > other._cparts
return NotImplemented
def __ge__(self, other):
if isinstance(other, LocalPath):
return self.path._cparts >= other.path._cparts
elif isinstance(other, Path):
            return self.path._cparts >= other._cparts
return NotImplemented
class Query:
"""Query data class.
Use: Query.process_input(query, localtrack_folder) to generate the Query object.
"""
def __init__(self, query: Union[LocalPath, str], local_folder_current_path: Path, **kwargs):
query = kwargs.get("queryforced", query)
self._raw: Union[LocalPath, str] = query
self._local_folder_current_path = local_folder_current_path
_localtrack: LocalPath = LocalPath(query, local_folder_current_path)
self.valid: bool = query != "InvalidQueryPlaceHolderName"
self.is_local: bool = kwargs.get("local", False)
self.is_spotify: bool = kwargs.get("spotify", False)
self.is_youtube: bool = kwargs.get("youtube", False)
self.is_soundcloud: bool = kwargs.get("soundcloud", False)
self.is_bandcamp: bool = kwargs.get("bandcamp", False)
self.is_vimeo: bool = kwargs.get("vimeo", False)
self.is_mixer: bool = kwargs.get("mixer", False)
self.is_twitch: bool = kwargs.get("twitch", False)
self.is_other: bool = kwargs.get("other", False)
self.is_pornhub: bool = kwargs.get("pornhub", False)
self.is_playlist: bool = kwargs.get("playlist", False)
self.is_album: bool = kwargs.get("album", False)
self.is_search: bool = kwargs.get("search", False)
self.is_stream: bool = kwargs.get("stream", False)
self.single_track: bool = kwargs.get("single", False)
self.id: Optional[str] = kwargs.get("id", None)
self.invoked_from: Optional[str] = kwargs.get("invoked_from", None)
self.local_name: Optional[str] = kwargs.get("name", None)
self.search_subfolders: bool = kwargs.get("search_subfolders", False)
self.spotify_uri: Optional[str] = kwargs.get("uri", None)
self.uri: Optional[str] = kwargs.get("url", None)
self.is_url: bool = kwargs.get("is_url", False)
self.start_time: int = kwargs.get("start_time", 0)
self.track_index: Optional[int] = kwargs.get("track_index", None)
if self.invoked_from == "sc search":
self.is_youtube = False
self.is_soundcloud = True
if (_localtrack.is_file() or _localtrack.is_dir()) and _localtrack.exists():
self.local_track_path: Optional[LocalPath] = _localtrack
self.track: str = str(_localtrack.absolute())
self.is_local: bool = True
self.uri = self.track
else:
self.local_track_path: Optional[LocalPath] = None
self.track: str = str(query)
self.lavalink_query: str = self._get_query()
if self.is_playlist or self.is_album:
self.single_track = False
self._hash = hash(
(
self.valid,
self.is_local,
self.is_spotify,
self.is_youtube,
self.is_soundcloud,
self.is_bandcamp,
self.is_vimeo,
self.is_mixer,
self.is_twitch,
self.is_other,
self.is_playlist,
self.is_album,
self.is_search,
self.is_stream,
self.single_track,
self.id,
self.spotify_uri,
self.start_time,
self.track_index,
self.uri,
)
)
def __str__(self):
return str(self.lavalink_query)
@classmethod
def process_input(
cls,
query: Union[LocalPath, lavalink.Track, "Query", str],
_local_folder_current_path: Path,
**kwargs,
) -> "Query":
"""Process the input query into its type.
Parameters
----------
query : Union[Query, LocalPath, lavalink.Track, str]
The query string or LocalPath object.
_local_folder_current_path: Path
The Current Local Track folder
Returns
-------
Query
Returns a parsed Query object.
"""
if not query:
query = "InvalidQueryPlaceHolderName"
possible_values = {}
if isinstance(query, str):
query = query.strip("<>")
while "ytsearch:" in query:
query = query.replace("ytsearch:", "")
while "scsearch:" in query:
query = query.replace("scsearch:", "")
elif isinstance(query, Query):
for key, val in kwargs.items():
setattr(query, key, val)
return query
elif isinstance(query, lavalink.Track):
possible_values["stream"] = query.is_stream
query = query.uri
possible_values.update(dict(**kwargs))
possible_values.update(cls._parse(query, _local_folder_current_path, **kwargs))
return cls(query, _local_folder_current_path, **possible_values)
@staticmethod
def _parse(track, _local_folder_current_path: Path, **kwargs) -> MutableMapping:
"""Parse a track into all the relevant metadata."""
returning: MutableMapping = {}
if (
            isinstance(track, LocalPath)
and (track.is_file() or track.is_dir())
and track.exists()
):
returning["local"] = True
returning["name"] = track.name
if track.is_file():
returning["single"] = True
elif track.is_dir():
returning["album"] = True
else:
track = str(track)
if track.startswith("spotify:"):
returning["spotify"] = True
if ":playlist:" in track:
returning["playlist"] = True
elif ":album:" in track:
returning["album"] = True
elif ":track:" in track:
returning["single"] = True
_id = track.split(":", 2)[-1]
_id = _id.split("?")[0]
returning["id"] = _id
if "#" in _id:
match = re.search(_RE_SPOTIFY_TIMESTAMP, track)
if match:
returning["start_time"] = (int(match.group(1)) * 60) + int(match.group(2))
returning["uri"] = track
return returning
if track.startswith("sc ") or track.startswith("list "):
if track.startswith("sc "):
returning["invoked_from"] = "sc search"
returning["soundcloud"] = True
elif track.startswith("list "):
returning["invoked_from"] = "search list"
track = _RE_REMOVE_START.sub("", track, 1)
returning["queryforced"] = track
_localtrack = LocalPath(track, _local_folder_current_path)
if _localtrack.exists():
if _localtrack.is_file():
returning["local"] = True
returning["single"] = True
returning["name"] = _localtrack.name
return returning
elif _localtrack.is_dir():
returning["album"] = True
returning["local"] = True
returning["name"] = _localtrack.name
return returning
try:
query_url = urlparse(track)
if all([query_url.scheme, query_url.netloc, query_url.path]):
returning["url"] = track
returning["is_url"] = True
url_domain = ".".join(query_url.netloc.split(".")[-2:])
if not query_url.netloc:
url_domain = ".".join(query_url.path.split("/")[0].split(".")[-2:])
if url_domain in ["youtube.com", "youtu.be"]:
returning["youtube"] = True
_has_index = "&index=" in track
if "&t=" in track or "?t=" in track:
match = re.search(_RE_YOUTUBE_TIMESTAMP, track)
if match:
returning["start_time"] = int(match.group(1))
if _has_index:
match = re.search(_RE_YOUTUBE_INDEX, track)
if match:
returning["track_index"] = int(match.group(1)) - 1
if all(k in track for k in ["&list=", "watch?"]):
returning["track_index"] = 0
returning["playlist"] = True
returning["single"] = False
elif all(x in track for x in ["playlist?"]):
returning["playlist"] = not _has_index
returning["single"] = _has_index
elif any(k in track for k in ["list="]):
returning["track_index"] = 0
returning["playlist"] = True
returning["single"] = False
else:
returning["single"] = True
elif url_domain == "spotify.com":
returning["spotify"] = True
if "/playlist/" in track:
returning["playlist"] = True
elif "/album/" in track:
returning["album"] = True
elif "/track/" in track:
returning["single"] = True
val = re.sub(_RE_SPOTIFY_URL, "", track).replace("/", ":")
if "user:" in val:
val = val.split(":", 2)[-1]
_id = val.split(":", 1)[-1]
_id = _id.split("?")[0]
if "#" in _id:
_id = _id.split("#")[0]
match = re.search(_RE_SPOTIFY_TIMESTAMP, track)
if match:
returning["start_time"] = (int(match.group(1)) * 60) + int(
match.group(2)
)
returning["id"] = _id
returning["uri"] = f"spotify:{val}"
elif url_domain == "soundcloud.com":
returning["soundcloud"] = True
if "#t=" in track:
match = re.search(_RE_SOUNDCLOUD_TIMESTAMP, track)
if match:
returning["start_time"] = (int(match.group(1)) * 60) + int(
match.group(2)
)
if "/sets/" in track:
if "?in=" in track:
returning["single"] = True
else:
returning["playlist"] = True
else:
returning["single"] = True
elif url_domain == "bandcamp.com":
returning["bandcamp"] = True
if "/album/" in track:
returning["album"] = True
else:
returning["single"] = True
elif url_domain == "vimeo.com":
returning["vimeo"] = True
elif url_domain == "twitch.tv":
returning["twitch"] = True
if "?t=" in track:
match = re.search(_RE_TWITCH_TIMESTAMP, track)
if match:
returning["start_time"] = (
(int(match.group(1)) * 60 * 60)
+ (int(match.group(2)) * 60)
+ int(match.group(3))
)
if not any(x in track for x in ["/clip/", "/videos/"]):
returning["stream"] = True
else:
returning["other"] = True
returning["single"] = True
else:
if kwargs.get("soundcloud", False):
returning["soundcloud"] = True
else:
returning["youtube"] = True
returning["search"] = True
returning["single"] = True
except Exception:
returning["search"] = True
returning["youtube"] = True
returning["single"] = True
return returning
def _get_query(self):
if self.is_local:
return self.local_track_path.to_string()
elif self.is_spotify:
return self.spotify_uri
elif self.is_search and self.is_youtube:
return f"ytsearch:{self.track}"
elif self.is_search and self.is_soundcloud:
return f"scsearch:{self.track}"
return self.track
def to_string_user(self):
if self.is_local:
return str(self.local_track_path.to_string_user())
return str(self._raw)
@property
def suffix(self):
if self.is_local:
return self.local_track_path.suffix
return None
def __eq__(self, other):
if not isinstance(other, Query):
return NotImplemented
return self.to_string_user() == other.to_string_user()
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(
(
self.valid,
self.is_local,
self.is_spotify,
self.is_youtube,
self.is_soundcloud,
self.is_bandcamp,
self.is_vimeo,
self.is_mixer,
self.is_twitch,
self.is_other,
self.is_playlist,
self.is_album,
self.is_search,
self.is_stream,
self.single_track,
self.id,
self.spotify_uri,
self.start_time,
self.track_index,
self.uri,
)
)
return self._hash
def __lt__(self, other):
if not isinstance(other, Query):
return NotImplemented
return self.to_string_user() < other.to_string_user()
def __le__(self, other):
if not isinstance(other, Query):
return NotImplemented
return self.to_string_user() <= other.to_string_user()
def __gt__(self, other):
if not isinstance(other, Query):
return NotImplemented
return self.to_string_user() > other.to_string_user()
def __ge__(self, other):
if not isinstance(other, Query):
return NotImplemented
return self.to_string_user() >= other.to_string_user()
| gpl-3.0 |
chuijiaolianying/robotx | robotx/core/fabworker.py | 4 | 1400 | """fabric ops"""
import os
from fabric.api import cd
from fabric.api import env
from fabric.api import get
from fabric.api import put
from fabric.api import run
from fabric.api import shell_env
import robotx
# env.user = 'root'
env.password = os.environ['all_slave_password']
# env.hosts = ['192.168.122.56', '192.168.122.153', '192.168.122.254']
env.skip_bad_hosts = True
# env.timeout = 120
env.parallel = True
def copy_files(project_path, worker_root):
"""copy all needed files to workers"""
# send tests file to worker
robotx_path = robotx.__path__[0]
worker_file = os.path.join(robotx_path, 'core', 'workerdaemon.py')
put(project_path, worker_root, use_sudo=True)
put(worker_file, worker_root, use_sudo=True)
def run_workers(worker_root, masterip, planid, project_name, other_variables):
"""run all workers on given hosts"""
worker_file = 'workerdaemon.py'
worker_cmd = 'python %s %s %s %s %s' \
% (worker_file, masterip, planid, project_name, other_variables)
with shell_env(DISPLAY=':0'):
with cd(worker_root):
run(worker_cmd)
def collect_reports(worker_root, project_name):
"""docstring for collect_reports"""
results_path = os.path.join(worker_root, project_name, 'results')
with cd(results_path):
print "\nStart to collect result files"
get('*.xml', './')
run('rm -rf *.xml')
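# Typical invocation via fab (hosts, paths and plan id are illustrative):
#   fab -H 10.0.0.1,10.0.0.2 copy_files:/path/to/project,/opt/worker
#   fab -H 10.0.0.1,10.0.0.2 run_workers:/opt/worker,10.0.0.100,42,myproject,''
#   fab -H 10.0.0.1,10.0.0.2 collect_reports:/opt/worker,myproject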
| mit |
qgis/QGIS | tests/src/python/test_qgsrelationeditwidget.py | 17 | 17770 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for edit widgets.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '28/11/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
import qgis # NOQA
import os
from qgis.core import (
QgsFeature,
QgsVectorLayer,
QgsProject,
QgsRelation,
QgsTransaction,
QgsFeatureRequest,
QgsVectorLayerTools,
QgsGeometry
)
from qgis.gui import (
QgsGui,
QgsRelationWidgetWrapper,
QgsAttributeEditorContext,
QgsMapCanvas,
QgsAdvancedDigitizingDockWidget
)
from qgis.PyQt.QtCore import QTimer
from qgis.PyQt.QtWidgets import (
QToolButton,
QMessageBox,
QDialogButtonBox,
QTableView,
QDialog
)
from qgis.testing import start_app, unittest
start_app()
class TestQgsRelationEditWidget(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Setup the involved layers and relations for a n:m relation
:return:
"""
cls.mapCanvas = QgsMapCanvas()
QgsGui.editorWidgetRegistry().initEditors(cls.mapCanvas)
cls.dbconn = 'service=\'qgis_test\''
if 'QGIS_PGTEST_DB' in os.environ:
cls.dbconn = os.environ['QGIS_PGTEST_DB']
# Create test layer
cls.vl_books = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."books" sql=', 'books', 'postgres')
cls.vl_authors = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."authors" sql=', 'authors', 'postgres')
cls.vl_editors = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'fk_book,fk_author\' table="qgis_test"."editors" sql=', 'editors', 'postgres')
cls.vl_link_books_authors = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."books_authors" sql=', 'books_authors', 'postgres')
QgsProject.instance().addMapLayer(cls.vl_books)
QgsProject.instance().addMapLayer(cls.vl_authors)
QgsProject.instance().addMapLayer(cls.vl_editors)
QgsProject.instance().addMapLayer(cls.vl_link_books_authors)
cls.relMgr = QgsProject.instance().relationManager()
# Our mock QgsVectorLayerTools, that allow injecting data where user input is expected
cls.vltools = VlTools()
cls.layers = {cls.vl_authors, cls.vl_books, cls.vl_link_books_authors}
assert(cls.vl_authors.isValid())
assert(cls.vl_books.isValid())
assert(cls.vl_editors.isValid())
assert(cls.vl_link_books_authors.isValid())
@classmethod
def tearDownClass(cls):
QgsProject.instance().removeAllMapLayers()
cls.vl_books = None
cls.vl_authors = None
cls.vl_editors = None
cls.vl_link_books_authors = None
cls.layers = None
cls.mapCanvas = None
cls.vltools = None
cls.relMgr = None
def setUp(self):
self.rel_a = QgsRelation()
self.rel_a.setReferencingLayer(self.vl_link_books_authors.id())
self.rel_a.setReferencedLayer(self.vl_authors.id())
self.rel_a.addFieldPair('fk_author', 'pk')
self.rel_a.setId('rel_a')
assert(self.rel_a.isValid())
self.relMgr.addRelation(self.rel_a)
self.rel_b = QgsRelation()
self.rel_b.setReferencingLayer(self.vl_link_books_authors.id())
self.rel_b.setReferencedLayer(self.vl_books.id())
self.rel_b.addFieldPair('fk_book', 'pk')
self.rel_b.setId('rel_b')
assert(self.rel_b.isValid())
self.relMgr.addRelation(self.rel_b)
self.startTransaction()
def tearDown(self):
self.rollbackTransaction()
del self.transaction
self.relMgr.clear()
def startTransaction(self):
"""
Start a new transaction and set all layers into transaction mode.
:return: None
"""
self.transaction = QgsTransaction.create(self.layers)
self.transaction.begin()
for layer in self.layers:
layer.startEditing()
def rollbackTransaction(self):
"""
Rollback all changes done in this transaction.
We always rollback and never commit to have the database in a pristine
state at the end of each test.
:return: None
"""
for layer in self.layers:
layer.commitChanges()
self.transaction.rollback()
def test_delete_feature(self):
"""
Check if a feature can be deleted properly
"""
self.createWrapper(self.vl_authors, '"name"=\'Erich Gamma\'')
self.assertEqual(self.table_view.model().rowCount(), 1)
self.assertEqual(1, len([f for f in self.vl_books.getFeatures()]))
fid = next(self.vl_books.getFeatures(QgsFeatureRequest().setFilterExpression('"name"=\'Design Patterns. Elements of Reusable Object-Oriented Software\''))).id()
self.widget.featureSelectionManager().select([fid])
btn = self.widget.findChild(QToolButton, 'mDeleteFeatureButton')
def clickOk():
# Click the "Delete features" button on the confirmation message
# box
widget = self.widget.findChild(QMessageBox)
buttonBox = widget.findChild(QDialogButtonBox)
deleteButton = next((b for b in buttonBox.buttons() if buttonBox.buttonRole(b) == QDialogButtonBox.AcceptRole))
deleteButton.click()
QTimer.singleShot(1, clickOk)
btn.click()
# This is the important check that the feature is deleted
self.assertEqual(0, len([f for f in self.vl_books.getFeatures()]))
# This is actually more checking that the database on delete action is properly set on the relation
self.assertEqual(0, len([f for f in self.vl_link_books_authors.getFeatures()]))
self.assertEqual(self.table_view.model().rowCount(), 0)
def test_list(self):
"""
Simple check if several related items are shown
"""
wrapper = self.createWrapper(self.vl_books) # NOQA
self.assertEqual(self.table_view.model().rowCount(), 4)
def test_add_feature(self):
"""
Check if a new related feature is added
"""
self.createWrapper(self.vl_authors, '"name"=\'Douglas Adams\'')
self.assertEqual(self.table_view.model().rowCount(), 0)
self.vltools.setValues([None, 'The Hitchhiker\'s Guide to the Galaxy', 'Sputnik Editions', 1961])
btn = self.widget.findChild(QToolButton, 'mAddFeatureButton')
btn.click()
# Book entry has been created
self.assertEqual(2, len([f for f in self.vl_books.getFeatures()]))
# Link entry has been created
self.assertEqual(5, len([f for f in self.vl_link_books_authors.getFeatures()]))
self.assertEqual(self.table_view.model().rowCount(), 1)
def test_link_feature(self):
"""
Check if an existing feature can be linked
"""
wrapper = self.createWrapper(self.vl_authors, '"name"=\'Douglas Adams\'') # NOQA
f = QgsFeature(self.vl_books.fields())
f.setAttributes([self.vl_books.dataProvider().defaultValueClause(0), 'The Hitchhiker\'s Guide to the Galaxy', 'Sputnik Editions', 1961])
self.vl_books.addFeature(f)
btn = self.widget.findChild(QToolButton, 'mLinkFeatureButton')
btn.click()
dlg = self.widget.findChild(QDialog)
dlg.setSelectedFeatures([f.id()])
dlg.accept()
# magically the above code selects the feature here...
link_feature = next(self.vl_link_books_authors.getFeatures(QgsFeatureRequest().setFilterExpression('"fk_book"={}'.format(f[0]))))
self.assertIsNotNone(link_feature[0])
self.assertEqual(self.table_view.model().rowCount(), 1)
def test_unlink_feature(self):
"""
Check if a linked feature can be unlinked
"""
wrapper = self.createWrapper(self.vl_books) # NOQA
# All authors are listed
self.assertEqual(self.table_view.model().rowCount(), 4)
it = self.vl_authors.getFeatures(
QgsFeatureRequest().setFilterExpression('"name" IN (\'Richard Helm\', \'Ralph Johnson\')'))
self.widget.featureSelectionManager().select([f.id() for f in it])
self.assertEqual(2, self.widget.featureSelectionManager().selectedFeatureCount())
btn = self.widget.findChild(QToolButton, 'mUnlinkFeatureButton')
btn.click()
# This is actually more checking that the database on delete action is properly set on the relation
self.assertEqual(2, len([f for f in self.vl_link_books_authors.getFeatures()]))
self.assertEqual(2, self.table_view.model().rowCount())
def test_discover_relations(self):
"""
Test the automatic discovery of relations
"""
relations = self.relMgr.discoverRelations([], [self.vl_authors, self.vl_books, self.vl_link_books_authors])
relations = {r.name(): r for r in relations}
self.assertEqual({'books_authors_fk_book_fkey', 'books_authors_fk_author_fkey'}, set(relations.keys()))
ba2b = relations['books_authors_fk_book_fkey']
self.assertTrue(ba2b.isValid())
self.assertEqual('books_authors', ba2b.referencingLayer().name())
self.assertEqual('books', ba2b.referencedLayer().name())
self.assertEqual([0], ba2b.referencingFields())
self.assertEqual([0], ba2b.referencedFields())
ba2a = relations['books_authors_fk_author_fkey']
self.assertTrue(ba2a.isValid())
self.assertEqual('books_authors', ba2a.referencingLayer().name())
self.assertEqual('authors', ba2a.referencedLayer().name())
self.assertEqual([1], ba2a.referencingFields())
self.assertEqual([0], ba2a.referencedFields())
self.assertEqual([], self.relMgr.discoverRelations([self.rel_a, self.rel_b], [self.vl_authors, self.vl_books, self.vl_link_books_authors]))
self.assertEqual(1, len(self.relMgr.discoverRelations([], [self.vl_authors, self.vl_link_books_authors])))
# composite keys relation
relations = self.relMgr.discoverRelations([], [self.vl_books, self.vl_editors])
self.assertEqual(len(relations), 1)
relation = relations[0]
self.assertEqual('books_fk_editor_fkey', relation.name())
self.assertTrue(relation.isValid())
self.assertEqual('books', relation.referencingLayer().name())
self.assertEqual('editors', relation.referencedLayer().name())
self.assertEqual([2, 3], relation.referencingFields())
self.assertEqual([0, 1], relation.referencedFields())
def test_selection(self):
fbook = QgsFeature(self.vl_books.fields())
fbook.setAttributes([self.vl_books.dataProvider().defaultValueClause(0), 'The Hitchhiker\'s Guide to the Galaxy', 'Sputnik Editions', 1961])
self.vl_books.addFeature(fbook)
flink = QgsFeature(self.vl_link_books_authors.fields())
flink.setAttributes([fbook.id(), 5])
self.vl_link_books_authors.addFeature(flink)
self.createWrapper(self.vl_authors, '"name"=\'Douglas Adams\'')
self.zoomToButton = self.widget.findChild(QToolButton, "mDeleteFeatureButton")
self.assertTrue(self.zoomToButton)
self.assertTrue(not self.zoomToButton.isEnabled())
selectionMgr = self.widget.featureSelectionManager()
self.assertTrue(selectionMgr)
self.vl_books.select(fbook.id())
self.assertEqual([fbook.id()], selectionMgr.selectedFeatureIds())
self.assertTrue(self.zoomToButton.isEnabled())
selectionMgr.deselect([fbook.id()])
self.assertEqual([], selectionMgr.selectedFeatureIds())
self.assertTrue(not self.zoomToButton.isEnabled())
self.vl_books.select([1, fbook.id()])
self.assertEqual([fbook.id()], selectionMgr.selectedFeatureIds())
self.assertTrue(self.zoomToButton.isEnabled())
def test_add_feature_geometry(self):
"""
Test to add a feature with a geometry
"""
vl_pipes = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."pipes" (geom) sql=', 'pipes', 'postgres')
vl_leaks = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."leaks" (geom) sql=', 'leaks', 'postgres')
vl_leaks.startEditing()
QgsProject.instance().addMapLayer(vl_pipes)
QgsProject.instance().addMapLayer(vl_leaks)
self.assertEqual(vl_pipes.featureCount(), 2)
self.assertEqual(vl_leaks.featureCount(), 3)
rel = QgsRelation()
rel.setReferencingLayer(vl_leaks.id())
rel.setReferencedLayer(vl_pipes.id())
rel.addFieldPair('pipe', 'id')
rel.setId('rel_pipe_leak')
self.assertTrue(rel.isValid())
self.relMgr.addRelation(rel)
# Mock vector layer tool to just set default value on created feature
class DummyVlTools(QgsVectorLayerTools):
def addFeature(self, layer, defaultValues, defaultGeometry):
f = QgsFeature(layer.fields())
for idx, value in defaultValues.items():
f.setAttribute(idx, value)
f.setGeometry(defaultGeometry)
ok = layer.addFeature(f)
return ok, f
wrapper = QgsRelationWidgetWrapper(vl_leaks, rel)
context = QgsAttributeEditorContext()
vltool = DummyVlTools()
context.setVectorLayerTools(vltool)
context.setMapCanvas(self.mapCanvas)
cadDockWidget = QgsAdvancedDigitizingDockWidget(self.mapCanvas)
context.setCadDockWidget(cadDockWidget)
wrapper.setContext(context)
widget = wrapper.widget()
widget.show()
pipe = next(vl_pipes.getFeatures())
self.assertEqual(pipe.id(), 1)
wrapper.setFeature(pipe)
table_view = widget.findChild(QTableView)
self.assertEqual(table_view.model().rowCount(), 1)
btn = widget.findChild(QToolButton, 'mAddFeatureGeometryButton')
self.assertTrue(btn.isVisible())
self.assertTrue(btn.isEnabled())
btn.click()
self.assertTrue(self.mapCanvas.mapTool())
feature = QgsFeature(vl_leaks.fields())
feature.setGeometry(QgsGeometry.fromWkt('POINT(0 0.8)'))
self.mapCanvas.mapTool().digitizingCompleted.emit(feature)
self.assertEqual(table_view.model().rowCount(), 2)
self.assertEqual(vl_leaks.featureCount(), 4)
request = QgsFeatureRequest()
request.addOrderBy("id", False)
# get new created feature
feat = next(vl_leaks.getFeatures('"id" is NULL'))
self.assertTrue(feat.isValid())
self.assertTrue(feat.geometry().equals(QgsGeometry.fromWkt('POINT(0 0.8)')))
vl_leaks.rollBack()
def createWrapper(self, layer, filter=None):
"""
Basic setup of a relation widget wrapper.
Will create a new wrapper and set its feature to the one and only book
in the table.
It will also assign some instance variables to help
* self.widget The created widget
* self.table_view The table view of the widget
:return: The created wrapper
"""
if layer == self.vl_books:
relation = self.rel_b
nmrel = self.rel_a
else:
relation = self.rel_a
nmrel = self.rel_b
self.wrapper = QgsRelationWidgetWrapper(layer, relation)
self.wrapper.setConfig({'nm-rel': nmrel.id()})
context = QgsAttributeEditorContext()
context.setMapCanvas(self.mapCanvas)
context.setVectorLayerTools(self.vltools)
self.wrapper.setContext(context)
self.widget = self.wrapper.widget()
self.widget.show()
request = QgsFeatureRequest()
if filter:
request.setFilterExpression(filter)
book = next(layer.getFeatures(request))
self.wrapper.setFeature(book)
self.table_view = self.widget.findChild(QTableView)
return self.wrapper
class VlTools(QgsVectorLayerTools):
"""
Mock the QgsVectorLayerTools
Since we don't have a user on the test server to input this data for us, we can just use this.
"""
def setValues(self, values):
"""
Set the values for the next feature to insert
:param values: An array of values that shall be used for the next inserted record
:return: None
"""
self.values = values
def addFeature(self, layer, defaultValues, defaultGeometry):
"""
Overrides the addFeature method
:param layer: vector layer
:param defaultValues: some default values that may be provided by QGIS
:param defaultGeometry: a default geometry that may be provided by QGIS
:return: tuple(ok, f) where ok is if the layer added the feature and f is the added feature
"""
values = list()
for i, v in enumerate(self.values):
if v:
values.append(v)
else:
values.append(layer.dataProvider().defaultValueClause(i))
f = QgsFeature(layer.fields())
        f.setAttributes(values)
f.setGeometry(defaultGeometry)
ok = layer.addFeature(f)
return ok, f
def startEditing(self, layer):
pass
def stopEditing(self, layer, allowCancel):
pass
def saveEdits(self, layer):
pass
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
stewartsmith/bzr | bzrlib/tests/test_symbol_versioning.py | 2 | 12854 | # Copyright (C) 2006-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Symbol versioning tests."""
import warnings
from bzrlib import symbol_versioning
from bzrlib.symbol_versioning import (
deprecated_function,
deprecated_in,
deprecated_method,
)
from bzrlib.tests import TestCase
@deprecated_function(deprecated_in((0, 7, 0)))
def sample_deprecated_function():
"""Deprecated function docstring."""
return 1
a_deprecated_list = symbol_versioning.deprecated_list(deprecated_in((0, 9, 0)),
'a_deprecated_list', ['one'], extra="Don't use me")
a_deprecated_dict = symbol_versioning.DeprecatedDict(
deprecated_in((0, 14, 0)),
'a_deprecated_dict',
dict(a=42),
advice='Pull the other one!',
)
class TestDeprecationWarnings(TestCase):
def capture_warning(self, message, category, stacklevel=None):
self._warnings.append((message, category, stacklevel))
def setUp(self):
super(TestDeprecationWarnings, self).setUp()
self._warnings = []
@deprecated_method(deprecated_in((0, 7, 0)))
def deprecated_method(self):
"""Deprecated method docstring.
This might explain stuff.
"""
return 1
@staticmethod
@deprecated_function(deprecated_in((0, 7, 0)))
def deprecated_static():
"""Deprecated static."""
return 1
def test_deprecated_static(self):
# XXX: The results are not quite right because the class name is not
# shown - however it is enough to give people a good indication of
# where the problem is.
expected_warning = (
"bzrlib.tests.test_symbol_versioning."
"deprecated_static "
"was deprecated in version 0.7.0.", DeprecationWarning, 2)
expected_docstring = (
'Deprecated static.\n'
'\n'
'This function was deprecated in version 0.7.0.\n'
)
self.check_deprecated_callable(
expected_warning, expected_docstring,
"deprecated_static",
"bzrlib.tests.test_symbol_versioning",
self.deprecated_static)
def test_deprecated_method(self):
expected_warning = (
"bzrlib.tests.test_symbol_versioning."
"TestDeprecationWarnings.deprecated_method "
"was deprecated in version 0.7.0.", DeprecationWarning, 2)
expected_docstring = (
'Deprecated method docstring.\n'
'\n'
' This might explain stuff.\n'
' \n'
' This method was deprecated in version 0.7.0.\n'
' ')
self.check_deprecated_callable(expected_warning, expected_docstring,
"deprecated_method",
"bzrlib.tests.test_symbol_versioning",
self.deprecated_method)
def test_deprecated_function(self):
expected_warning = (
"bzrlib.tests.test_symbol_versioning.sample_deprecated_function "
"was deprecated in version 0.7.0.", DeprecationWarning, 2)
expected_docstring = ('Deprecated function docstring.\n'
'\n'
'This function was deprecated in version 0.7.0.\n'
)
self.check_deprecated_callable(expected_warning, expected_docstring,
"sample_deprecated_function",
"bzrlib.tests.test_symbol_versioning",
sample_deprecated_function)
def test_deprecated_list(self):
expected_warning = (
"Modifying a_deprecated_list was deprecated in version 0.9.0."
" Don't use me", DeprecationWarning, 3)
old_warning_method = symbol_versioning.warn
try:
symbol_versioning.set_warning_method(self.capture_warning)
self.assertEqual(['one'], a_deprecated_list)
self.assertEqual([], self._warnings)
a_deprecated_list.append('foo')
self.assertEqual([expected_warning], self._warnings)
self.assertEqual(['one', 'foo'], a_deprecated_list)
a_deprecated_list.extend(['bar', 'baz'])
self.assertEqual([expected_warning]*2, self._warnings)
self.assertEqual(['one', 'foo', 'bar', 'baz'], a_deprecated_list)
a_deprecated_list.insert(1, 'xxx')
self.assertEqual([expected_warning]*3, self._warnings)
self.assertEqual(['one', 'xxx', 'foo', 'bar', 'baz'], a_deprecated_list)
a_deprecated_list.remove('foo')
self.assertEqual([expected_warning]*4, self._warnings)
self.assertEqual(['one', 'xxx', 'bar', 'baz'], a_deprecated_list)
val = a_deprecated_list.pop()
self.assertEqual([expected_warning]*5, self._warnings)
self.assertEqual('baz', val)
self.assertEqual(['one', 'xxx', 'bar'], a_deprecated_list)
val = a_deprecated_list.pop(1)
self.assertEqual([expected_warning]*6, self._warnings)
self.assertEqual('xxx', val)
self.assertEqual(['one', 'bar'], a_deprecated_list)
finally:
symbol_versioning.set_warning_method(old_warning_method)
def test_deprecated_dict(self):
expected_warning = (
"access to a_deprecated_dict was deprecated in version 0.14.0."
" Pull the other one!", DeprecationWarning, 2)
old_warning_method = symbol_versioning.warn
try:
symbol_versioning.set_warning_method(self.capture_warning)
self.assertEqual(len(a_deprecated_dict), 1)
self.assertEqual([expected_warning], self._warnings)
a_deprecated_dict['b'] = 42
self.assertEqual(a_deprecated_dict['b'], 42)
self.assertTrue('b' in a_deprecated_dict)
del a_deprecated_dict['b']
self.assertFalse('b' in a_deprecated_dict)
self.assertEqual([expected_warning] * 6, self._warnings)
finally:
symbol_versioning.set_warning_method(old_warning_method)
def check_deprecated_callable(self, expected_warning, expected_docstring,
expected_name, expected_module,
deprecated_callable):
if __doc__ is None:
# With -OO the docstring should just be the deprecated version
expected_docstring = expected_docstring.split('\n')[-2].lstrip()
old_warning_method = symbol_versioning.warn
try:
symbol_versioning.set_warning_method(self.capture_warning)
self.assertEqual(1, deprecated_callable())
self.assertEqual([expected_warning], self._warnings)
deprecated_callable()
self.assertEqual([expected_warning, expected_warning],
self._warnings)
self.assertEqualDiff(expected_docstring, deprecated_callable.__doc__)
self.assertEqualDiff(expected_name, deprecated_callable.__name__)
self.assertEqualDiff(expected_module, deprecated_callable.__module__)
self.assertTrue(deprecated_callable.is_deprecated)
finally:
symbol_versioning.set_warning_method(old_warning_method)
def test_deprecated_passed(self):
self.assertEqual(True, symbol_versioning.deprecated_passed(None))
self.assertEqual(True, symbol_versioning.deprecated_passed(True))
self.assertEqual(True, symbol_versioning.deprecated_passed(False))
self.assertEqual(False,
symbol_versioning.deprecated_passed(
symbol_versioning.DEPRECATED_PARAMETER))
def test_deprecation_string(self):
"""We can get a deprecation string for a method or function."""
self.assertEqual('bzrlib.tests.test_symbol_versioning.'
'TestDeprecationWarnings.test_deprecation_string was deprecated in '
'version 0.11.0.',
symbol_versioning.deprecation_string(
self.test_deprecation_string,
deprecated_in((0, 11, 0))))
self.assertEqual('bzrlib.symbol_versioning.deprecated_function was '
'deprecated in version 0.11.0.',
symbol_versioning.deprecation_string(
symbol_versioning.deprecated_function,
deprecated_in((0, 11, 0))))
class TestSuppressAndActivate(TestCase):
def setUp(self):
super(TestSuppressAndActivate, self).setUp()
existing_filters = list(warnings.filters)
def restore():
warnings.filters[:] = existing_filters
self.addCleanup(restore)
# Clean out the filters so we have a clean slate.
warnings.resetwarnings()
def assertFirstWarning(self, action, category):
"""Test the first warning in the filters is correct"""
first = warnings.filters[0]
self.assertEqual((action, category), (first[0], first[2]))
def test_suppress_deprecation_warnings(self):
"""suppress_deprecation_warnings sets DeprecationWarning to ignored."""
symbol_versioning.suppress_deprecation_warnings()
self.assertFirstWarning('ignore', DeprecationWarning)
def test_set_restore_filters(self):
original_filters = warnings.filters[:]
symbol_versioning.suppress_deprecation_warnings()()
self.assertEqual(original_filters, warnings.filters)
def test_suppress_deprecation_with_warning_filter(self):
"""don't suppress if we already have a filter"""
warnings.filterwarnings('error', category=Warning)
self.assertFirstWarning('error', Warning)
self.assertEqual(1, len(warnings.filters))
symbol_versioning.suppress_deprecation_warnings(override=False)
self.assertFirstWarning('error', Warning)
self.assertEqual(1, len(warnings.filters))
def test_suppress_deprecation_with_filter(self):
"""don't suppress if we already have a filter"""
warnings.filterwarnings('error', category=DeprecationWarning)
self.assertFirstWarning('error', DeprecationWarning)
self.assertEqual(1, len(warnings.filters))
symbol_versioning.suppress_deprecation_warnings(override=False)
self.assertFirstWarning('error', DeprecationWarning)
self.assertEqual(1, len(warnings.filters))
symbol_versioning.suppress_deprecation_warnings(override=True)
self.assertFirstWarning('ignore', DeprecationWarning)
self.assertEqual(2, len(warnings.filters))
def test_activate_deprecation_no_error(self):
# First nuke the filters, so we know it is clean
symbol_versioning.activate_deprecation_warnings()
self.assertFirstWarning('default', DeprecationWarning)
def test_activate_deprecation_with_error(self):
# First nuke the filters, so we know it is clean
# Add a warning == error rule
warnings.filterwarnings('error', category=Warning)
self.assertFirstWarning('error', Warning)
self.assertEqual(1, len(warnings.filters))
symbol_versioning.activate_deprecation_warnings(override=False)
# There should not be a new warning
self.assertFirstWarning('error', Warning)
self.assertEqual(1, len(warnings.filters))
def test_activate_deprecation_with_DW_error(self):
# First nuke the filters, so we know it is clean
# Add a warning == error rule
warnings.filterwarnings('error', category=DeprecationWarning)
self.assertFirstWarning('error', DeprecationWarning)
self.assertEqual(1, len(warnings.filters))
symbol_versioning.activate_deprecation_warnings(override=False)
# There should not be a new warning
self.assertFirstWarning('error', DeprecationWarning)
self.assertEqual(1, len(warnings.filters))
symbol_versioning.activate_deprecation_warnings(override=True)
self.assertFirstWarning('default', DeprecationWarning)
self.assertEqual(2, len(warnings.filters))
| gpl-2.0 |
Perferom/android_external_chromium_org | tools/symsrc/pdb_fingerprint_from_img.py | 179 | 2017 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This will retrieve a PDBs "fingerprint" from it's corresponding executable
image (.dll or .exe). This is used when retrieving the PDB from the symbol
server. The .pdb (or cab compressed .pd_) is expected at a path like:
foo.pdb/FINGERPRINT/foo.pdb
We can retrieve the same information from the .PDB file itself, but this file
format is much more difficult and undocumented. Instead, we can look at the
DLL's reference to the PDB, and use that to retrieve the information."""
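
# For example (hypothetical values): an image whose CodeView record holds
# GUID 3F9E2C41-8D2A-4B7E-9F10-1234567890AB with Age 2 yields the fingerprint
# "3F9E2C418D2A4B7E9F101234567890AB2" (undashed GUID hex + decimal age), so
# the PDB would live at foo.pdb/3F9E2C418D2A4B7E9F101234567890AB2/foo.pdb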
import sys
import pefile
__CV_INFO_PDB70_format__ = ('CV_INFO_PDB70',
('4s,CvSignature', '16s,Signature', 'L,Age'))
__GUID_format__ = ('GUID',
('L,Data1', 'H,Data2', 'H,Data3', '8s,Data4'))
def GetPDBInfoFromImg(filename):
"""Returns the PDB fingerprint and the pdb filename given an image file"""
pe = pefile.PE(filename)
for dbg in pe.DIRECTORY_ENTRY_DEBUG:
if dbg.struct.Type == 2: # IMAGE_DEBUG_TYPE_CODEVIEW
off = dbg.struct.AddressOfRawData
size = dbg.struct.SizeOfData
data = pe.get_memory_mapped_image()[off:off+size]
cv = pefile.Structure(__CV_INFO_PDB70_format__)
cv.__unpack__(data)
cv.PdbFileName = data[cv.sizeof():]
guid = pefile.Structure(__GUID_format__)
guid.__unpack__(cv.Signature)
guid.Data4_0 = ''.join("%02X" % ord(x) for x in guid.Data4[0:2])
guid.Data4_1 = ''.join("%02X" % ord(x) for x in guid.Data4[2:])
return ("%08X%04X%04X%s%s%d" % (
guid.Data1, guid.Data2, guid.Data3,
guid.Data4_0, guid.Data4_1, cv.Age),
cv.PdbFileName.split('\x00', 1)[0])
break
def main():
if len(sys.argv) != 2:
print "usage: file.dll"
return 1
(fingerprint, filename) = GetPDBInfoFromImg(sys.argv[1])
print "%s %s" % (fingerprint, filename)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
GitHublong/hue | desktop/core/ext-py/pysaml2-2.4.0/example/idp2_repoze/modules/login.mako.py | 31 | 2690 | # -*- encoding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1367126126.936375
_template_filename='htdocs/login.mako'
_template_uri='login.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='utf-8'
_exports = []
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'root.mako', _template_uri)
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
redirect_uri = context.get('redirect_uri', UNDEFINED)
key = context.get('key', UNDEFINED)
action = context.get('action', UNDEFINED)
authn_reference = context.get('authn_reference', UNDEFINED)
login = context.get('login', UNDEFINED)
password = context.get('password', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer(u'\n\n<h1>Please log in</h1>\n<p class="description">\n To register it\'s quite simple: enter a login and a password\n</p>\n\n<form action="')
# SOURCE LINE 8
__M_writer(unicode(action))
__M_writer(u'" method="post">\n <input type="hidden" name="key" value="')
# SOURCE LINE 9
__M_writer(unicode(key))
__M_writer(u'"/>\n <input type="hidden" name="authn_reference" value="')
# SOURCE LINE 10
__M_writer(unicode(authn_reference))
__M_writer(u'"/>\n <input type="hidden" name="redirect_uri" value="')
# SOURCE LINE 11
__M_writer(unicode(redirect_uri))
__M_writer(u'"/>\n\n <div class="label">\n <label for="login">Username</label>\n </div>\n <div>\n <input type="text" name="login" value="')
# SOURCE LINE 17
__M_writer(unicode(login))
__M_writer(u'"/><br/>\n </div>\n\n <div class="label">\n <label for="password">Password</label>\n </div>\n <div>\n <input type="password" name="password"\n value="')
# SOURCE LINE 25
__M_writer(unicode(password))
__M_writer(u'"/>\n </div>\n\n <input class="submit" type="submit" name="form.submitted" value="Log In"/>\n</form>\n')
return ''
finally:
context.caller_stack._pop_frame()
| apache-2.0 |
ryanbauman/bulkioInterfaces | libsrc/testing/components/multiout_attachable/python/multiout_attachable.py | 2 | 9155 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK bulkioInterfaces.
#
# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
#
#
# AUTO-GENERATED
#
# Source: multiout_attachable.spd.xml
from ossie.resource import start_component
import logging
from ossie.utils import uuid
from multiout_attachable_base import *
class multiout_attachable_i(multiout_attachable_base):
class SDDSCallback(object):
def __init__(self, parent):
self.parent = parent
def attach(self, streamDef, user):
self.parent.callback_stats.num_sdds_attaches += 1
aid = str(uuid.uuid4())
newAttachment = self.parent.SddsAttachment(streamDef.id, aid, streamDef.port)
self.parent.received_sdds_attachments.append(newAttachment)
return aid
def detach(self, attachId):
for curr in list(self.parent.received_sdds_attachments):
if curr.attachId == attachId:
self.parent.received_sdds_attachments.remove(curr)
self.parent.callback_stats.num_sdds_detaches += 1
class VITA49Callback(object):
def __init__(self, parent):
self.parent = parent
def attach(self, streamDef, user):
self.parent.callback_stats.num_vita49_attaches += 1
aid = str(uuid.uuid4())
newAttachment = self.parent.Vita49Attachment(streamDef.id, aid, streamDef.port)
self.parent.received_vita49_attachments.append(newAttachment)
return aid
def detach(self, attachId):
for curr in list(self.parent.received_vita49_attachments):
if curr.attachId == attachId:
self.parent.received_vita49_attachments.remove(curr)
self.parent.callback_stats.num_vita49_detaches += 1
def initialize(self):
multiout_attachable_base.initialize(self)
self.reattaches = 0
self.port_dataSDDS_in.logger = self._log
self.port_dataVITA49_in.logger = self._log
self.port_dataSDDS_out.setLogger(self._log)
self.port_dataVITA49_out.setLogger(self._log)
self.sddsCallback = self.SDDSCallback(self)
self.vitaCallback = self.VITA49Callback(self)
self.port_dataSDDS_in.setNewAttachDetachListener(self.sddsCallback)
self.port_dataVITA49_in.setNewAttachDetachListener(self.vitaCallback)
self.port_dataSDDS_in.setNewSriListener(self.newSriCallback)
self.port_dataVITA49_in.setNewSriListener(self.newSriCallback)
self.port_dataSDDS_in.setSriChangeListener(self.sriChangeCallback)
self.port_dataVITA49_in.setSriChangeListener(self.sriChangeCallback)
def onconfigure_prop_SDDSStreamDefinitions(self, oldVal, newVal):
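        # Diff the old and new stream definition lists: ids that disappeared
        # are detached from the output port, new ids are attached, and ids
        # whose port changed are updated in place.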
oldAttachIds = []
newAttachIds = []
for oldDef in oldVal: oldAttachIds.append(oldDef.id)
for newDef in newVal: newAttachIds.append(newDef.id)
        # Find which streams need to be detached
for oldDef in oldVal:
if newAttachIds.count(oldDef.id) == 0:
self.port_dataSDDS_out.removeStream(oldDef.id)
# Find which new streams need to be attached
for newDef in newVal:
isNew = (oldAttachIds.count(newDef.id) == 0)
isUpdated = False
for oldDef in oldVal:
if oldDef.id == newDef.id:
# Test only allows for updated port value
isUpdated = (oldDef.port != newDef.port)
if isUpdated: break
newAttachment = bulkio.BULKIO.SDDSStreamDefinition(newDef.id,
bulkio.BULKIO.SDDS_CF,
newDef.multicastAddress,
newDef.vlan,
newDef.port,
newDef.sampleRate,
newDef.timeTagValid,
newDef.privateInfo)
if isNew: self.port_dataSDDS_out.addStream(newAttachment)
if isUpdated: self.port_dataSDDS_out.updateStream(newAttachment)
        self.SDDSStreamDefinitions = newVal
def onconfigure_prop_VITA49StreamDefinitions(self, oldVal, newVal):
oldAttachIds = []
newAttachIds = []
for oldDef in oldVal: oldAttachIds.append(oldDef.id)
for newDef in newVal: newAttachIds.append(newDef.id)
        # Find which streams need to be detached
for oldDef in oldVal:
if newAttachIds.count(oldDef.id) == 0:
self.port_dataVITA49_out.removeStream(oldDef.id)
# Find which new streams need to be attached
for newDef in newVal:
isNew = (oldAttachIds.count(newDef.id) == 0)
isUpdated = False
for oldDef in oldVal:
if oldDef.id == newDef.id:
# Test only allows for updated port value
isUpdated = (oldDef.port != newDef.port)
if isUpdated: break
dataFormat = bulkio.BULKIO.VITA49DataPacketPayloadFormat(newDef.packing_method_processing_efficient,
bulkio.BULKIO.VITA49_REAL,
bulkio.BULKIO.VITA49_32F,
newDef.repeating,
newDef.event_tag_size,
newDef.channel_tag_size,
newDef.item_packing_field_size,
newDef.data_item_size,
newDef.repeat_count,
newDef.vector_size)
newAttachment = bulkio.BULKIO.VITA49StreamDefinition(newDef.ip_address,
newDef.vlan,
newDef.port,
bulkio.BULKIO.VITA49_UDP_TRANSPORT,
newDef.id,
newDef.valid_data_format,
dataFormat)
if isNew: self.port_dataVITA49_out.addStream(newAttachment)
if isUpdated: self.port_dataVITA49_out.updateStream(newAttachment)
        self.VITA49StreamDefinitions = newVal
def process(self):
data, T, EOS, streamID, sri, sriChanged, inputQueueFlushed = self.port_dataFloat_in.getPacket()
if not data:
return NOOP
if sriChanged:
logging.debug("process() sri changed : " + str(sri) + " T: " + str(T))
self.port_dataSDDS_out.pushSRI(sri,T)
self.port_dataVITA49_out.pushSRI(sri,T)
self.packets_ingested += 1
return NORMAL
def newSriCallback(self,sri):
# Query SRIs to ensure deadlock doesn't occur
sddsSriList = self.port_dataSDDS_in._get_activeSRIs()
vita49SriList = self.port_dataVITA49_in._get_activeSRIs()
self.callback_stats.num_new_sri_callbacks += 1
def sriChangeCallback(self,sri):
# Query SRIs to ensure deadlock doesn't occur
sddsSriList = self.port_dataSDDS_in._get_activeSRIs()
vita49SriList = self.port_dataVITA49_in._get_activeSRIs()
self.callback_stats.num_sri_change_callbacks += 1
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
logging.debug("Starting Component")
start_component(multiout_attachable_i)
| lgpl-3.0 |
hiyoku/ccd10 | src/ui/projectSettingsWindow/settingsWindow.py | 2 | 3319 | from PyQt5 import QtWidgets
from src.business.configuration.configProject import ConfigProject
from src.business.consoleThreadOutput import ConsoleThreadOutput
from src.ui.commons.layout import set_hbox, set_lvbox
from src.ui.projectSettingsWindow.widgetsGeography import WidgetsGeography
from src.ui.projectSettingsWindow.widgetsSite import WidgetsSite
from src.ui.projectSettingsWindow.widgetsSun import WidgetsSun
class SettingsWindow(QtWidgets.QWidget):
def __init__(self, parent=None):
super(SettingsWindow, self).__init__(parent)
self.p = parent
self.console = ConsoleThreadOutput()
self.site = WidgetsSite(self)
self.geo = WidgetsGeography(self)
self.sun = WidgetsSun(self)
self.button_clear = QtWidgets.QPushButton('Clear', self)
self.button_ok = QtWidgets.QPushButton('Save', self)
self.button_cancel = QtWidgets.QPushButton('Cancel', self)
self.button_settings()
# Init Interface
self.setting_up()
self.refresh_all_fields()
def button_settings(self):
self.button_clear.clicked.connect(self.clear_all)
self.button_cancel.clicked.connect(self.func_cancel)
self.button_ok.clicked.connect(self.func_ok)
def func_cancel(self):
self.p.close()
#self.clear_all()
def func_ok(self):
try:
self.save_settings()
self.console.raise_text("Project settings successfully saved!", 1)
        except Exception:
self.console.raise_text("Unable to save the project settings.", 3)
finally:
self.p.close()
self.clear_all()
self.refresh_all_fields()
def clear_all(self):
self.site.clear_site()
self.geo.clear_geography()
self.sun.clear_sun()
def refresh_all_fields(self):
try:
st = ConfigProject()
infoSite = st.get_site_settings()
self.site.set_site_info(infoSite[0], infoSite[1], infoSite[2])
infoGeo = st.get_geographic_settings()
self.geo.set_geography(infoGeo[0], infoGeo[1], infoGeo[2], infoGeo[3], infoGeo[4])
infoSun = st.get_moonsun_settings()
self.sun.set_sun(str(infoSun[0]), infoSun[1], str(infoSun[2]), str(infoSun[3]))
except Exception as e:
print(e)
def save_settings(self):
try:
st = ConfigProject()
self.save_site(st)
self.save_geo(st)
self.save_sun(st)
st.save_settings()
except Exception as e:
print(e)
def save_site(self, set):
info1 = self.site.get_site_info()
set.set_site_settings(info1[0], info1[1], info1[2])
def save_geo(self, set):
info2 = self.geo.get_geography()
set.set_geographic_settings(info2[0], info2[1], info2[2], info2[3], info2[4])
def save_sun(self, set):
info3 = self.sun.get_sun()
set.set_moonsun_settings(info3[0], info3[1], info3[2], info3[3])
def setting_up(self):
self.setLayout(set_lvbox(set_hbox(self.site),
set_hbox(self.geo),
set_hbox(self.sun),
set_hbox(self.button_ok, self.button_clear, self.button_cancel, stretch2=1))) | gpl-3.0 |
kenwmitchell/ansible-modules-core | utilities/logic/wait_for.py | 9 | 20324 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Jeroen Hoekx <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import binascii
import datetime
import math
import re
import select
import socket
import sys
import time
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
# just because we can import it on Linux doesn't mean we will use it
except ImportError:
pass
DOCUMENTATION = '''
---
module: wait_for
short_description: Waits for a condition before continuing.
description:
  - You can wait for a set amount of time C(timeout); this is the default if nothing else is specified.
- Waiting for a port to become available is useful for when services
are not immediately available after their init scripts return
which is true of certain Java application servers. It is also
useful when starting guests with the M(virt) module and
needing to pause until they are ready.
  - This module can also be used to wait for a regex match, i.e. for a matching string to be present in a file.
- In 1.6 and later, this module can also be used to wait for a file to be available or
absent on the filesystem.
- In 1.8 and later, this module can also be used to wait for active
connections to be closed before continuing, useful if a node
is being rotated out of a load balancer pool.
version_added: "0.7"
options:
host:
description:
- A resolvable hostname or IP address to wait for
required: false
default: "127.0.0.1"
timeout:
description:
- maximum number of seconds to wait for
required: false
default: 300
connect_timeout:
description:
- maximum number of seconds to wait for a connection to happen before closing and retrying
required: false
default: 5
delay:
description:
- number of seconds to wait before starting to poll
required: false
default: 0
port:
description:
- port number to poll
required: false
state:
description:
      - one of C(present), C(started), C(stopped), C(absent), or C(drained)
- When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections
      - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing, C(absent) will check that the file is absent or removed
choices: [ "present", "started", "stopped", "absent", "drained" ]
default: "started"
path:
version_added: "1.4"
required: false
description:
      - path to a file on the filesystem that must exist before continuing
search_regex:
version_added: "1.4"
required: false
description:
- Can be used to match a string in either a file or a socket connection. Defaults to a multiline regex.
exclude_hosts:
version_added: "1.8"
required: false
description:
- list of hosts or IPs to ignore when looking for active TCP connections for C(drained) state
notes:
- The ability to use search_regex with a port connection was added in 1.7.
requirements: []
author:
- "Jeroen Hoekx (@jhoekx)"
- "John Jarvis (@jarv)"
- "Andrii Radyk (@AnderEnder)"
'''
EXAMPLES = '''
# wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds
- wait_for: port=8000 delay=10
# wait 300 seconds for port 8000 of any IP to close active connections, don't start checking for 10 seconds
- wait_for: host=0.0.0.0 port=8000 delay=10 state=drained
# wait 300 seconds for port 8000 of any IP to close active connections, ignoring connections for specified hosts
- wait_for: host=0.0.0.0 port=8000 state=drained exclude_hosts=10.2.1.2,10.2.1.3
# wait until the file /tmp/foo is present before continuing
- wait_for: path=/tmp/foo
# wait until the string "completed" is in the file /tmp/foo before continuing
- wait_for: path=/tmp/foo search_regex=completed
# wait until the lock file is removed
- wait_for: path=/var/lock/file.lock state=absent
# wait until the process is finished and pid was destroyed
- wait_for: path=/proc/3466/status state=absent
# wait 300 seconds for port 22 to become open and contain "OpenSSH", don't assume the inventory_hostname is resolvable
# and don't start checking for 10 seconds
- local_action: wait_for port=22 host="{{ ansible_ssh_host | default(inventory_hostname) }}" search_regex=OpenSSH delay=10
'''
class TCPConnectionInfo(object):
"""
This is a generic TCP Connection Info strategy class that relies
on the psutil module, which is not ideal for targets, but necessary
for cross platform support.
A subclass may wish to override some or all of these methods.
- _get_exclude_ips()
- get_active_connections()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
match_all_ips = {
socket.AF_INET: '0.0.0.0',
socket.AF_INET6: '::',
}
ipv4_mapped_ipv6_address = {
'prefix': '::ffff',
'match_all': '::ffff:0.0.0.0'
}
connection_states = {
'01': 'ESTABLISHED',
'02': 'SYN_SENT',
'03': 'SYN_RECV',
'04': 'FIN_WAIT1',
'05': 'FIN_WAIT2',
'06': 'TIME_WAIT',
}
def __new__(cls, *args, **kwargs):
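        # load_platform_subclass() picks the most specific registered subclass
        # whose platform/distribution match the running system (e.g.
        # LinuxTCPConnectionInfo on Linux), falling back to this generic
        # psutil-based implementation.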
return load_platform_subclass(TCPConnectionInfo, args, kwargs)
def __init__(self, module):
self.module = module
self.ips = _convert_host_to_ip(module.params['host'])
self.port = int(self.module.params['port'])
self.exclude_ips = self._get_exclude_ips()
if not HAS_PSUTIL:
module.fail_json(msg="psutil module required for wait_for")
def _get_exclude_ips(self):
exclude_hosts = self.module.params['exclude_hosts']
exclude_ips = []
if exclude_hosts is not None:
for host in exclude_hosts:
exclude_ips.extend(_convert_host_to_ip(host))
return exclude_ips
def get_active_connections_count(self):
active_connections = 0
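        # Note: p.get_connections() and the local_address/remote_address
        # fields are the legacy psutil API; newer psutil releases renamed
        # them to connections() and laddr/raddr.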
for p in psutil.process_iter():
connections = p.get_connections(kind='inet')
for conn in connections:
if conn.status not in self.connection_states.values():
continue
(local_ip, local_port) = conn.local_address
if self.port != local_port:
continue
(remote_ip, remote_port) = conn.remote_address
if (conn.family, remote_ip) in self.exclude_ips:
continue
if any((
(conn.family, local_ip) in self.ips,
(conn.family, self.match_all_ips[conn.family]) in self.ips,
local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
(conn.family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
)):
active_connections += 1
return active_connections
# ===========================================
# Subclass: Linux
class LinuxTCPConnectionInfo(TCPConnectionInfo):
"""
This is a TCP Connection Info evaluation strategy class
that utilizes information from Linux's procfs. While less universal,
does allow Linux targets to not require an additional library.
"""
platform = 'Linux'
distribution = None
source_file = {
socket.AF_INET: '/proc/net/tcp',
socket.AF_INET6: '/proc/net/tcp6'
}
match_all_ips = {
socket.AF_INET: '00000000',
socket.AF_INET6: '00000000000000000000000000000000',
}
ipv4_mapped_ipv6_address = {
'prefix': '0000000000000000FFFF0000',
'match_all': '0000000000000000FFFF000000000000'
}
local_address_field = 1
remote_address_field = 2
connection_state_field = 3
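    # A /proc/net/tcp line looks roughly like (columns abridged):
    #   sl  local_address rem_address   st ...
    #    0: 0100007F:0016 00000000:0000 0A ...
    # i.e. little-endian hex ip:port pairs in fields 1-2 and a hex connection
    # state in field 3, which is what the field indices above select.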
def __init__(self, module):
self.module = module
self.ips = _convert_host_to_hex(module.params['host'])
self.port = "%0.4X" % int(module.params['port'])
self.exclude_ips = self._get_exclude_ips()
def _get_exclude_ips(self):
exclude_hosts = self.module.params['exclude_hosts']
exclude_ips = []
if exclude_hosts is not None:
for host in exclude_hosts:
exclude_ips.extend(_convert_host_to_hex(host))
return exclude_ips
def get_active_connections_count(self):
active_connections = 0
for family in self.source_file.keys():
f = open(self.source_file[family])
for tcp_connection in f.readlines():
tcp_connection = tcp_connection.strip().split()
if tcp_connection[self.local_address_field] == 'local_address':
continue
if tcp_connection[self.connection_state_field] not in self.connection_states:
continue
(local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
if self.port != local_port:
continue
(remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
if (family, remote_ip) in self.exclude_ips:
continue
if any((
(family, local_ip) in self.ips,
(family, self.match_all_ips[family]) in self.ips,
local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
(family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
)):
active_connections += 1
f.close()
return active_connections
def _convert_host_to_ip(host):
"""
    Perform forward DNS resolution on host; passing an IP returns the same IP
Args:
host: String with either hostname, IPv4, or IPv6 address
Returns:
List of tuples containing address family and IP
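
    For example (IPv4 hosts also get an IPv4-mapped IPv6 entry):
        _convert_host_to_ip('127.0.0.1')
        -> [(socket.AF_INET, '127.0.0.1'), (socket.AF_INET6, '::ffff:127.0.0.1')]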
"""
addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)
ips = []
for family, socktype, proto, canonname, sockaddr in addrinfo:
ip = sockaddr[0]
ips.append((family, ip))
if family == socket.AF_INET:
ips.append((socket.AF_INET6, "::ffff:" + ip))
return ips
def _convert_host_to_hex(host):
"""
Convert the provided host to the format in /proc/net/tcp*
/proc/net/tcp uses little-endian four byte hex for ipv4
/proc/net/tcp6 uses little-endian per 4B word for ipv6
Args:
host: String with either hostname, IPv4, or IPv6 address
Returns:
List of tuples containing address family and the
little-endian converted host
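
    For example, on a little-endian host:
        _convert_host_to_hex('127.0.0.1')
        -> [(socket.AF_INET, '0100007F'),
            (socket.AF_INET6, '0000000000000000FFFF00000100007F')]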
"""
ips = []
if host is not None:
for family, ip in _convert_host_to_ip(host):
hexip_nf = binascii.b2a_hex(socket.inet_pton(family, ip))
hexip_hf = ""
for i in range(0, len(hexip_nf), 8):
ipgroup_nf = hexip_nf[i:i+8]
ipgroup_hf = socket.ntohl(int(ipgroup_nf, base=16))
hexip_hf = "%s%08X" % (hexip_hf, ipgroup_hf)
ips.append((family, hexip_hf))
return ips
def _create_connection( (host, port), connect_timeout):
"""
Connect to a 2-tuple (host, port) and return
the socket object.
Args:
2-tuple (host, port) and connection timeout
Returns:
Socket object
"""
if sys.version_info < (2, 6):
        # socket.create_connection() only exists on Python >= 2.6, so resolve
        # the address family ourselves and connect manually.
        (family, _) = _convert_host_to_ip(host)[0]
connect_socket = socket.socket(family, socket.SOCK_STREAM)
connect_socket.settimeout(connect_timeout)
connect_socket.connect( (host, port) )
else:
connect_socket = socket.create_connection( (host, port), connect_timeout)
return connect_socket
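
# Backport of timedelta.total_seconds(), which is only available on
# Python >= 2.7.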
def _timedelta_total_seconds(timedelta):
return (
timedelta.microseconds + 0.0 +
(timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
def main():
module = AnsibleModule(
argument_spec = dict(
host=dict(default='127.0.0.1'),
timeout=dict(default=300, type='int'),
connect_timeout=dict(default=5, type='int'),
delay=dict(default=0, type='int'),
port=dict(default=None, type='int'),
path=dict(default=None, type='path'),
search_regex=dict(default=None),
state=dict(default='started', choices=['started', 'stopped', 'present', 'absent', 'drained']),
exclude_hosts=dict(default=None, type='list')
),
)
params = module.params
host = params['host']
timeout = params['timeout']
connect_timeout = params['connect_timeout']
delay = params['delay']
port = params['port']
state = params['state']
path = params['path']
search_regex = params['search_regex']
if search_regex is not None:
compiled_search_re = re.compile(search_regex, re.MULTILINE)
else:
compiled_search_re = None
if port and path:
module.fail_json(msg="port and path parameter can not both be passed to wait_for")
if path and state == 'stopped':
module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module")
if path and state == 'drained':
module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module")
if params['exclude_hosts'] is not None and state != 'drained':
module.fail_json(msg="exclude_hosts should only be with state=drained")
start = datetime.datetime.now()
if delay:
time.sleep(delay)
if not port and not path and state != 'drained':
time.sleep(timeout)
elif state in [ 'stopped', 'absent' ]:
### first wait for the stop condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
if path:
try:
f = open(path)
f.close()
time.sleep(1)
except IOError:
break
elif port:
try:
s = _create_connection( (host, port), connect_timeout)
s.shutdown(socket.SHUT_RDWR)
s.close()
time.sleep(1)
except:
break
else:
time.sleep(1)
else:
elapsed = datetime.datetime.now() - start
if port:
module.fail_json(msg="Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
elif path:
module.fail_json(msg="Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds)
elif state in ['started', 'present']:
### wait for start condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
if path:
try:
os.stat(path)
except OSError, e:
# If anything except file not present, throw an error
if e.errno != 2:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
# file doesn't exist yet, so continue
else:
# File exists. Are there additional things to check?
if not compiled_search_re:
# nope, succeed!
break
try:
f = open(path)
try:
if re.search(compiled_search_re, f.read()):
# String found, success!
break
finally:
f.close()
except IOError:
pass
elif port:
alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now()))
try:
s = _create_connection((host, port), min(connect_timeout, alt_connect_timeout))
except:
# Failed to connect by connect_timeout. wait and try again
pass
else:
# Connected -- are there additional conditions?
if compiled_search_re:
data = ''
matched = False
while datetime.datetime.now() < end:
max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now()))
(readable, w, e) = select.select([s], [], [], max_timeout)
if not readable:
# No new data. Probably means our timeout
# expired
continue
response = s.recv(1024)
if not response:
# Server shutdown
break
data += response
if re.search(compiled_search_re, data):
matched = True
break
# Shutdown the client socket
s.shutdown(socket.SHUT_RDWR)
s.close()
if matched:
# Found our string, success!
break
else:
# Connection established, success!
s.shutdown(socket.SHUT_RDWR)
s.close()
break
# Conditions not yet met, wait and try again
time.sleep(1)
else: # while-else
# Timeout expired
elapsed = datetime.datetime.now() - start
if port:
if search_regex:
module.fail_json(msg="Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds)
else:
module.fail_json(msg="Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds)
elif path:
if search_regex:
module.fail_json(msg="Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds)
else:
module.fail_json(msg="Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds)
elif state == 'drained':
### wait until all active connections are gone
end = start + datetime.timedelta(seconds=timeout)
tcpconns = TCPConnectionInfo(module)
while datetime.datetime.now() < end:
try:
if tcpconns.get_active_connections_count() == 0:
break
except IOError:
pass
time.sleep(1)
else:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds)
elapsed = datetime.datetime.now() - start
module.exit_json(state=state, port=port, search_regex=search_regex, path=path, elapsed=elapsed.seconds)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGLContext/scenegraph/box.py | 2 | 5301 | """Box node for use in geometry attribute of Shapes"""
from vrml import cache
from OpenGLContext.arrays import array
from OpenGL.arrays import vbo
from OpenGL.GL import *
from vrml.vrml97 import basenodes
from vrml import protofunctions
class Box( basenodes.Box ):
"""Simple Box object of given size centered about local origin
The Box geometry node can be used in the geometry
field of a Shape node to be displayed. Use Transform
nodes to position the box within the world.
The Box includes texture coordinates and normals.
Attributes of note within the Box object:
size -- x,y,z tuple giving the size of the box
    (The compiled geometry is cached per render mode via mode.cache,
    rather than in a display list stored on the node.)
Reference:
http://www.web3d.org/technicalinfo/specifications/vrml97/part1/nodesRef.html#Box
"""
def compile( self, mode=None ):
"""Compile the box as a display-list"""
if vbo.get_implementation():
vb = vbo.VBO( array( list(yieldVertices( self.size )), 'f'))
def draw( textured=True,lit=True ):
vb.bind()
try:
glPushClientAttrib(GL_CLIENT_ALL_ATTRIB_BITS)
try:
glEnableClientState( GL_VERTEX_ARRAY )
if lit:
glEnableClientState( GL_NORMAL_ARRAY )
glNormalPointer( GL_FLOAT, 32, vb+8 )
if textured:
glEnableClientState( GL_TEXTURE_COORD_ARRAY )
glTexCoordPointer( 2, GL_FLOAT, 32, vb )
glVertexPointer( 3, GL_FLOAT, 32, vb+20 )
glDrawArrays( GL_TRIANGLES, 0, 36 )
finally:
glPopClientAttrib()
finally:
vb.unbind()
else:
vb = array( list(yieldVertices( self.size )), 'f')
def draw(textured=True,lit=True):
glPushClientAttrib(GL_CLIENT_ALL_ATTRIB_BITS)
try:
glInterleavedArrays( GL_T2F_N3F_V3F, 0, vb )
glDrawArrays( GL_TRIANGLES, 0, 36 )
finally:
glPopClientAttrib()
holder = mode.cache.holder(self, draw)
holder.depend( self, protofunctions.getField(self, 'size') )
return draw
def render (
self,
visible = 1, # can skip normals and textures if not
lit = 1, # can skip normals if not
textured = 1, # can skip textureCoordinates if not
transparent = 0, # XXX should sort triangle geometry...
mode = None, # the renderpass object for which we compile
):
"""Render the Box (build and) call the display list"""
# do we have a cached array-geometry?
vb = mode.cache.getData(self)
if not vb:
vb = self.compile( mode=mode )
if vb:
vb(textured=textured,lit=lit)
return 1
def boundingVolume( self, mode ):
"""Create a bounding-volume object for this node"""
from OpenGLContext.scenegraph import boundingvolume
current = boundingvolume.getCachedVolume( self )
if current:
return current
return boundingvolume.cacheVolume(
self,
boundingvolume.AABoundingBox(
size = self.size,
),
( (self, 'size'), ),
)
def yieldVertices(size):
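    # Yields 36 vertices (6 faces x 2 triangles x 3 vertices), each as an
    # 8-tuple (s, t, nx, ny, nz, x, y, z) matching the interleaved
    # T2F_N3F_V3F layout consumed by Box.compile().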
x,y,z = size
x,y,z = x/2.0,y/2.0,z/2.0
normal = ( 0.0, 0.0, 1.0)
yield (0.0, 0.0)+ normal + (-x,-y,z);
yield (1.0, 0.0)+ normal + (x,-y,z);
yield (1.0, 1.0)+ normal + (x,y,z);
yield (0.0, 0.0)+ normal + (-x,-y,z);
yield (1.0, 1.0)+ normal + (x,y,z);
yield (0.0, 1.0)+ normal + (-x,y,z);
normal = ( 0.0, 0.0,-1.0);
yield (1.0, 0.0)+ normal + (-x,-y,-z);
yield (1.0, 1.0)+ normal + (-x,y,-z);
yield (0.0, 1.0)+ normal + (x,y,-z);
yield (1.0, 0.0)+ normal + (-x,-y,-z);
yield (0.0, 1.0)+ normal + (x,y,-z);
yield (0.0, 0.0)+ normal + (x,-y,-z);
normal = ( 0.0, 1.0, 0.0)
yield (0.0, 1.0)+ normal + (-x,y,-z);
yield (0.0, 0.0)+ normal + (-x,y,z);
yield (1.0, 0.0)+ normal + (x,y,z);
yield (0.0, 1.0)+ normal + (-x,y,-z);
yield (1.0, 0.0)+ normal + (x,y,z);
yield (1.0, 1.0)+ normal + (x,y,-z);
normal = ( 0.0,-1.0, 0.0)
yield (1.0, 1.0)+ normal + (-x,-y,-z);
yield (0.0, 1.0)+ normal + (x,-y,-z);
yield (0.0, 0.0)+ normal + (x,-y,z);
yield (1.0, 1.0)+ normal + (-x,-y,-z);
yield (0.0, 0.0)+ normal + (x,-y,z);
yield (1.0, 0.0)+ normal + (-x,-y,z);
normal = ( 1.0, 0.0, 0.0)
yield (1.0, 0.0)+ normal + (x,-y,-z);
yield (1.0, 1.0)+ normal + (x,y,-z);
yield (0.0, 1.0)+ normal + (x,y,z);
yield (1.0, 0.0)+ normal + (x,-y,-z);
yield (0.0, 1.0)+ normal + (x,y,z);
yield (0.0, 0.0)+ normal + (x,-y,z);
normal = (-1.0, 0.0, 0.0)
yield (0.0, 0.0)+ normal + (-x,-y,-z);
yield (1.0, 0.0)+ normal + (-x,-y,z);
yield (1.0, 1.0)+ normal + (-x,y,z);
yield (0.0, 0.0)+ normal + (-x,-y,-z);
yield (1.0, 1.0)+ normal + (-x,y,z);
yield (0.0, 1.0)+ normal + (-x,y,-z);
| lgpl-3.0 |
hickford/youtube-dl | youtube_dl/extractor/giantbomb.py | 172 | 2655 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..utils import (
unescapeHTML,
qualities,
int_or_none,
)
class GiantBombIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?giantbomb\.com/videos/(?P<display_id>[^/]+)/(?P<id>\d+-\d+)'
_TEST = {
'url': 'http://www.giantbomb.com/videos/quick-look-destiny-the-dark-below/2300-9782/',
'md5': '57badeface303ecf6b98b812de1b9018',
'info_dict': {
'id': '2300-9782',
'display_id': 'quick-look-destiny-the-dark-below',
'ext': 'mp4',
'title': 'Quick Look: Destiny: The Dark Below',
'description': 'md5:0aa3aaf2772a41b91d44c63f30dfad24',
'duration': 2399,
'thumbnail': 're:^https?://.*\.jpg$',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
video = json.loads(unescapeHTML(self._search_regex(
r'data-video="([^"]+)"', webpage, 'data-video')))
duration = int_or_none(video.get('lengthSeconds'))
quality = qualities([
'f4m_low', 'progressive_low', 'f4m_high',
'progressive_high', 'f4m_hd', 'progressive_hd'])
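        # qualities() ranks a format id by its position in the list above
        # (later entries sort higher), so progressive_hd is preferred.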
formats = []
for format_id, video_url in video['videoStreams'].items():
if format_id == 'f4m_stream':
continue
if video_url.endswith('.f4m'):
f4m_formats = self._extract_f4m_formats(video_url + '?hdcore=3.3.1', display_id)
if f4m_formats:
f4m_formats[0]['quality'] = quality(format_id)
formats.extend(f4m_formats)
else:
formats.append({
'url': video_url,
'format_id': format_id,
'quality': quality(format_id),
})
if not formats:
youtube_id = video.get('youtubeID')
if youtube_id:
return self.url_result(youtube_id, 'Youtube')
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
| unlicense |
MrSenko/Kiwi | kiwi_lint/dunder_attributes.py | 2 | 1555 | # Copyright (c) 2018 Alexander Todorov <[email protected]>
# Licensed under the GPL 2.0: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
import astroid
from pylint import checkers, interfaces
from pylint.checkers import utils
class DunderClassAttributeChecker(checkers.BaseChecker):
__implements__ = (interfaces.IAstroidChecker,)
name = "dunder-class-attribute-checker"
msgs = {
"C4401": (
"Class attributes should not contain double underscores",
"dunder-class-attribute",
'Dunders, e.g. "__some_name__", are reserved for Python. '
"Do not name your class attributes this way!",
)
}
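
    # A (hypothetical) example of the pattern this checker reports:
    #
    #     class Rule:
    #         __weights__ = [1, 2]   # C4401: dunder-class-attribute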
@utils.check_messages("dunder-class-attribute")
def visit_classdef(self, node):
"""Detect when class attributes use double underscores."""
# we can redefine special methods (e.g. __iter__) and some attributes,
# e.g. __doc__, by declaring them as class attributes. Exclude them from the
# test below.
allowed_attributes = dir([])
for child in node.body:
if isinstance(child, astroid.Assign):
for target in child.targets:
if (
isinstance(target, astroid.AssignName)
and target.name not in allowed_attributes
and target.name.startswith("__")
and target.name.endswith("__")
):
self.add_message("dunder-class-attribute", node=child)
| gpl-2.0 |
dianarg/conlang-util | generator/map/map_gen.py | 1 | 5467 | #!/usr/bin/env python
# Toy Terrain Generator
import sys
# import and init pygame
import pygame
import random
pygame.init()
class PixelGrid:
def __init__(self, size, scale):
self.points = []
self.size = size
self.scale = scale
for i in range(size):
self.points.append([])
for j in range(size):
self.points[i].append(base_height)
def normalize(self):
min = self.points[0][0]
max = min
for i in range(self.size):
for j in range(self.size):
if (self.points[i][j] < min):
min = self.points[i][j]
if (self.points[i][j] > max):
max = self.points[i][j]
for i in range(self.size):
for j in range(self.size):
self.points[i][j] -= min
self.points[i][j] = ((self.points[i][j]*1.0)/max)*255
if(self.points[i][j] > 255):
self.points[i][j] = 255
if (self.points[i][j] < 0):
self.points[i][j] = 0
def apply_threshold(self, height):
color = (height, height, height)
if do_threshold:
if height > 200:
color = color # grey for snow
elif height > 150:
color = (height/2, height/3, height/3) # brown
elif height > 75:
color = (0, 255-height, 0) # green
elif height > 70:
color = (height*2, height*2, 0) # beach yellow
else:
color = (0, 0, height) # blue to black
return color
def draw(self, apply_threshold=True):
for i in range(self.size):
for j in range(self.size):
hc = self.points[i][j]
color = self.apply_threshold(hc)
pygame.draw.rect(window, color, (self.scale * i,
self.scale * j,
self.scale * i + self.scale,
self.scale * j + self.scale))
# draw it to the screen
pygame.display.flip()
def random_heights(self):
for i in range(self.size):
for j in range(self.size):
self.points[i][j] = random.randint(0, 255)
def diamond_square(self):
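        # Classic diamond-square: alternate a "diamond" pass (each block
        # centre becomes the average of its four corners) and a "square"
        # pass (edge midpoints average their four neighbours), perturbing
        # every value by a random offset in [-d, d] that shrinks each
        # iteration. Indices wrap via modulo, so the heightmap tiles.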
block = self.size
size = self.size
d = size/4
self.points[0][0] = random.randint(0, 255)
self.points[size-1][size-1] = random.randint(0, 255)
self.points[0][size-1] = random.randint(0, 255)
self.points[size-1][0] = random.randint(0, 255)
while block > 1:
# average four corners
i = 0
off = block/2
while i < self.size:
j = 0
while j < self.size:
self.points[i+off][j+off] = (
self.points[i][j] +
self.points[(i+block) % size][j] +
self.points[i][(j+block) % size] +
self.points[(i+block) % size][(j+block) % size]
)/4 + random.randint(-d, d)
j += block
i += block
# average edges
i = 0
off = block/2
while i < self.size:
j = 0
while j < self.size:
self.points[i][j+off] = (
self.points[i][j] +
self.points[i][(j+block) % size] +
self.points[(i+off) % size][(j+off) % size] +
self.points[(i-off) % size][(j+off) % size]
)/4 + random.randint(-d, d)
self.points[i+off][j] = (
self.points[i][j] +
self.points[(i+off) % size][(j+off) % size] +
self.points[(i+block) % size][j] +
self.points[(i+off) % size][(j-off) % size]
)/4 + random.randint(-d, d)
j += block
i += block
block = block/2
d = int(d/2.2)
if __name__ == '__main__':
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
# create the screen
# pixel grid parameters
grid_size = 512
base_height = 127
scale = 1
screen_size = scale * grid_size
window = pygame.display.set_mode((screen_size, screen_size))
font = pygame.font.Font(None, 17)
grid = PixelGrid(grid_size, scale)
pygame.display.get_surface().fill(BLACK)
text = font.render('Press any key to generate a new map.', True, WHITE)
textrect = text.get_rect()
textrect.centerx = window.get_rect().centerx
textrect.centery = window.get_rect().centery
window.blit(text, textrect)
pygame.display.update()
# whether to make thresholded map or heightmap
do_threshold = True
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit(0)
elif event.type == pygame.KEYDOWN:
pygame.display.get_surface().fill(BLACK)
print('diamond square...')
grid.diamond_square()
print('normalize...')
grid.normalize()
print('draw...')
grid.draw(do_threshold)
print('done!')
| mit |
rdipietro/tensorflow | tensorflow/python/framework/op_def_library_test.py | 21 | 65553 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.op_def_library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.framework.op_def_library import OpDefLibrary
from tensorflow.python.platform import googletest
def _unknown_shape(op):
"""Shape function for use with ops whose output shapes are unknown."""
return [tensor_shape.unknown_shape() for _ in op.outputs]
# NOTE(mrry): Dummy shape registrations for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape("Attr")(_unknown_shape)
ops.RegisterShape("AttrBool")(_unknown_shape)
ops.RegisterShape("AttrBoolList")(_unknown_shape)
ops.RegisterShape("AttrDefault")(_unknown_shape)
ops.RegisterShape("AttrEmptyListDefault")(_unknown_shape)
ops.RegisterShape("AttrEnum")(_unknown_shape)
ops.RegisterShape("AttrEnumList")(_unknown_shape)
ops.RegisterShape("AttrFloat")(_unknown_shape)
ops.RegisterShape("AttrListDefault")(_unknown_shape)
ops.RegisterShape("AttrListMin")(_unknown_shape)
ops.RegisterShape("AttrMin")(_unknown_shape)
ops.RegisterShape("AttrShape")(_unknown_shape)
ops.RegisterShape("AttrShapeList")(_unknown_shape)
ops.RegisterShape("AttrPartialShape")(_unknown_shape)
ops.RegisterShape("AttrPartialShapeList")(_unknown_shape)
ops.RegisterShape("AttrTypeDefault")(_unknown_shape)
ops.RegisterShape("AttrListTypeDefault")(_unknown_shape)
ops.RegisterShape("Binary")(_unknown_shape)
ops.RegisterShape("ComplexStruct")(_unknown_shape)
ops.RegisterShape("InPolymorphicTwice")(_unknown_shape)
ops.RegisterShape("MixedStruct")(_unknown_shape)
ops.RegisterShape("NInPolymorphicTwice")(_unknown_shape)
ops.RegisterShape("NInTwice")(_unknown_shape)
ops.RegisterShape("NInTwoTypeVariables")(_unknown_shape)
ops.RegisterShape("NIntsIn")(_unknown_shape)
ops.RegisterShape("NIntsOut")(_unknown_shape)
ops.RegisterShape("NIntsOutDefault")(_unknown_shape)
ops.RegisterShape("NPolymorphicIn")(_unknown_shape)
ops.RegisterShape("NPolymorphicOut")(_unknown_shape)
ops.RegisterShape("NPolymorphicOutDefault")(_unknown_shape)
ops.RegisterShape("NPolymorphicRestrictIn")(_unknown_shape)
ops.RegisterShape("NPolymorphicRestrictOut")(_unknown_shape)
ops.RegisterShape("OutT")(_unknown_shape)
ops.RegisterShape("OutTypeList")(_unknown_shape)
ops.RegisterShape("OutTypeListRestrict")(_unknown_shape)
ops.RegisterShape("Polymorphic")(_unknown_shape)
ops.RegisterShape("PolymorphicDefaultOut")(_unknown_shape)
ops.RegisterShape("PolymorphicOut")(_unknown_shape)
ops.RegisterShape("RefIn")(_unknown_shape)
ops.RegisterShape("RefOut")(_unknown_shape)
ops.RegisterShape("ReservedAttr")(_unknown_shape)
ops.RegisterShape("ReservedInput")(_unknown_shape)
ops.RegisterShape("Restrict")(_unknown_shape)
ops.RegisterShape("Simple")(_unknown_shape)
ops.RegisterShape("SimpleStruct")(_unknown_shape)
ops.RegisterShape("TwoRefsIn")(_unknown_shape)
ops.RegisterShape("TypeList")(_unknown_shape)
ops.RegisterShape("TypeListRestrict")(_unknown_shape)
ops.RegisterShape("TypeListTwice")(_unknown_shape)
class OpDefLibraryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._lib = OpDefLibrary()
self._g = ops.Graph()
self._default_graph_controller = self._g.as_default()
self._default_graph_controller.__enter__()
self._add_op("name: 'Simple' input_arg { name: 'a' type: DT_INT32 } "
"output_arg { name: 'out' type: DT_FLOAT }")
self._add_op("name: 'OutT' output_arg { name: 'a' type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
def tearDown(self):
self._default_graph_controller.__exit__(None, None, None)
def _add_op(self, ascii):
op_def = op_def_pb2.OpDef()
text_format.Merge(ascii, op_def)
self._lib.add_op(op_def)
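
  # Helper: materialize a tensor of dtype `t` via the registered OutT op,
  # so tests can feed typed inputs when exercising type checking.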
def Tensor(self, t, name="in"):
return self._lib.apply_op("OutT", T=t, name=name)
def testNoRegisteredOpFails(self):
with self.assertRaises(RuntimeError) as cm:
self._lib.apply_op("unknown")
self.assertEqual(str(cm.exception), "Unrecognized Op name unknown")
def testAddOpValidation(self):
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'MissingTypeAttr' "
"input_arg { name: 'a' type_attr: 'T' } ")
self.assertEqual(str(cm.exception),
"Inconsistent OpDef for 'MissingTypeAttr', "
"missing attr 'T'")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'BadTypeAttr' "
"output_arg { name: 'a' type_attr: 'T' } "
"attr { name: 'T' type: 'int' }")
self.assertEqual(
str(cm.exception),
"Attr 'T' of 'BadTypeAttr' used as a type_attr but has type int")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'MissingNumberAttr' "
"input_arg { name: 'a' type: DT_INT32 number_attr: 'N' } ")
self.assertEqual(str(cm.exception),
"Inconsistent OpDef for 'MissingNumberAttr', "
"missing attr 'N'")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'BadNumberAttr' "
"output_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
"attr { name: 'N' type: 'type' }")
self.assertEqual(
str(cm.exception),
"Attr 'N' of 'BadNumberAttr' used as a number_attr but has type type")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'TwoTypesA' "
"input_arg { name: 'a' type: DT_INT32 type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'TwoTypesA' must have one type field not 2")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'TwoTypesB' "
"input_arg { name: 'a' type: DT_INT32 type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'TwoTypesB' must have one type field not 2")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'ThreeTypes' "
"input_arg { name: 'a' type: DT_INT32 type_attr: 'T' "
"type_list_attr: 'U' } "
"attr { name: 'T' type: 'type' } "
"attr { name: 'U' type: 'list(type)' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'ThreeTypes' must have one type field not 3")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'NoTypes' output_arg { name: 'a' } ")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'NoTypes' must have one type field not 0")
def testSimple(self):
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(dtypes.float32, out.dtype)
self.assertProtoEquals("""
name: 'Simple' op: 'Simple' input: 'Simple/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=4)
self.assertProtoEquals("""
name: 'Simple_1' op: 'Simple' input: 'Simple_1/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=5, name="named")
self.assertProtoEquals("""
name: 'named' op: 'Simple' input: 'named/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=[[1, 2, 3], [4, 5, 6]], name="two_d")
self.assertProtoEquals("""
name: 'two_d' op: 'Simple' input: 'two_d/a'
""", out.op.node_def)
def testSimpleFailures(self):
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a="Bad string")
self.assertEqual(str(cm.exception),
"Expected int32 passed to parameter 'a' of op 'Simple', "
"got 'Bad string' of type 'str' instead.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=self.Tensor(dtypes.string))
self.assertEqual(str(cm.exception),
"Input 'a' of 'Simple' Op has type string "
"that does not match expected type of int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=6, extra="bogus")
self.assertEqual(str(cm.exception),
"apply_op() got unexpected keyword arguments: extra")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=6, extra1="bogus", extra2="also_bogus")
self.assertEqual(str(cm.exception),
"apply_op() got unexpected keyword arguments: extra1, "
"extra2")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple")
self.assertEqual(str(cm.exception), "No argument for input a")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", wrong=7)
self.assertEqual(str(cm.exception), "No argument for input a")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a={"label": 1})
self.assertEqual(str(cm.exception),
"Expected int32 passed to parameter 'a' of op 'Simple', "
"got {'label': 1} of type 'dict' instead.")
def testReservedInput(self):
self._add_op("name: 'ReservedInput' "
"input_arg { name: 'input' type: DT_INT32 } ")
op = self._lib.apply_op("ReservedInput", input_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedInput' input: 'x/input'
""", op.node_def)
def testPolymorphic(self):
self._add_op("name: 'Polymorphic' "
"input_arg { name: 'a' type_attr: 'T' } "
"output_arg { name: 'out' type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
out = self._lib.apply_op("Polymorphic", a=7, name="p")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'Polymorphic' input: 'p/a'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("Polymorphic", a="s", name="q")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'Polymorphic' input: 'q/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("Polymorphic", a=["s", "t", "u"], name="r")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'r' op: 'Polymorphic' input: 'r/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Polymorphic", a="s", T=dtypes.string)
self.assertEqual(str(cm.exception),
"Should not specify value for inferred attr 'T'.")
def testPolymorphicOut(self):
self._add_op("name: 'PolymorphicOut' "
"output_arg { name: 'out' type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
out = self._lib.apply_op("PolymorphicOut", T=dtypes.int32, name="p")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'PolymorphicOut'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("PolymorphicOut", T=dtypes.bool, name="q")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'PolymorphicOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("PolymorphicOut")
self.assertEqual(str(cm.exception),
"No argument for attr T")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("PolymorphicOut", T=None)
self.assertEqual(str(cm.exception),
"Expected DataType for argument 'T' not None.")
def testPolymorphicDefaultOut(self):
self._add_op("name: 'PolymorphicDefaultOut' "
"output_arg { name: 'out' type_attr: 'T' } "
"attr { name: 'T' type: 'type' "
" default_value { type: DT_STRING } }")
out = self._lib.apply_op("PolymorphicDefaultOut", T=None, name="p")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'PolymorphicDefaultOut'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("PolymorphicDefaultOut", T=dtypes.bool, name="q")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'PolymorphicDefaultOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
def testBinary(self):
self._add_op("name: 'Binary' "
"input_arg { name: 'a' type_attr: 'T' } "
"input_arg { name: 'b' type_attr: 'T' } "
"output_arg { name: 'out' type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
out = self._lib.apply_op("Binary", a=8, b=9, name="b")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'b' op: 'Binary' input: 'b/a' input: 'b/b'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("Binary", a="left", b="right", name="c")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'c' op: 'Binary' input: 'c/a' input: 'c/b'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Binary", a="left", b=12)
self.assertEqual(str(cm.exception),
"Expected string passed to parameter 'b' of op 'Binary', "
"got 12 of type 'int' instead.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Binary",
a=self.Tensor(dtypes.string),
b=self.Tensor(dtypes.int32))
self.assertEqual(str(cm.exception),
"Input 'b' of 'Binary' Op has type int32 "
"that does not match type string of argument 'a'.")
def testRestrict(self):
self._add_op("name: 'Restrict' "
"input_arg { name: 'a' type_attr: 'T' } "
"output_arg { name: 'out' type_attr: 'T' } "
"attr { name: 'T' type: 'type' allowed_values { list { "
" type: DT_STRING type: DT_BOOL } } }")
out = self._lib.apply_op("Restrict", a="foo", name="g")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'g' op: 'Restrict' input: 'g/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("Restrict", a=True, name="h")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'h' op: 'Restrict' input: 'h/a'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Restrict", a=17)
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 "
"not in list of allowed values: string, bool")
def testTypeList(self):
self._add_op("name: 'TypeList' "
"input_arg { name: 'a' type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' }")
op = self._lib.apply_op("TypeList", a=["foo"], name="z")
self.assertProtoEquals("""
name: 'z' op: 'TypeList' input: 'z/a_0'
attr { key: 'T' value { list { type: DT_STRING } } }
""", op.node_def)
op = self._lib.apply_op("TypeList", a=[True, 12], name="y")
self.assertProtoEquals("""
name: 'y' op: 'TypeList' input: 'y/a_0' input: 'y/a_1'
attr { key: 'T' value { list { type: DT_BOOL type: DT_INT32 } } }
""", op.node_def)
op = self._lib.apply_op("TypeList", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'TypeList' attr { key: 'T' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeList", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' "
"argument to 'TypeList' Op, not ")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeList", a=[self.Tensor(dtypes.int32), None])
self.assertStartsWith(str(cm.exception),
"Tensors in list passed to 'a' of 'TypeList' Op "
"have types [int32, <NOT CONVERTIBLE TO TENSOR>]")
def testTypeListTwice(self):
self._add_op("name: 'TypeListTwice' "
"input_arg { name: 'a' type_list_attr: 'T' } "
"input_arg { name: 'b' type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' }")
op = self._lib.apply_op("TypeListTwice",
a=["foo", True],
b=["bar", False],
name="z")
self.assertProtoEquals("""
name: 'z' op: 'TypeListTwice'
input: 'z/a_0' input: 'z/a_1' input: 'z/b_0' input: 'z/b_1'
attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
""", op.node_def)
op = self._lib.apply_op("TypeListTwice", a=[], b=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'TypeListTwice' attr { key: 'T' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeListTwice", a=["foo", True], b=["bar", 6])
self.assertEqual(str(cm.exception),
"Input 'b' of 'TypeListTwice' Op has type list of "
"string, int32 that does not match type list "
"string, bool of argument 'a'.")
def testOutTypeList(self):
self._add_op("name: 'OutTypeList' "
"output_arg { name: 'out' type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' }")
out, = self._lib.apply_op("OutTypeList", T=[dtypes.float32], name="x")
self.assertEqual(dtypes.float32, out.dtype)
self.assertProtoEquals("""
name: 'x' op: 'OutTypeList'
attr { key: 'T' value { list { type: DT_FLOAT } } }
""", out.op.node_def)
out1, out2 = self._lib.apply_op("OutTypeList",
T=[dtypes.int32, dtypes.bool],
name="w")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertProtoEquals("""
name: 'w' op: 'OutTypeList'
attr { key: 'T' value { list { type: DT_INT32 type: DT_BOOL } } }
""", out1.op.node_def)
out = self._lib.apply_op("OutTypeList", T=[], name="empty")
self.assertEqual([], out)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("OutTypeList", T=dtypes.int32)
self.assertEqual(str(cm.exception), "Expected list for attr T")
def testTypeListRestrict(self):
self._add_op("name: 'TypeListRestrict' "
"input_arg { name: 'a' type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' allowed_values { list { "
" type: DT_STRING type: DT_BOOL } } }")
op = self._lib.apply_op("TypeListRestrict", a=["foo", False], name="v")
self.assertProtoEquals("""
name: 'v' op: 'TypeListRestrict' input: 'v/a_0' input: 'v/a_1'
attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeListRestrict", a=[True, 12])
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 "
"not in list of allowed values: string, bool")
def testOutTypeListRestrict(self):
self._add_op("name: 'OutTypeListRestrict' "
"output_arg { name: 'out' type_list_attr: 't' } "
"attr { name: 't' type: 'list(type)' allowed_values { list { "
" type: DT_STRING type: DT_BOOL } } }")
out1, out2 = self._lib.apply_op("OutTypeListRestrict",
t=[dtypes.bool, dtypes.string],
name="u")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.string, out2.dtype)
self.assertProtoEquals("""
name: 'u' op: 'OutTypeListRestrict'
attr { key: 't' value { list { type: DT_BOOL type: DT_STRING } } }
""", out1.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("OutTypeListRestrict", t=[dtypes.string, dtypes.int32])
self.assertEqual(str(cm.exception),
"Value passed to parameter 't' has DataType int32 "
"not in list of allowed values: string, bool")
def testAttr(self):
self._add_op("name: 'Attr' attr { name: 'a' type: 'int' }")
op = self._lib.apply_op("Attr", a=12, name="t")
self.assertProtoEquals("""
name: 't' op: 'Attr' attr { key: 'a' value { i: 12 } }
""", op.node_def)
op = self._lib.apply_op("Attr", a=tensor_shape.Dimension(13), name="u")
self.assertProtoEquals("""
name: 'u' op: 'Attr' attr { key: 'a' value { i: 13 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a="bad")
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not 'bad'.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a=[12])
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not [12].")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a=None)
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not None.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr")
self.assertEqual(str(cm.exception), "No argument for attr a")
def testAttrFloat(self):
self._add_op("name: 'AttrFloat' attr { name: 'a' type: 'float' }")
op = self._lib.apply_op("AttrFloat", a=1.2, name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrFloat' attr { key: 'a' value { f: 1.2 } }
""", op.node_def)
op = self._lib.apply_op("AttrFloat", a=12, name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrFloat' attr { key: 'a' value { f: 12 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrFloat", a="bad")
self.assertEqual(str(cm.exception),
"Expected float for argument 'a' not 'bad'.")
def testAttrBool(self):
self._add_op("name: 'AttrBool' attr { name: 'a' type: 'bool' }")
op = self._lib.apply_op("AttrBool", a=True, name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrBool' attr { key: 'a' value { b: true } }
""", op.node_def)
op = self._lib.apply_op("AttrBool", a=False, name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrBool' attr { key: 'a' value { b: false } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=0)
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 0.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=1)
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 1.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=[])
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not [].")
def testAttrBoolList(self):
self._add_op("name: 'AttrBoolList' attr { name: 'a' type: 'list(bool)' }")
op = self._lib.apply_op("AttrBoolList", a=[True, False, True], name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrBoolList'
attr { key: 'a' value { list { b: true b: false b:true } } }
""", op.node_def)
op = self._lib.apply_op("AttrBoolList", a=[], name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrBoolList' attr { key: 'a' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBoolList", a=[0])
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 0.")
def testAttrMin(self):
self._add_op("name: 'AttrMin' attr { name: 'a' type: 'int' "
"has_minimum: true minimum: 5 }")
op = self._lib.apply_op("AttrMin", a=12, name="s")
self.assertProtoEquals("""
name: 's' op: 'AttrMin' attr { key: 'a' value { i: 12 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrMin", a=2)
self.assertEqual(str(cm.exception),
"Attr 'a' of 'AttrMin' Op passed 2 less than minimum 5.")
def testAttrListMin(self):
self._add_op("name: 'AttrListMin' attr { name: 'a' type: 'list(int)' "
"has_minimum: true minimum: 2 }")
op = self._lib.apply_op("AttrListMin", a=[1, 2], name="r")
self.assertProtoEquals("""
name: 'r' op: 'AttrListMin'
attr { key: 'a' value { list { i: 1 i: 2 } } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrListMin", a=[17])
self.assertEqual(str(cm.exception),
"Attr 'a' of 'AttrListMin' Op "
"passed list of length 1 less than minimum 2.")
def testAttrEnum(self):
self._add_op("name: 'AttrEnum' "
"attr { name: 'a' type: 'string' "
" allowed_values { list { s: 'apples' s: 'oranges' } } }")
op = self._lib.apply_op("AttrEnum", a="oranges", name="e")
self.assertProtoEquals("""
name: 'e' op: 'AttrEnum' attr { key: 'a' value { s: 'oranges' } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrEnum", a="invalid")
self.assertEqual(str(cm.exception),
'Attr \'a\' of \'AttrEnum\' Op '
'passed string \'invalid\' not in: '
'"apples", "oranges".')
def testAttrEnumList(self):
self._add_op("name: 'AttrEnumList' "
"attr { name: 'a' type: 'list(string)' "
" allowed_values { list { s: 'apples' s: 'oranges' } } }")
op = self._lib.apply_op("AttrEnumList", a=["oranges", "apples"], name="f")
self.assertProtoEquals("""
name: 'f' op: 'AttrEnumList'
attr { key: 'a' value { list { s: 'oranges' s: 'apples' } } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrEnumList", a=["apples", "invalid", "oranges"])
self.assertEqual(str(cm.exception),
'Attr \'a\' of \'AttrEnumList\' Op '
'passed string \'invalid\' not '
'in: "apples", "oranges".')
def testAttrShape(self):
self._add_op("name: 'AttrShape' attr { name: 'a' type: 'shape' }")
op = self._lib.apply_op("AttrShape", a=[5], name="s1")
self.assertProtoEquals("""
name: 's1' op: 'AttrShape'
attr { key: 'a' value { shape { dim { size: 5 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShape", a=(4, 3, 2), name="s2")
self.assertProtoEquals("""
name: 's2' op: 'AttrShape'
attr { key: 'a' value {
shape { dim { size: 4 } dim { size: 3 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op(
"AttrShape", a=tensor_shape.TensorShape([3, 2]), name="s3")
self.assertProtoEquals("""
name: 's3' op: 'AttrShape'
attr { key: 'a' value {
shape { dim { size: 3 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShape", a=[], name="s4")
self.assertProtoEquals("""
name: 's4' op: 'AttrShape' attr { key: 'a' value { shape { } } }
""", op.node_def)
shape = tensor_shape_pb2.TensorShapeProto()
shape.dim.add().size = 6
shape.dim.add().size = 3
op = self._lib.apply_op("AttrShape", a=shape, name="s5")
self.assertProtoEquals("""
name: 's5' op: 'AttrShape'
attr { key: 'a' value { shape { dim { size: 6 } dim { size: 3 } } } }
""", op.node_def)
# TODO(josh11b): Re-enable this test once we stop promoting scalars to shapes.
# with self.assertRaises(TypeError) as cm:
# self._lib.apply_op("AttrShape", a=5)
# self.assertEqual(str(cm.exception),
# "Don't know how to convert 5 to a TensorShapeProto for "
# "argument 'a'")
with self.assertRaises(TypeError):
self._lib.apply_op("AttrShape", a="ABC")
def testAttrShapeList(self):
self._add_op("name: 'AttrShapeList' attr { name: 'a' type: 'list(shape)' }")
op = self._lib.apply_op("AttrShapeList", a=[[3, 2], [6, 5, 4]], name="sl")
self.assertProtoEquals("""
name: 'sl' op: 'AttrShapeList'
attr { key: 'a' value { list {
shape { dim { size: 3 } dim { size: 2 } }
shape { dim { size: 6 } dim { size: 5 } dim { size: 4 } } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShapeList", a=[], name="esl")
self.assertProtoEquals("""
name: 'esl' op: 'AttrShapeList' attr { key: 'a' value { list { } } }
""", op.node_def)
def testAttrPartialShape(self):
self._add_op(
"name: 'AttrPartialShape' attr { name: 'a' type: 'shape' }")
op = self._lib.apply_op("AttrPartialShape", a=[5], name="s1")
self.assertProtoEquals("""
name: 's1' op: 'AttrPartialShape'
attr { key: 'a' value { shape { dim { size: 5 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShape", a=(4, None, 2), name="s2")
self.assertProtoEquals("""
name: 's2' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: 4 } dim { size: -1 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op(
"AttrPartialShape", a=tensor_shape.TensorShape([3, None]), name="s3")
self.assertProtoEquals("""
name: 's3' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: 3 } dim { size: -1 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShape", a=[], name="s4")
self.assertProtoEquals("""
name: 's4' op: 'AttrPartialShape'
attr { key: 'a' value { shape { } } }
""", op.node_def)
shape = tensor_shape_pb2.TensorShapeProto()
shape.dim.add().size = -1
shape.dim.add().size = 3
op = self._lib.apply_op("AttrPartialShape", a=shape, name="s5")
self.assertProtoEquals("""
name: 's5' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: -1 } dim { size: 3 } } } }
""", op.node_def)
# TODO(ebrevdo): Re-enable once we stop promoting scalars to shapes.
# with self.assertRaises(TypeError) as cm:
# self._lib.apply_op("AttrPartialShape", a=5)
# self.assertEqual(str(cm.exception),
# "Don't know how to convert 5 to a TensorShapeProto for "
# "argument 'a'")
with self.assertRaises(TypeError):
self._lib.apply_op("AttrPartialShape", a="ABC")
def testAttrPartialShapeList(self):
self._add_op("""
name: 'AttrPartialShapeList'
attr { name: 'a' type: 'list(shape)' }
""")
op = self._lib.apply_op(
"AttrPartialShapeList", a=[[3, 2], [6, None, 4]], name="sl")
self.assertProtoEquals("""
name: 'sl' op: 'AttrPartialShapeList'
attr { key: 'a' value { list {
shape { dim { size: 3 } dim { size: 2 } }
shape { dim { size: 6 } dim { size: -1 } dim { size: 4 } } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShapeList", a=[], name="esl")
self.assertProtoEquals("""
name: 'esl' op: 'AttrPartialShapeList' attr {
key: 'a' value { list { } } }
""", op.node_def)
def testAttrDefault(self):
self._add_op("name: 'AttrDefault' "
"attr { name: 'a' type: 'string' "
" default_value { s: 'banana' } }")
op = self._lib.apply_op("AttrDefault", a=None, name="d")
self.assertProtoEquals("""
name: 'd' op: 'AttrDefault' attr { key: 'a' value { s: 'banana' } }
""", op.node_def)
op = self._lib.apply_op("AttrDefault", a="kiwi", name="c")
self.assertProtoEquals("""
name: 'c' op: 'AttrDefault' attr { key: 'a' value { s: 'kiwi' } }
""", op.node_def)
def testAttrListDefault(self):
self._add_op("name: 'AttrListDefault' "
"attr { name: 'a' type: 'list(int)' "
" default_value { list { i: 5 i: 15 } } }")
op = self._lib.apply_op("AttrListDefault", a=None, name="b")
self.assertProtoEquals("""
name: 'b' op: 'AttrListDefault'
attr { key: 'a' value { list { i: 5 i: 15 } } }
""", op.node_def)
op = self._lib.apply_op("AttrListDefault", a=[3], name="a")
self.assertProtoEquals("""
name: 'a' op: 'AttrListDefault'
attr { key: 'a' value { list { i: 3 } } }
""", op.node_def)
op = self._lib.apply_op("AttrListDefault", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'AttrListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
def testAttrEmptyListDefault(self):
self._add_op("name: 'AttrEmptyListDefault' "
"attr { name: 'a' type: 'list(float)' "
" default_value { list { } } }")
op = self._lib.apply_op("AttrEmptyListDefault", a=None, name="b")
self.assertProtoEquals("""
name: 'b' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
op = self._lib.apply_op("AttrEmptyListDefault", a=[3], name="a")
self.assertProtoEquals("""
name: 'a' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { f: 3 } } }
""", op.node_def)
op = self._lib.apply_op("AttrEmptyListDefault", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
def testReservedAttr(self):
self._add_op("name: 'ReservedAttr' "
"attr { name: 'range' type: 'int' } ")
op = self._lib.apply_op("ReservedAttr", range_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedAttr' attr { key: 'range' value { i: 7 } }
""", op.node_def)
def testDefaultAttrType(self):
self._add_op("name: 'AttrTypeDefault' "
"input_arg { name: 'a' type_attr: 'T' } "
"attr { name: 'T' type: 'type' "
" default_value { type: DT_INT32 } }")
    # Give an input (an empty list) whose element type cannot be inferred.
op = self._lib.apply_op("AttrTypeDefault", a=[], name="n")
self.assertProtoEquals("""
name: 'n' op: 'AttrTypeDefault' input: 'n/a'
attr { key: 'T' value { type: DT_INT32 } }
""", op.node_def)
    # Give an input whose type can be inferred as different
    # from the default.
op = self._lib.apply_op("AttrTypeDefault", a=[1.0], name="f")
self.assertProtoEquals("""
name: 'f' op: 'AttrTypeDefault' input: 'f/a'
attr { key: 'T' value { type: DT_FLOAT } }
""", op.node_def)
def testDefaultListAttrType(self):
self._add_op("name: 'AttrListTypeDefault' "
"input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
"input_arg { name: 'b' type_attr: 'T' number_attr: 'N' } "
"attr { name: 'T' type: 'type' "
" default_value { type: DT_INT32 } }"
"attr { name: 'N' type: 'int' }")
    # Give an input whose type can be inferred as different
    # from the default.
op = self._lib.apply_op("AttrListTypeDefault", a=[1.0], b=[2.0], name="n")
self.assertProtoEquals("""
name: 'n' op: 'AttrListTypeDefault' input: 'n/a_0' input: 'n/b_0'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 1 } }
""", op.node_def)
def testNIntsIn(self):
self._add_op("name: 'NIntsIn' "
"input_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
"attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
op = self._lib.apply_op("NIntsIn", a=[1, 2], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NIntsIn' input: 'n/a_0' input: 'n/a_1'
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NIntsIn", a=[5, 4, 3, 2, 1], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NIntsIn'
input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
attr { key: 'N' value { i: 5 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn", a=["foo", "bar"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have types "
"[string, string] that do not match expected type int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn",
a=[self.Tensor(dtypes.string),
self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have "
"types [string, string] that do not match expected type "
"int32.")
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NIntsIn", a=[99])
self.assertEqual(str(cm.exception),
"List argument 'a' to 'NIntsIn' Op "
"with length 1 shorter than "
"minimum length 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn", a=[38, "bar"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have types "
"[int32, string] that do not match expected type int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn",
a=[self.Tensor(dtypes.int32),
self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op "
"have types [int32, string] that do not match expected "
"type int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' argument "
"to 'NIntsIn' Op, not ")
def testNPolymorphicIn(self):
self._add_op("name: 'NPolymorphicIn' "
"input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
"attr { name: 'T' type: 'type' } "
"attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
op = self._lib.apply_op("NPolymorphicIn", a=[1, 2], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NPolymorphicIn' input: 'n/a_0' input: 'n/a_1'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn", a=[5, 4, 3, 2, 1], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NPolymorphicIn'
input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 5 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn", a=["foo", "bar"], name="p")
self.assertProtoEquals("""
name: 'p' op: 'NPolymorphicIn' input: 'p/a_0' input: 'p/a_1'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn",
a=[1, self.Tensor(dtypes.float32, name="x")],
name="q")
self.assertProtoEquals("""
name: 'q' op: 'NPolymorphicIn' input: 'q/a_0' input: 'x'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn",
a=[self.Tensor(dtypes.float32, name="y"),
self.Tensor(dtypes.float32_ref, name="z")],
name="r")
self.assertProtoEquals("""
name: 'r' op: 'NPolymorphicIn' input: 'y' input: 'z'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[99])
self.assertEqual(str(cm.exception),
"List argument 'a' to 'NPolymorphicIn' Op with length 1 "
"shorter than minimum length 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[38, "bar"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, string] that don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[38, self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, string] that don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[38, None])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, <NOT CONVERTIBLE TO TENSOR>] that "
"don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn",
a=["abcd", self.Tensor(dtypes.int32)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [string, int32] that don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' argument "
"to 'NPolymorphicIn' Op, not ")
def testNPolymorphicRestrictIn(self):
self._add_op("name: 'NPolymorphicRestrictIn' "
"input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
"attr { name: 'T' type: 'type' allowed_values { "
" list { type: DT_STRING type: DT_BOOL } } } "
"attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
op = self._lib.apply_op("NPolymorphicRestrictIn", a=["foo", "bar"],
name="p")
self.assertProtoEquals("""
name: 'p' op: 'NPolymorphicRestrictIn' input: 'p/a_0' input: 'p/a_1'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicRestrictIn",
a=[False, True, False],
name="b")
self.assertProtoEquals("""
name: 'b' op: 'NPolymorphicRestrictIn'
input: 'b/a_0' input: 'b/a_1' input: 'b/a_2'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicRestrictIn", a=[1, 2])
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 not in "
"list of allowed values: string, bool")
def testNInTwice(self):
self._add_op("name: 'NInTwice' "
"input_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
"input_arg { name: 'b' type: DT_STRING number_attr: 'N' } "
"attr { name: 'N' type: 'int' has_minimum: true minimum: 0 }")
op = self._lib.apply_op("NInTwice", a=[1, 2], b=["one", "two"], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInTwice'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NInTwice", a=[], b=[], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NInTwice' attr { key: 'N' value { i: 0 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NInTwice", a=[1, 2, 3], b=["too short"])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInTwice' Op "
"with length 1 must match "
"length 3 of argument 'a'.")
def testNInPolymorphicTwice(self):
self._add_op("name: 'NInPolymorphicTwice' "
"input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
"input_arg { name: 'b' type_attr: 'T' number_attr: 'N' } "
"attr { name: 'T' type: 'type' } "
"attr { name: 'N' type: 'int' has_minimum: true minimum: 0 }")
op = self._lib.apply_op("NInPolymorphicTwice", a=[1, 2], b=[3, 4], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInPolymorphicTwice'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NInPolymorphicTwice", a=[1, 2, 3], b=[5])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInPolymorphicTwice' Op "
"with length 1 "
"must match length 3 of argument 'a'.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NInPolymorphicTwice", a=[1, 2], b=["one", "two"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of 'NInPolymorphicTwice' "
"Op have types [string, string] that do not match type "
"int32 inferred from earlier arguments.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NInPolymorphicTwice",
a=[self.Tensor(dtypes.int32)],
b=[self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of "
"'NInPolymorphicTwice' Op have types [string] that do not "
"match type int32 inferred from earlier arguments.")
def testNInTwoTypeVariables(self):
self._add_op("name: 'NInTwoTypeVariables' "
"input_arg { name: 'a' type_attr: 'S' number_attr: 'N' } "
"input_arg { name: 'b' type_attr: 'T' number_attr: 'N' } "
"attr { name: 'S' type: 'type' } "
"attr { name: 'T' type: 'type' } "
"attr { name: 'N' type: 'int' has_minimum: true minimum: 0 }")
op = self._lib.apply_op("NInTwoTypeVariables",
a=[1, 2],
b=[True, False],
name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInTwoTypeVariables'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NInTwoTypeVariables", a=[1, 2], b=[3, 4], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NInTwoTypeVariables'
input: 'o/a_0' input: 'o/a_1' input: 'o/b_0' input: 'o/b_1'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NInTwoTypeVariables",
a=[self.Tensor(dtypes.int32, name="q")],
b=[self.Tensor(dtypes.string, name="r")],
name="p")
self.assertProtoEquals("""
name: 'p' op: 'NInTwoTypeVariables' input: 'q' input: 'r'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 1 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NInTwoTypeVariables", a=[1, 2, 3], b=["5"])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInTwoTypeVariables' Op "
"with length 1 "
"must match length 3 of argument 'a'.")
def testInPolymorphicTwice(self):
self._add_op("name: 'InPolymorphicTwice' "
"input_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
"input_arg { name: 'b' type_attr: 'T' number_attr: 'M' } "
"attr { name: 'T' type: 'type' } "
"attr { name: 'N' type: 'int' has_minimum: true minimum: 0 } "
"attr { name: 'M' type: 'int' has_minimum: true minimum: 0 } ")
op = self._lib.apply_op("InPolymorphicTwice", a=[8], b=[3, 4, 5], name="n")
self.assertProtoEquals("""
name: 'n' op: 'InPolymorphicTwice'
input: 'n/a_0' input: 'n/b_0' input: 'n/b_1' input: 'n/b_2'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 1 } }
attr { key: 'M' value { i: 3 } }
""", op.node_def)
op = self._lib.apply_op("InPolymorphicTwice", a=[8], b=[], name="o")
self.assertProtoEquals("""
name: 'o' op: 'InPolymorphicTwice' input: 'o/a_0'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 1 } }
attr { key: 'M' value { i: 0 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("InPolymorphicTwice", a=[], b=[3, 4, 5])
self.assertEqual(str(cm.exception),
"Don't know how to infer type variable from empty input "
"list passed to input 'a' of 'InPolymorphicTwice' Op.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("InPolymorphicTwice", a=[1, 2], b=["one", "two"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of 'InPolymorphicTwice' Op "
"have types [string, string] that do not match type int32 "
"inferred from earlier arguments.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("InPolymorphicTwice",
a=[self.Tensor(dtypes.int32)],
b=[self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of 'InPolymorphicTwice' "
"Op have types [string] that do not match type int32 "
"inferred from earlier arguments.")
def testNIntsOut(self):
self._add_op("name: 'NIntsOut' "
"output_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
"attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
out1, out2 = self._lib.apply_op("NIntsOut", N=2, name="n")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'n' op: 'NIntsOut' attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3, out4, out5 = self._lib.apply_op(
"NIntsOut", N=5, name="o")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertEqual(dtypes.int32, out4.dtype)
self.assertEqual(dtypes.int32, out5.dtype)
self.assertProtoEquals("""
name: 'o' op: 'NIntsOut' attr { key: 'N' value { i: 5 } }
""", out5.op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NIntsOut", N=1)
self.assertEqual(str(cm.exception),
"Attr 'N' of 'NIntsOut' Op passed 1 less than minimum 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsOut", N=[3])
self.assertEqual(str(cm.exception),
"Expected int for argument 'N' not [3].")
def testNIntsOutDefault(self):
self._add_op("name: 'NIntsOutDefault' "
"output_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
"attr { name: 'N' type: 'int' has_minimum: true minimum: 2"
" default_value { i:3 } }")
out1, out2, out3 = self._lib.apply_op(
"NIntsOutDefault", N=None, name="z")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertProtoEquals("""
name: 'z' op: 'NIntsOutDefault' attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
out1, out2 = self._lib.apply_op("NIntsOutDefault", N=2, name="y")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'y' op: 'NIntsOutDefault' attr { key: 'N' value { i: 2 } }
""", out2.op.node_def)
def testNPolymorphicOut(self):
self._add_op("name: 'NPolymorphicOut' "
"output_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
"attr { name: 'T' type: 'type' } "
"attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
out1, out2 = self._lib.apply_op("NPolymorphicOut",
N=2,
T=dtypes.int32,
name="n")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'n' op: 'NPolymorphicOut'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicOut", T=dtypes.string, N=3, name="o")
self.assertEqual(dtypes.string, out1.dtype)
self.assertEqual(dtypes.string, out2.dtype)
self.assertEqual(dtypes.string, out3.dtype)
self.assertProtoEquals("""
name: 'o' op: 'NPolymorphicOut'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 3 } }
""", out3.op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NPolymorphicOut", N=1, T=dtypes.string)
self.assertEqual(str(cm.exception),
"Attr 'N' of 'NPolymorphicOut' Op "
"passed 1 less than minimum 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicOut", N=3, T=[dtypes.string])
self.assertEqual(
str(cm.exception),
"Expected DataType for argument 'T' not [tf.string].")
def testNPolymorphicOutDefault(self):
self._add_op("name: 'NPolymorphicOutDefault' "
"output_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
"attr { name: 'T' type: 'type'"
" default_value { type: DT_BOOL } } "
"attr { name: 'N' type: 'int' has_minimum: true minimum: 2 "
" default_value { i: 2 } }")
out1, out2 = self._lib.apply_op(
"NPolymorphicOutDefault", N=None, T=None, name="r")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertProtoEquals("""
name: 'r' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicOutDefault", N=3, T=None, name="s")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertEqual(dtypes.bool, out3.dtype)
self.assertProtoEquals("""
name: 's' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
out1, out2 = self._lib.apply_op(
"NPolymorphicOutDefault", N=None, T=dtypes.int32, name="t")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 't' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicOutDefault", N=3, T=dtypes.int32, name="u")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertProtoEquals("""
name: 'u' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
def testNPolymorphicRestrictOut(self):
self._add_op("name: 'NPolymorphicRestrictOut' "
"output_arg { name: 'a' type_attr: 'T' number_attr: 'N' } "
"attr { name: 'T' type: 'type' allowed_values { "
" list { type: DT_STRING type: DT_BOOL } } } "
"attr { name: 'N' type: 'int' has_minimum: true minimum: 2 }")
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicRestrictOut", N=3, T=dtypes.bool, name="u")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertEqual(dtypes.bool, out3.dtype)
self.assertProtoEquals("""
name: 'u' op: 'NPolymorphicRestrictOut'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicRestrictOut", N=2, T=dtypes.int32)
self.assertEqual(str(cm.exception),
"Value passed to parameter 'T' has DataType int32 "
"not in list of allowed values: string, bool")
def testRef(self):
self._add_op("name: 'RefIn' "
"input_arg { name: 'a' type_attr: 'T' is_ref: true } "
"attr { name: 'T' type: 'type' } ")
self._add_op("name: 'TwoRefsIn' "
"input_arg { name: 'a' type_attr: 'T' is_ref: true } "
"input_arg { name: 'b' type_attr: 'T' is_ref: true } "
"attr { name: 'T' type: 'type' } ")
self._add_op("name: 'RefOut' "
"output_arg { name: 'a' type_attr: 'T' is_ref: true } "
"attr { name: 'T' type: 'type' } ")
out = self._lib.apply_op("RefOut", T=dtypes.bool, name="o")
self.assertEqual(dtypes.bool_ref, out.dtype)
self.assertProtoEquals("""
name: 'o' op: 'RefOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
op = self._lib.apply_op("RefIn", a=out, name="i")
self.assertProtoEquals("""
name: 'i' op: 'RefIn' input: 'o'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: "_class" value { list { s: "loc:@o" } } }
""", op.node_def)
# Can pass ref to non-ref input.
out = self._lib.apply_op("RefOut", T=dtypes.int32, name="r")
out = self._lib.apply_op("Simple", a=out, name="s")
self.assertProtoEquals("""
name: 's' op: 'Simple' input: 'r'
""", out.op.node_def)
# Can't pass non-ref to ref input.
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("RefIn", a=2)
self.assertEqual(str(cm.exception),
"Input 'a' of 'RefIn' Op requires l-value input")
input_a = self._lib.apply_op("RefOut", T=dtypes.int32, name="t")
input_b = self._lib.apply_op("RefOut", T=dtypes.int32, name="u")
op = self._lib.apply_op("TwoRefsIn", a=input_a, b=input_b, name="v")
# NOTE(mrry): The order of colocation constraints is an implementation
# detail.
self.assertProtoEquals("""
name: 'v' op: 'TwoRefsIn' input: 't' input: 'u'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: "_class" value { list { s: "loc:@t" s: "loc:@u" } } }
""", op.node_def)
def testSpecifyDevice(self):
with self._g.device("/job:ADevice"):
self._lib.apply_op("Simple", a=3)
# We look at the whole graph here to make sure the Const op is also given
# the specified device.
graph_def = self._g.as_graph_def()
self.assertEqual(len(graph_def.node), 2)
for node in graph_def.node:
self.assertDeviceEqual(node.device, "/job:ADevice")
def testStructuredOutputSingleList(self):
self._add_op("name: 'SimpleStruct' "
"output_arg { name: 'a' type: DT_INT32 number_attr: 'n_a' } "
"attr { name: 'n_a' type: 'int' }")
for n_a in [0, 1, 3]:
a = self._lib.apply_op("SimpleStruct", n_a=n_a)
self.assertTrue(isinstance(a, list))
self.assertEqual(n_a, len(a))
def testStructuredOutputListAndSingle(self):
self._add_op("name: 'MixedStruct' "
"output_arg { name: 'a' type: DT_INT32 number_attr: 'n_a' } "
"output_arg { name: 'b' type: DT_FLOAT } "
"attr { name: 'n_a' type: 'int' }")
for n_a in [0, 1, 3]:
a, b = self._lib.apply_op("MixedStruct", n_a=n_a)
self.assertTrue(isinstance(a, list))
self.assertEqual(n_a, len(a))
self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
self.assertTrue(isinstance(b, ops.Tensor))
self.assertEqual(dtypes.float32, b.dtype)
def testStructuredOutputMultipleLists(self):
self._add_op("name: 'ComplexStruct' "
"output_arg { name: 'a' type: DT_INT32 number_attr: 'n_a' } "
"output_arg { name: 'b' type: DT_INT64 number_attr: 'n_b' } "
"output_arg { name: 'c' type_list_attr: 't_c' } "
"attr { name: 'n_a' type: 'int' } "
"attr { name: 'n_b' type: 'int' } "
"attr { name: 't_c' type: 'list(type)' }")
for n_a in [0, 1, 3]:
for n_b in [0, 1, 3]:
for t_c in [[],
[dtypes.int32],
[dtypes.int32, dtypes.float32]]:
a, b, c = self._lib.apply_op("ComplexStruct",
n_a=n_a,
n_b=n_b,
t_c=t_c)
self.assertEqual(n_a, len(a))
self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
self.assertEqual(n_b, len(b))
self.assertTrue(all(x.dtype == dtypes.int64 for x in b))
self.assertEqual(t_c, [x.dtype for x in c])
class OpDefLibraryGraphTest(test_util.TensorFlowTestCase):
def setUp(self):
self._lib = OpDefLibrary()
self._g = ops.Graph()
self._add_op("name: 'Simple' input_arg { name: 'a' type: DT_INT32 } "
"output_arg { name: 'out' type: DT_FLOAT }")
self._add_op("name: 'Binary' "
"input_arg { name: 'a' type_attr: 'T' } "
"input_arg { name: 'b' type_attr: 'T' } "
"output_arg { name: 'out' type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
def _add_op(self, ascii):
op_def = op_def_pb2.OpDef()
text_format.Merge(ascii, op_def)
self._lib.add_op(op_def)
def testNoGraph(self):
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(out.graph, ops.get_default_graph())
def testDefaultGraph(self):
with self._g.as_default():
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(out.graph, self._g)
def testDifferentGraphFails(self):
with self._g.as_default():
a = self._lib.apply_op("Simple", a=3)
other_g = ops.Graph()
with other_g.as_default():
b = self._lib.apply_op("Simple", a=4)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("Binary", a=a, b=b)
self.assertTrue("must be from the same graph" in str(cm.exception))
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
tjsavage/sfcsdatabase | django/forms/extras/widgets.py | 35 | 4446 | """
Extra HTML Widget classes
"""
import time
import datetime
import re
from django.forms.widgets import Widget, Select
from django.utils import datetime_safe
from django.utils.dates import MONTHS
from django.utils.safestring import mark_safe
from django.utils.formats import get_format
from django.conf import settings
__all__ = ('SelectDateWidget',)
RE_DATE = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')
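# Matches non-localized ISO-style dates, 'YYYY-M-D' through 'YYYY-MM-DD',
# as produced by value_from_datadict() when USE_L10N is off.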
class SelectDateWidget(Widget):
"""
A Widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = (0, '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
def __init__(self, attrs=None, years=None, required=True):
# years is an optional list/tuple of years to use in the "year" select box.
self.attrs = attrs or {}
self.required = required
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year+10)
def render(self, name, value, attrs=None):
try:
year_val, month_val, day_val = value.year, value.month, value.day
except AttributeError:
year_val = month_val = day_val = None
if isinstance(value, basestring):
if settings.USE_L10N:
try:
input_format = get_format('DATE_INPUT_FORMATS')[0]
# Python 2.4 compatibility:
# v = datetime.datetime.strptime(value, input_format)
# would be clearer, but datetime.strptime was added in
# Python 2.5
v = datetime.datetime(*(time.strptime(value, input_format)[0:6]))
year_val, month_val, day_val = v.year, v.month, v.day
except ValueError:
pass
else:
match = RE_DATE.match(value)
if match:
year_val, month_val, day_val = [int(v) for v in match.groups()]
choices = [(i, i) for i in self.years]
year_html = self.create_select(name, self.year_field, value, year_val, choices)
choices = MONTHS.items()
month_html = self.create_select(name, self.month_field, value, month_val, choices)
choices = [(i, i) for i in range(1, 32)]
day_html = self.create_select(name, self.day_field, value, day_val, choices)
format = get_format('DATE_FORMAT')
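        # Walk the localized DATE_FORMAT string and emit the year/month/day
        # selects in the order its format characters dictate.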
escaped = False
output = []
for char in format:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
output.append(year_html)
elif char in 'bFMmNn':
output.append(month_html)
elif char in 'dj':
output.append(day_html)
return mark_safe(u'\n'.join(output))
def id_for_label(self, id_):
return '%s_month' % id_
id_for_label = classmethod(id_for_label)
def value_from_datadict(self, data, files, name):
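        # Each select posts back as <name>_year/_month/_day; "0" is the value
        # of the '---' empty choice, so all three being "0" means no date.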
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == "0":
return None
if y and m and d:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
pass
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
else:
return '%s-%s-%s' % (y, m, d)
return data.get(name, None)
def create_select(self, name, field, value, val, choices):
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
if not (self.required and val):
choices.insert(0, self.none_value)
local_attrs = self.build_attrs(id=field % id_)
s = Select(choices=choices)
select_html = s.render(field % name, val, local_attrs)
return select_html
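# Minimal usage sketch (hypothetical form; the field and year range are
# illustrative only):
#
#   from django import forms
#
#   class EventForm(forms.Form):
#       date = forms.DateField(widget=SelectDateWidget(years=range(2010, 2021)))
#
# Rendering emits three <select> elements (date_month, date_day, date_year),
# and value_from_datadict() reassembles them into a single date value.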
| bsd-3-clause |
anirudhSK/chromium | third_party/tlslite/tlslite/integration/ClientHelper.py | 86 | 6851 | """
A helper class for using TLS Lite with stdlib clients
(httplib, xmlrpclib, imaplib, poplib).
"""
from tlslite.Checker import Checker
class ClientHelper:
"""This is a helper class used to integrate TLS Lite with various
TLS clients (e.g. poplib, smtplib, httplib, etc.)"""
def __init__(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings = None):
"""
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Then you should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.username = None
self.password = None
self.sharedKey = None
self.certChain = None
self.privateKey = None
self.checker = None
#SRP Authentication
if username and password and not \
(sharedKey or certChain or privateKey):
self.username = username
self.password = password
#Shared Key Authentication
elif username and sharedKey and not \
(password or certChain or privateKey):
self.username = username
self.sharedKey = sharedKey
#Certificate Chain Authentication
elif certChain and privateKey and not \
(username or password or sharedKey):
self.certChain = certChain
self.privateKey = privateKey
#No Authentication
elif not password and not username and not \
sharedKey and not certChain and not privateKey:
pass
else:
raise ValueError("Bad parameters")
#Authenticate the server based on its cryptoID or fingerprint
if sharedKey and (cryptoID or protocol or x509Fingerprint):
raise ValueError("Can't use shared keys with other forms of"\
"authentication")
self.checker = Checker(cryptoID, protocol, x509Fingerprint,
x509TrustList, x509CommonName)
self.settings = settings
self.tlsSession = None
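    # Usage sketch (hypothetical credentials; the stdlib integrations
    # normally drive this class rather than user code):
    #
    #   helper = ClientHelper(username="alice", password="secret")
    #   helper._handshake(tlsConnection)   # runs the SRP handshake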
def _handshake(self, tlsConnection):
if self.username and self.password:
tlsConnection.handshakeClientSRP(username=self.username,
password=self.password,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
elif self.username and self.sharedKey:
tlsConnection.handshakeClientSharedKey(username=self.username,
sharedKey=self.sharedKey,
settings=self.settings)
else:
tlsConnection.handshakeClientCert(certChain=self.certChain,
privateKey=self.privateKey,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
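        # Descriptive note: the session cached below is fed back in via the
        # session= arguments above, so a later handshake through this helper
        # can attempt session resumption.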
self.tlsSession = tlsConnection.session | bsd-3-clause |
defionscode/ansible | lib/ansible/constants.py | 16 | 7696 | # Copyright: (c) 2012-2014, Michael DeHaan <[email protected]>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ast import literal_eval
from jinja2 import Template
from string import ascii_letters, digits
from ansible.module_utils._text import to_text
from ansible.module_utils.parsing.convert_bool import boolean, BOOLEANS_TRUE
from ansible.module_utils.six import string_types
from ansible.config.manager import ConfigManager, ensure_type, get_ini_config_value
def _warning(msg):
    ''' display is not guaranteed here, nor is it guaranteed to be the full class; try anyway and fall back to sys.stderr.write '''
try:
from __main__ import display
display.warning(msg)
except Exception:
import sys
sys.stderr.write(' [WARNING] %s\n' % (msg))
def _deprecated(msg, version='2.8'):
    ''' display is not guaranteed here, nor is it guaranteed to be the full class; try anyway and fall back to sys.stderr.write '''
try:
from __main__ import display
display.deprecated(msg, version=version)
except Exception:
import sys
sys.stderr.write(' [DEPRECATED] %s, to be removed in %s\n' % (msg, version))
def mk_boolean(value):
''' moved to module_utils'''
_deprecated('ansible.constants.mk_boolean() is deprecated. Use ansible.module_utils.parsing.convert_bool.boolean() instead')
return boolean(value, strict=False)
def get_config(parser, section, key, env_var, default_value, value_type=None, expand_relative_paths=False):
    ''' kept for backwards compatibility, but deprecated '''
    _deprecated('ansible.constants.get_config() is deprecated. There is a new config API, see porting docs.')
value = None
# small reconstruction of the old code env/ini/default
value = os.environ.get(env_var, None)
if value is None:
try:
value = get_ini_config_value(parser, {'key': key, 'section': section})
except Exception:
pass
if value is None:
value = default_value
value = ensure_type(value, value_type)
return value
def set_constant(name, value, export=vars()):
''' sets constants and returns resolved options dict '''
export[name] = value
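# Note: set_constant's 'export' argument defaults to this module's own vars(),
# so each call publishes the value as a module-level attribute
# (ansible.constants.<NAME>).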
# CONSTANTS ### yes, actual ones
BECOME_METHODS = ['sudo', 'su', 'pbrun', 'pfexec', 'doas', 'dzdo', 'ksu', 'runas', 'pmrun', 'enable', 'machinectl']
BECOME_ERROR_STRINGS = {
'sudo': 'Sorry, try again.',
'su': 'Authentication failure',
'pbrun': '',
'pfexec': '',
'doas': 'Permission denied',
'dzdo': '',
'ksu': 'Password incorrect',
'pmrun': 'You are not permitted to run this command',
'enable': '',
'machinectl': '',
} # FIXME: deal with i18n
BECOME_MISSING_STRINGS = {
'sudo': 'sorry, a password is required to run sudo',
'su': '',
'pbrun': '',
'pfexec': '',
'doas': 'Authorization required',
'dzdo': '',
'ksu': 'No password given',
'pmrun': '',
'enable': '',
'machinectl': '',
} # FIXME: deal with i18n
BLACKLIST_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt', '.rst')
BOOL_TRUE = BOOLEANS_TRUE
CONTROLER_LANG = os.getenv('LANG', 'en_US.UTF-8')
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict') # characters included in auto-generated passwords
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
# FIXME: expand to other plugins, but never doc fragments
CONFIGURABLE_PLUGINS = ('cache', 'callback', 'connection', 'inventory', 'lookup', 'shell', 'cliconf', 'httpapi')
# NOTE: always update the docs/docsite/Makefile to match
DOCUMENTABLE_PLUGINS = CONFIGURABLE_PLUGINS + ('module', 'strategy', 'vars')
IGNORE_FILES = ("COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES") # ignore during module search
INTERNAL_RESULT_KEYS = ('add_host', 'add_group')
LOCALHOST = ('127.0.0.1', 'localhost', '::1')
MODULE_REQUIRE_ARGS = ('command', 'win_command', 'shell', 'win_shell', 'raw', 'script')
MODULE_NO_JSON = ('command', 'win_command', 'shell', 'win_shell', 'raw')
RESTRICTED_RESULT_KEYS = ('ansible_rsync_path', 'ansible_playbook_python')
TREE_DIR = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
# FIXME: remove once play_context mangling is removed
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the PlayContext
# object. The dictionary values are tuples, to account for aliases
# in variable names.
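# (For example, both 'ansible_ssh_port' and 'ansible_port' populate the
# 'port' field of the PlayContext, per the mapping below.)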
COMMON_CONNECTION_VARS = frozenset(('ansible_connection', 'ansible_host', 'ansible_user', 'ansible_shell_executable',
'ansible_port', 'ansible_pipelining', 'ansible_password', 'ansible_timeout',
'ansible_shell_type', 'ansible_module_compression', 'ansible_private_key_file'))
MAGIC_VARIABLE_MAPPING = dict(
# base
connection=('ansible_connection', ),
module_compression=('ansible_module_compression', ),
shell=('ansible_shell_type', ),
executable=('ansible_shell_executable', ),
# connection common
remote_addr=('ansible_ssh_host', 'ansible_host'),
remote_user=('ansible_ssh_user', 'ansible_user'),
password=('ansible_ssh_pass', 'ansible_password'),
port=('ansible_ssh_port', 'ansible_port'),
pipelining=('ansible_ssh_pipelining', 'ansible_pipelining'),
timeout=('ansible_ssh_timeout', 'ansible_timeout'),
private_key_file=('ansible_ssh_private_key_file', 'ansible_private_key_file'),
# networking modules
network_os=('ansible_network_os', ),
connection_user=('ansible_connection_user',),
# ssh TODO: remove
ssh_executable=('ansible_ssh_executable', ),
ssh_common_args=('ansible_ssh_common_args', ),
sftp_extra_args=('ansible_sftp_extra_args', ),
scp_extra_args=('ansible_scp_extra_args', ),
ssh_extra_args=('ansible_ssh_extra_args', ),
ssh_transfer_method=('ansible_ssh_transfer_method', ),
# docker TODO: remove
docker_extra_args=('ansible_docker_extra_args', ),
# become
become=('ansible_become', ),
become_method=('ansible_become_method', ),
become_user=('ansible_become_user', ),
become_pass=('ansible_become_password', 'ansible_become_pass'),
become_exe=('ansible_become_exe', ),
become_flags=('ansible_become_flags', ),
# deprecated
sudo=('ansible_sudo', ),
sudo_user=('ansible_sudo_user', ),
sudo_pass=('ansible_sudo_password', 'ansible_sudo_pass'),
sudo_exe=('ansible_sudo_exe', ),
sudo_flags=('ansible_sudo_flags', ),
su=('ansible_su', ),
su_user=('ansible_su_user', ),
su_pass=('ansible_su_password', 'ansible_su_pass'),
su_exe=('ansible_su_exe', ),
su_flags=('ansible_su_flags', ),
)
# POPULATE SETTINGS FROM CONFIG ###
config = ConfigManager()
# Generate constants from config
for setting in config.data.get_settings():
value = setting.value
if setting.origin == 'default' and \
isinstance(setting.value, string_types) and \
(setting.value.startswith('{{') and setting.value.endswith('}}')):
try:
t = Template(setting.value)
value = t.render(vars())
try:
value = literal_eval(value)
except ValueError:
pass # not a python data structure
except Exception:
pass # not templatable
value = ensure_type(value, setting.type)
set_constant(setting.name, value)
for warn in config.WARNINGS:
_warning(warn)
| gpl-3.0 |
FHannes/intellij-community | python/helpers/py3only/docutils/languages/fi.py | 52 | 1958 | # $Id: fi.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Asko Soukka <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Finnish-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': 'Tekij\u00e4',
'authors': 'Tekij\u00e4t',
'organization': 'Yhteis\u00f6',
'address': 'Osoite',
'contact': 'Yhteystiedot',
'version': 'Versio',
'revision': 'Vedos',
'status': 'Tila',
'date': 'P\u00e4iv\u00e4ys',
'copyright': 'Tekij\u00e4noikeudet',
'dedication': 'Omistuskirjoitus',
'abstract': 'Tiivistelm\u00e4',
'attention': 'Huomio!',
'caution': 'Varo!',
'danger': '!VAARA!',
'error': 'Virhe',
'hint': 'Vihje',
'important': 'T\u00e4rke\u00e4\u00e4',
'note': 'Huomautus',
'tip': 'Neuvo',
'warning': 'Varoitus',
'contents': 'Sis\u00e4llys'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
'tekij\u00e4': 'author',
'tekij\u00e4t': 'authors',
'yhteis\u00f6': 'organization',
'osoite': 'address',
'yhteystiedot': 'contact',
'versio': 'version',
'vedos': 'revision',
'tila': 'status',
'p\u00e4iv\u00e4ys': 'date',
'tekij\u00e4noikeudet': 'copyright',
'omistuskirjoitus': 'dedication',
'tiivistelm\u00e4': 'abstract'}
"""Finnish (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| apache-2.0 |
pgmpy/pgmpy | pgmpy/utils/sets.py | 2 | 1145 | from collections.abc import Iterable
from itertools import combinations, chain
def _variable_or_iterable_to_set(x):
"""
Convert variable, set, or iterable x to a frozenset.
If x is None, returns the empty set.
Parameters
    ----------
x : None, str or Iterable[str]
Returns
-------
frozenset : frozenset representation of string or iterable input
"""
if x is None:
return frozenset([])
if isinstance(x, str):
return frozenset([x])
if not isinstance(x, Iterable) or not all(isinstance(xx, str) for xx in x):
raise ValueError(
f"{x} is expected to be either a string, set of strings, or an iterable of strings"
)
return frozenset(x)
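# Illustrative examples (following the docstring contract above):
#   _variable_or_iterable_to_set(None) -> frozenset()
#   _variable_or_iterable_to_set('x') -> frozenset({'x'})
#   _variable_or_iterable_to_set(['x', 'y']) -> frozenset({'x', 'y'})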
def _powerset(iterable):
"""
https://docs.python.org/3/library/itertools.html#recipes
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
Parameters
----------
iterable: any iterable
Returns
-------
chain: a generator of the powerset of the input
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
| mit |
jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/numpy/core/einsumfunc.py | 31 | 35539 | """
Implementation of optimized einsum.
"""
from __future__ import division, absolute_import, print_function
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asarray, asanyarray, result_type
__all__ = ['einsum', 'einsum_path']
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
einsum_symbols_set = set(einsum_symbols)
def _compute_size_by_dict(indices, idx_dict):
"""
Computes the product of the elements in indices based on the dictionary
idx_dict.
Parameters
----------
indices : iterable
Indices to base the product on.
idx_dict : dictionary
Dictionary of index sizes
Returns
-------
ret : int
The resulting product.
Examples
--------
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
90
"""
ret = 1
for i in indices:
ret *= idx_dict[i]
return ret
def _find_contraction(positions, input_sets, output_set):
"""
Finds the contraction for a given set of input and output sets.
    Parameters
----------
positions : iterable
Integer positions of terms used in the contraction.
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
Returns
-------
new_result : set
The indices of the resulting contraction
remaining : list
List of sets that have not been contracted, the new set is appended to
the end of this list
idx_removed : set
Indices removed from the entire contraction
idx_contraction : set
The indices used in the current contraction
Examples
--------
# A simple dot product test case
>>> pos = (0, 1)
>>> isets = [set('ab'), set('bc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
# A more complex case with additional terms in the contraction
>>> pos = (0, 2)
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
"""
idx_contract = set()
idx_remain = output_set.copy()
remaining = []
for ind, value in enumerate(input_sets):
if ind in positions:
idx_contract |= value
else:
remaining.append(value)
idx_remain |= value
new_result = idx_remain & idx_contract
idx_removed = (idx_contract - new_result)
remaining.append(new_result)
return (new_result, remaining, idx_removed, idx_contract)
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
"""
Computes all possible pair contractions, sieves the results based
on ``memory_limit`` and returns the lowest cost path. This algorithm
scales factorial with respect to the elements in the list ``input_sets``.
    Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The optimal contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('')
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _path__optimal_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
full_results = [(0, [], input_sets)]
for iteration in range(len(input_sets) - 1):
iter_results = []
# Compute all unique pairs
comb_iter = []
for x in range(len(input_sets) - iteration):
for y in range(x + 1, len(input_sets) - iteration):
comb_iter.append((x, y))
for curr in full_results:
cost, positions, remaining = curr
for con in comb_iter:
# Find the contraction
cont = _find_contraction(con, remaining, output_set)
new_result, new_input_sets, idx_removed, idx_contract = cont
# Sieve the results based on memory_limit
new_size = _compute_size_by_dict(new_result, idx_dict)
if new_size > memory_limit:
continue
# Find cost
new_cost = _compute_size_by_dict(idx_contract, idx_dict)
if idx_removed:
new_cost *= 2
# Build (total_cost, positions, indices_remaining)
new_cost += cost
new_pos = positions + [con]
iter_results.append((new_cost, new_pos, new_input_sets))
# Update list to iterate over
full_results = iter_results
# If we have not found anything return single einsum contraction
if len(full_results) == 0:
return [tuple(range(len(input_sets)))]
path = min(full_results, key=lambda x: x[0])[1]
return path
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
"""
Finds the path by contracting the best pair until the input list is
exhausted. The best pair is found by minimizing the tuple
``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
matrix multiplication or inner product operations, then Hadamard like
operations, and finally outer operations. Outer products are limited by
``memory_limit``. This algorithm scales cubically with respect to the
number of elements in the list ``input_sets``.
    Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
    memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The greedy contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('')
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _path__greedy_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
if len(input_sets) == 1:
return [(0,)]
path = []
for iteration in range(len(input_sets) - 1):
iteration_results = []
comb_iter = []
# Compute all unique pairs
for x in range(len(input_sets)):
for y in range(x + 1, len(input_sets)):
comb_iter.append((x, y))
for positions in comb_iter:
# Find the contraction
contract = _find_contraction(positions, input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
# Sieve the results based on memory_limit
if _compute_size_by_dict(idx_result, idx_dict) > memory_limit:
continue
# Build sort tuple
removed_size = _compute_size_by_dict(idx_removed, idx_dict)
cost = _compute_size_by_dict(idx_contract, idx_dict)
sort = (-removed_size, cost)
# Add contraction to possible choices
iteration_results.append([sort, positions, new_input_sets])
# If we did not find a new contraction contract remaining
if len(iteration_results) == 0:
path.append(tuple(range(len(input_sets))))
break
# Sort based on first index
best = min(iteration_results, key=lambda x: x[0])
path.append(best[1])
input_sets = best[2]
return path
def _parse_einsum_input(operands):
"""
A reproduction of einsum c side einsum parsing in python.
Returns
-------
input_strings : str
Parsed input strings
output_string : str
Parsed output string
operands : list of array_like
The operands to use in the numpy contraction
Examples
--------
The operand list is simplified to reduce printing:
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
>>> __parse_einsum_input(('...a,...a->...', a, b))
('za,xza', 'xz', [a, b])
>>> __parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
('za,xza', 'xz', [a, b])
"""
if len(operands) == 0:
raise ValueError("No input operands")
if isinstance(operands[0], str):
subscripts = operands[0].replace(" ", "")
operands = [asanyarray(v) for v in operands[1:]]
# Ensure all characters are valid
for s in subscripts:
if s in '.,->':
continue
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
else:
tmp_operands = list(operands)
operand_list = []
subscript_list = []
for p in range(len(operands) // 2):
operand_list.append(tmp_operands.pop(0))
subscript_list.append(tmp_operands.pop(0))
output_list = tmp_operands[-1] if len(tmp_operands) else None
operands = [asanyarray(v) for v in operand_list]
subscripts = ""
last = len(subscript_list) - 1
for num, sub in enumerate(subscript_list):
for s in sub:
if s is Ellipsis:
subscripts += "..."
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis")
if num != last:
subscripts += ","
if output_list is not None:
subscripts += "->"
for s in output_list:
if s is Ellipsis:
subscripts += "..."
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis")
# Check for proper "->"
if ("-" in subscripts) or (">" in subscripts):
invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
if invalid or (subscripts.count("->") != 1):
raise ValueError("Subscripts can only contain one '->'.")
# Parse ellipses
if "." in subscripts:
used = subscripts.replace(".", "").replace(",", "").replace("->", "")
unused = list(einsum_symbols_set - set(used))
ellipse_inds = "".join(unused)
longest = 0
if "->" in subscripts:
input_tmp, output_sub = subscripts.split("->")
split_subscripts = input_tmp.split(",")
out_sub = True
else:
split_subscripts = subscripts.split(',')
out_sub = False
for num, sub in enumerate(split_subscripts):
if "." in sub:
if (sub.count(".") != 3) or (sub.count("...") != 1):
raise ValueError("Invalid Ellipses.")
# Take into account numerical values
if operands[num].shape == ():
ellipse_count = 0
else:
ellipse_count = max(len(operands[num].shape), 1)
ellipse_count -= (len(sub) - 3)
if ellipse_count > longest:
longest = ellipse_count
if ellipse_count < 0:
raise ValueError("Ellipses lengths do not match.")
elif ellipse_count == 0:
split_subscripts[num] = sub.replace('...', '')
else:
rep_inds = ellipse_inds[-ellipse_count:]
split_subscripts[num] = sub.replace('...', rep_inds)
subscripts = ",".join(split_subscripts)
if longest == 0:
out_ellipse = ""
else:
out_ellipse = ellipse_inds[-longest:]
if out_sub:
subscripts += "->" + output_sub.replace("...", out_ellipse)
else:
# Special care for outputless ellipses
output_subscript = ""
tmp_subscripts = subscripts.replace(",", "")
for s in sorted(set(tmp_subscripts)):
if s not in (einsum_symbols):
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
normal_inds = ''.join(sorted(set(output_subscript) -
set(out_ellipse)))
subscripts += "->" + out_ellipse + normal_inds
    # Build output string if it does not exist
if "->" in subscripts:
input_subscripts, output_subscript = subscripts.split("->")
else:
input_subscripts = subscripts
# Build output subscripts
tmp_subscripts = subscripts.replace(",", "")
output_subscript = ""
for s in sorted(set(tmp_subscripts)):
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
# Make sure output subscripts are in the input
for char in output_subscript:
if char not in input_subscripts:
raise ValueError("Output character %s did not appear in the input"
% char)
# Make sure number operands is equivalent to the number of terms
if len(input_subscripts.split(',')) != len(operands):
raise ValueError("Number of einsum subscripts must be equal to the "
"number of operands.")
return (input_subscripts, output_subscript, operands)
def einsum_path(*operands, **kwargs):
"""
einsum_path(subscripts, *operands, optimize='greedy')
Evaluates the lowest cost contraction order for an einsum expression by
considering the creation of intermediate arrays.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
*operands : list of array_like
These are the arrays for the operation.
optimize : {bool, list, tuple, 'greedy', 'optimal'}
Choose the type of path. If a tuple is provided, the second argument is
assumed to be the maximum intermediate size created. If only a single
argument is provided the largest input or output array size is used
as a maximum intermediate size.
* if a list is given that starts with ``einsum_path``, uses this as the
contraction path
* if False no optimization is taken
* if True defaults to the 'greedy' algorithm
* 'optimal' An algorithm that combinatorially explores all possible
          ways of contracting the listed tensors and chooses the least costly
path. Scales exponentially with the number of terms in the
contraction.
* 'greedy' An algorithm that chooses the best pair contraction
at each step. Effectively, this algorithm searches the largest inner,
Hadamard, and then outer products at each step. Scales cubically with
the number of terms in the contraction. Equivalent to the 'optimal'
path for most contractions.
Default is 'greedy'.
Returns
-------
path : list of tuples
A list representation of the einsum path.
string_repr : str
A printable representation of the einsum path.
Notes
-----
The resulting path indicates which terms of the input contraction should be
contracted first, the result of this contraction is then appended to the
end of the contraction list. This list can then be iterated over until all
intermediate contractions are complete.
See Also
--------
einsum, linalg.multi_dot
Examples
--------
We can begin with a chain dot example. In this case, it is optimal to
    contract the ``b`` and ``c`` tensors first as represented by the first
element of the path ``(1, 2)``. The resulting tensor is added to the end
of the contraction and the remaining contraction ``(0, 1)`` is then
completed.
>>> a = np.random.rand(2, 2)
>>> b = np.random.rand(2, 5)
>>> c = np.random.rand(5, 2)
>>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
>>> print(path_info[0])
['einsum_path', (1, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ij,jk,kl->il
Naive scaling: 4
Optimized scaling: 3
Naive FLOP count: 1.600e+02
Optimized FLOP count: 5.600e+01
Theoretical speedup: 2.857
Largest intermediate: 4.000e+00 elements
-------------------------------------------------------------------------
scaling current remaining
-------------------------------------------------------------------------
3 kl,jk->jl ij,jl->il
3 jl,ij->il il->il
A more complex index transformation example.
>>> I = np.random.rand(10, 10, 10, 10)
>>> C = np.random.rand(10, 10)
>>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
optimize='greedy')
>>> print(path_info[0])
['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ea,fb,abcd,gc,hd->efgh
Naive scaling: 8
Optimized scaling: 5
Naive FLOP count: 8.000e+08
Optimized FLOP count: 8.000e+05
Theoretical speedup: 1000.000
Largest intermediate: 1.000e+04 elements
--------------------------------------------------------------------------
scaling current remaining
--------------------------------------------------------------------------
5 abcd,ea->bcde fb,gc,hd,bcde->efgh
5 bcde,fb->cdef gc,hd,cdef->efgh
5 cdef,gc->defg hd,defg->efgh
5 defg,hd->efgh efgh->efgh
"""
# Make sure all keywords are valid
valid_contract_kwargs = ['optimize', 'einsum_call']
unknown_kwargs = [k for (k, v) in kwargs.items() if k
not in valid_contract_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs:"
" %s" % unknown_kwargs)
# Figure out what the path really is
path_type = kwargs.pop('optimize', False)
if path_type is True:
path_type = 'greedy'
if path_type is None:
path_type = False
memory_limit = None
# No optimization or a named path algorithm
if (path_type is False) or isinstance(path_type, str):
pass
# Given an explicit path
elif len(path_type) and (path_type[0] == 'einsum_path'):
pass
# Path tuple with memory limit
elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
isinstance(path_type[1], (int, float))):
memory_limit = int(path_type[1])
path_type = path_type[0]
else:
raise TypeError("Did not understand the path: %s" % str(path_type))
# Hidden option, only einsum should call this
einsum_call_arg = kwargs.pop("einsum_call", False)
# Python side parsing
input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
subscripts = input_subscripts + '->' + output_subscript
# Build a few useful list and sets
input_list = input_subscripts.split(',')
input_sets = [set(x) for x in input_list]
output_set = set(output_subscript)
indices = set(input_subscripts.replace(',', ''))
# Get length of each unique dimension and ensure all dimensions are correct
dimension_dict = {}
for tnum, term in enumerate(input_list):
sh = operands[tnum].shape
if len(sh) != len(term):
raise ValueError("Einstein sum subscript %s does not contain the "
"correct number of indices for operand %d.",
input_subscripts[tnum], tnum)
for cnum, char in enumerate(term):
dim = sh[cnum]
if char in dimension_dict.keys():
if dimension_dict[char] != dim:
raise ValueError("Size of label '%s' for operand %d does "
"not match previous terms.", char, tnum)
else:
dimension_dict[char] = dim
# Compute size of each input array plus the output array
size_list = []
for term in input_list + [output_subscript]:
size_list.append(_compute_size_by_dict(term, dimension_dict))
max_size = max(size_list)
if memory_limit is None:
memory_arg = max_size
else:
memory_arg = memory_limit
# Compute naive cost
    # This isn't quite right, need to look into exactly how einsum does this
naive_cost = _compute_size_by_dict(indices, dimension_dict)
indices_in_input = input_subscripts.replace(',', '')
mult = max(len(input_list) - 1, 1)
if (len(indices_in_input) - len(set(indices_in_input))):
mult *= 2
naive_cost *= mult
# Compute the path
if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
# Nothing to be optimized, leave it to einsum
path = [tuple(range(len(input_list)))]
elif path_type == "greedy":
# Maximum memory should be at most out_size for this algorithm
memory_arg = min(memory_arg, max_size)
path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type == "optimal":
path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type[0] == 'einsum_path':
path = path_type[1:]
else:
raise KeyError("Path name %s not found", path_type)
cost_list, scale_list, size_list, contraction_list = [], [], [], []
# Build contraction tuple (positions, gemm, einsum_str, remaining)
for cnum, contract_inds in enumerate(path):
# Make sure we remove inds from right to left
contract_inds = tuple(sorted(list(contract_inds), reverse=True))
contract = _find_contraction(contract_inds, input_sets, output_set)
out_inds, input_sets, idx_removed, idx_contract = contract
cost = _compute_size_by_dict(idx_contract, dimension_dict)
if idx_removed:
cost *= 2
cost_list.append(cost)
scale_list.append(len(idx_contract))
size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
tmp_inputs = []
for x in contract_inds:
tmp_inputs.append(input_list.pop(x))
# Last contraction
if (cnum - len(path)) == -1:
idx_result = output_subscript
else:
sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
idx_result = "".join([x[1] for x in sorted(sort_result)])
input_list.append(idx_result)
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
contraction = (contract_inds, idx_removed, einsum_str, input_list[:])
contraction_list.append(contraction)
opt_cost = sum(cost_list) + 1
if einsum_call_arg:
return (operands, contraction_list)
# Return the path along with a nice string representation
overall_contraction = input_subscripts + "->" + output_subscript
header = ("scaling", "current", "remaining")
speedup = naive_cost / opt_cost
max_i = max(size_list)
path_print = " Complete contraction: %s\n" % overall_contraction
path_print += " Naive scaling: %d\n" % len(indices)
path_print += " Optimized scaling: %d\n" % max(scale_list)
path_print += " Naive FLOP count: %.3e\n" % naive_cost
path_print += " Optimized FLOP count: %.3e\n" % opt_cost
path_print += " Theoretical speedup: %3.3f\n" % speedup
path_print += " Largest intermediate: %.3e elements\n" % max_i
path_print += "-" * 74 + "\n"
path_print += "%6s %24s %40s\n" % header
path_print += "-" * 74
for n, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining = contraction
remaining_str = ",".join(remaining) + "->" + output_subscript
path_run = (scale_list[n], einsum_str, remaining_str)
path_print += "\n%4d %24s %40s" % path_run
path = ['einsum_path'] + path
return (path, path_print)
# Rewrite einsum to handle different cases
def einsum(*operands, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe', optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations. The best way to understand this
function is to try the examples below, which show how many common NumPy
functions can be implemented as calls to `einsum`.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
operands : list of array_like
These are the arrays for the operation.
out : {ndarray, None}, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
        'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Default is False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Repeated subscripts labels in one operand take the diagonal. For example,
``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to ``np.inner(a,b)``. If a label appears only once,
it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
with no changes.
The order of labels in the output is by default alphabetical. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose.
The output can be controlled by specifying output subscript labels
as well. This specifies the label order, and allows summing to
be disallowed or forced when desired. The call ``np.einsum('i->', a)``
is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
is like ``np.diag(a)``. The difference is that `einsum` does not
allow broadcasting by default.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, you can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
An alternative way to provide the subscripts and operands is as
``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
below have corresponding `einsum` calls with the two parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as ``np.swapaxes(a, 0, 2)`` and
``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
.. versionadded:: 1.12.0
Added the ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
See ``np.einsum_path`` for more details.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> c.T
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, C)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum('i...->...', a)
array([50, 55, 60, 65, 70])
>>> np.einsum(a, [0,Ellipsis], [Ellipsis])
array([50, 55, 60, 65, 70])
>>> np.sum(a, axis=0)
array([50, 55, 60, 65, 70])
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> # since version 1.10.0
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""
# Grab non-einsum kwargs
optimize_arg = kwargs.pop('optimize', False)
# If no optimization, run pure einsum
if optimize_arg is False:
return c_einsum(*operands, **kwargs)
valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting']
einsum_kwargs = {k: v for (k, v) in kwargs.items() if
k in valid_einsum_kwargs}
# Make sure all keywords are valid
valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs
unknown_kwargs = [k for (k, v) in kwargs.items() if
k not in valid_contract_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs: %s"
% unknown_kwargs)
    # Special handling if out is specified
specified_out = False
out_array = einsum_kwargs.pop('out', None)
if out_array is not None:
specified_out = True
# Build the contraction list and operand
operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
einsum_call=True)
# Start contraction loop
for num, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining = contraction
tmp_operands = []
for x in inds:
tmp_operands.append(operands.pop(x))
# If out was specified
if specified_out and ((num + 1) == len(contraction_list)):
einsum_kwargs["out"] = out_array
# Do the contraction
new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
        # Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
if specified_out:
return out_array
else:
return operands[0]
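# Illustrative usage sketch (assumes this module is exposed as np.einsum):
#   a = np.random.rand(8, 8); b = np.random.rand(8, 8); c = np.random.rand(8, 8)
#   np.einsum('ij,jk,kl->il', a, b, c, optimize=True)  # True selects the greedy path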
| mit |
mkim48/TensorDB | tensordb_pkg/algorithms/tucker.py | 2 | 11252 | #!/usr/bin/python
#
# tucker.py runs Tucker Decomposition. Given the input tensor, perform the tucker decomposition. The Tucker decomposition takes HOSVD of the input tensor as the initial factor matrices and core. HOSVD of the input tensor is obtained by running dta_full.py without an old tensor.
#
# Parameters
# - tensor_name: input tensor
# - tensor_size: input tensor size, e.g., 1000,1000,1000 for a tensor of size 1000x1000x1000
# - chunk_size: chunk size for the input tensor, e.g., 100,100,100 for a chunk size 100x100x100
# - rank: target ranks for each mode, e.g., 2,3,4
# - max_iter (optional): the maximum iteration count, default value: 50
# - debug (optional): 1 for debugging mode (not running the operations but showing the commands), default value: 0
#
# Output
# - The outputs include factor matrices and a core. The factor matrices are <tensor_name>_fac_0, <tensor_name>_fac_1, ..., and the core is <tensor_name>_core.
#
# Example
# tucker.py tensor1 1000,1000,1000 100,100,100 2,3,4
#
# - The example takes a 3-mode tensor of size 1000x1000x1000 with the chunk size 100x100x100 and decompose the tensor. The output of the example includes 3 factor matrices (tensor1_fac_0, tensor1_fac_1, and tensor1_fac_2) and a core (tensor1_core).
#
# BEGIN_COPYRIGHT
#
# This is the TensorDB, 2014.
# Reference:
# Mijung Kim (2014). TensorDB and Tensor-based Relational Model (TRM) for Efficient Tensor-Relational Operations. Ph.D. Thesis. Arizona State University.
#
# This file is part of SciDB.
# Copyright (C) 2008-2011 SciDB, Inc.
#
# SciDB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SciDB is distributed "AS-IS" AND WITHOUT ANY WARRANTY OF ANY KIND,
# INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
# NON-INFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for the complete license terms.
#
# You should have received a copy of the GNU General Public License
# along with SciDB. If not, see <http://www.gnu.org/licenses/>.
# END_COPYRIGHT
import csv
import os
import sys
import traceback
import datetime
import math
sys.path.append(os.getcwd()) # NOCHECKIN
sys.path.append('/opt/scidb/12.12' + '/lib')
import scidbapi as scidb
def handleException(inst, exitWhenDone, op=None):
    traceback.print_exc()
    if op:
        print >> sys.stderr, "Exception while ", op
    print >> sys.stderr, "  Exception Type: %s" % type(inst)  # the exception instance
    print >> sys.stderr, "  Exception Value: %r" % inst
    print >> sys.stderr, ""
    if(exitWhenDone):
        exit(2)
# handleException is defined before the connection attempt so it is available
# if scidb.connect fails at import time.
try:
    db = scidb.connect("localhost", 1239)
except Exception, inst:
    handleException(inst, True, op="connecting")
def main():
argcnt = len(sys.argv)
if argcnt < 5:
print "tucker.py <tensor_name> <tensor_size> <chunk_size> <rank> <max_iter> <debug>"
exit(2)
start = datetime.datetime.now();
tensor=sys.argv[1]
facmat = tensor + '_fac_'
core = tensor + '_core'
ztensor = tensor + '_z'
tmp = tensor + '_tmpp'
ttm = tensor + '_ttm'
if argcnt >= 6:
max_iter=int(sys.argv[5])
else:
max_iter=50
debug = 0
if argcnt == 7:
debug = 1
print 'debug='+str(debug)
if debug == 0:
resfile = 'res_' + tensor
f = open(resfile,'a')
msg = 'tucker start'
f.write(msg+'\n')
facsz=sys.argv[2].split(',')
facchunksz=sys.argv[3].split(',')
rank=sys.argv[4].split(',')
nmode=len(facsz)
fit = 0
fitchangetol = 1e-4
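    # Note: fitchangetol is declared for a convergence test, but as written the
    # loop below always runs max_iter iterations (the fit change is never compared).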
for r in range(max_iter):
fitold = fit
#compute z=ttm(tensor,facmat,-mode)
for m in range(nmode):
if m == 0:
prod_sz = 1
prod_chunksz = 1
for i in range(0,nmode):
if i == 1:
continue
prod_sz = prod_sz * int(facsz[i])
prod_chunksz = prod_chunksz * int(facchunksz[i])
query = "store(reshape("+tensor+",<val:double>[i=1:"+str(facsz[1])+","+str(facchunksz[1])+",0,j=1:"+str(prod_sz)+","+str(prod_chunksz)+",0]),"+ttm+str(m)+")"
if debug == 0:
print query
result=db.executeQuery(query,"afl")
db.completeQuery(result.queryID)
f.write(query+'\n');
else:
print query
else:
prod_sz = 1
prod_chunksz = 1
for i in range(1,nmode):
prod_sz = prod_sz * int(facsz[i])
prod_chunksz = prod_chunksz * int(facchunksz[i])
query = "store(reshape("+tensor+",<val:double>[i=1:"+str(facsz[0])+","+str(facchunksz[0])+",0,j=1:"+str(prod_sz)+","+str(prod_chunksz)+",0]),"+ttm+str(m)+")"
print '#1'
print prod_sz
if debug == 0:
print query
result=db.executeQuery(query,"afl")
db.completeQuery(result.queryID)
f.write(query+'\n');
else:
print query
query = ttm+str(m)
for i in range(nmode):
if m == i:
continue
if m != nmode and i == nmode:
query = "multiply_row(transpose("+query+"),"+facmat+str(i)+")"
else:
#next = i+1
#if m == i+1:
# next = i+2
#if next != nmode:
prod_sz = prod_sz / int(facsz[i])
prod_chunksz = prod_chunksz / int(facchunksz[i])
str_idx1 = "i=1:"+str(facsz[i])+","+str(facchunksz[i])+",0"
#prod_sz = 1
#prod_chunksz = 1
#for j in range(next+1,nmode):
#for j in range(0,next):
prod_sz = prod_sz * int(rank[i])
prod_chunksz = prod_chunksz * int(rank[i])
print prod_sz
str_idx2 = "j=1:"+str(prod_sz)+","+str(prod_chunksz)+",0"
#else:
# str_idx1 = "i=1:"+str(facsz[i])+","+str(facchunksz[i])+",0"
query = "reshape(multiply_row(transpose("+query+"),"+facmat+str(i)+"),<val:double>["+str_idx1+","+str_idx2+"])"
query="store("+query+","+ttm+str(m)+str(i)+")";
if debug == 0:
print query
result=db.executeQuery(query,"afl")
db.completeQuery(result.queryID)
f.write(query+'\n');
else:
print query
query = ttm+str(m)+str(i)
# compute ztensor
query="store(multiply_row("+query+","+query+"),"+ztensor+str(m)+")";
if debug == 0:
print query
result=db.executeQuery(query,"afl")
db.completeQuery(result.queryID)
f.write(query+'\n');
else:
print query
#remove factor matrices (the chunk size is same as the tensor size)
for i in range(nmode):
query="remove("+facmat+str(i)+")"
if debug == 0:
print query
f.write(query+'\n')
try:
result=db.executeQuery(query,"afl")
db.completeQuery(result.queryID)
except Exception, inst:
print >> sys.stderr, " Exception Type: %s" % type(inst) # the exception instance
print >> sys.stderr, " Exception Value: %r" % inst
else:
print query
#create factor matrices (the chunk size is same as the tensor size)
for i in range(nmode):
query="create array "+facmat+str(i)+"<val:double>[i=1:"+str(rank[i])+","+str(rank[i])+",0,j=1:"+str(facsz[i])+","+str(facchunksz[i])+",0]"
if debug == 0:
print query
f.write(query+'\n')
try:
result=db.executeQuery(query,"aql")
db.completeQuery(result.queryID)
except Exception, inst:
print >> sys.stderr, " Exception Type: %s" % type(inst) # the exception instance
print >> sys.stderr, " Exception Value: %r" % inst
else:
print query
#compute factor matrices
for i in range(nmode):
if facsz[i]==facchunksz[i]:
query="copyArray(eigen("+ztensor+str(i)+","+rank[i]+"),"+facmat+str(i)+")"
else:
query = "repart("+ztensor+str(i)+",<val:double>[i=1:"+str(facsz[i])+","+str(facsz[i])+",0,j=1:"+str(facsz[i])+","+str(facsz[i])+",0])"
query="repart(eigen("+query+","+rank[i]+"),<val:double>[i=1:"+str(facsz[i])+","+str(facsz[i])+",0,j=1:"+str(facsz[i])+","+str(facchunksz[i])+",0])"
query="copyArray("+query+","+facmat+str(i)+")"
if debug == 0:
print query
result=db.executeQuery(query,"afl")
db.completeQuery(result.queryID)
f.write(query+'\n')
else:
print query
#compute core=ttm(tensor,facmat)
prod_sz = 1
prod_chunksz = 1
for i in range(1,nmode):
prod_sz = prod_sz * int(facsz[i])
prod_chunksz = prod_chunksz * int(facchunksz[i])
query = "store(reshape("+tensor+",<val:double>[i=1:"+str(facsz[0])+","+str(facchunksz[0])+",0,j=1:"+str(prod_sz)+","+str(prod_chunksz)+",0]),"+tmp+str(0)+")"
if debug == 0:
print query
result=db.executeQuery(query,"afl")
db.completeQuery(result.queryID)
f.write(query+'\n');
else:
print query
query = tmp+str(0)
for i in range(1,nmode):
print i
str_idx1 = "i=1:"+str(facsz[i])+","+str(facchunksz[i])+",0"
prod_sz = 1
prod_chunksz = 1
for j in range(i+1,nmode):
prod_sz = prod_sz * int(facsz[j])
prod_chunksz = prod_chunksz * int(facchunksz[j])
for j in range(0,i):
prod_sz = prod_sz * int(rank[j])
prod_chunksz = prod_chunksz * int(rank[j])
str_idx2 = "j=1:"+str(prod_sz)+","+str(prod_chunksz)+",0"
query = "reshape(multiply_row(transpose("+query+"),"+facmat+str(i-1)+"),<val:double>["+str_idx1+","+str_idx2+"])"
query="store("+query+","+tmp+str(i)+")";
if debug == 0:
print query
result=db.executeQuery(query,"afl")
db.completeQuery(result.queryID)
f.write(query+'\n');
else:
print query
query = tmp+str(i)
#core
str_idx = "i0=1:"+str(rank[0])+","+str(rank[0])+",0"
for j in range(1,nmode):
str_idx = str_idx + ",i"+str(j)+"=1:"+str(rank[j])+","+str(rank[j])+",0"
query = "reshape(multiply_row(transpose("+query+"),"+facmat+str(nmode-1)+"),<val:double>["+str_idx+"])"
query="store("+query+","+core+")";
if debug == 0:
print query
result=db.executeQuery(query,"afl")
db.completeQuery(result.queryID)
f.write(query+'\n');
else:
print query
#compute norm^2 for input tensor
query="select sum(pow(val,2)) from " + tensor
if debug == 0:
print query
f.write(query+'\n')
result=db.executeQuery(query,"aql")
dataitem = result.array.getConstIterator(0).getChunk().getConstIterator().getItem()
attrs = result.array.getArrayDesc().getAttributes()
normx = scidb.getTypedValue(dataitem, attrs[0].getType())
db.completeQuery(result.queryID)
else:
print query
#compute norm^2 for core
query="select sum(pow(val,2)) from " + core
if debug == 0:
print query
f.write(query+'\n')
result=db.executeQuery(query,"aql")
dataitem = result.array.getConstIterator(0).getChunk().getConstIterator().getItem()
attrs = result.array.getArrayDesc().getAttributes()
normcore = scidb.getTypedValue(dataitem, attrs[0].getType())
db.completeQuery(result.queryID)
else:
print query
# frobenius norm of x - x^
if debug == 0:
f.write("normx="+str(normx)+"\n")
f.write("normcore="+str(normcore)+"\n")
norm_residual = math.sqrt(normx - normcore)
f.write("norm_residual="+str(norm_residual)+"\n")
fit = 1-norm_residual/math.sqrt(normx)
f.write("fit="+str(fit)+'\n')
end = datetime.datetime.now();
minutes, seconds = divmod((end-start).seconds, 60)
microsec = (end-start).microseconds
if debug == 0:
msg = 'tucker Time elapsed: %d min %d.%d sec.' % (minutes, seconds, microsec)
f.write(msg+'\n\n')
f.close()
#Disconnect from the SciDB server.
db.disconnect()
sys.exit(0) #success
if __name__ == "__main__":
main()
| agpl-3.0 |
BinPy/BinPy | BinPy/examples/source/ic/Series_4000/IC4002.py | 5 | 1249 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=2>
# Usage of IC 4002
# <codecell>
from __future__ import print_function
from BinPy import *
# <codecell>
# Usage of IC 4002:
ic = IC_4002()
print(ic.__doc__)
# <codecell>
# The Pin configuration is:
inp = {2: 0, 3: 0, 4: 0, 5: 0, 7: 0, 9: 1, 10: 1, 11: 1, 12: 1, 14: 1}
# Pin initialization
# Powering up the IC - using -- ic.setIC({14: 1, 7: 0})
ic.setIC({14: 1, 7: 0})
# Setting the inputs of the ic
ic.setIC(inp)
# Draw the IC with the current configuration
ic.drawIC()
# <codecell>
# Run the IC with the current configuration using -- print ic.run() --
# Note that ic.run() returns a dict of pin configurations similar to the input dict
print (ic.run())
# <codecell>
# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()
# <codecell>
# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()
# Run the IC
print (ic.run())
# <codecell>
# Connector Outputs
c = Connector()
# Set the output connector to a particular pin of the ic
ic.setOutput(13, c)
print(c)
| bsd-3-clause |
epidataio/epidata-community | ipython/home/tutorials/sensor_data_query.py | 1 | 5017 | ############################
# Import Required Modules #
############################
import argparse
import base64
from datetime import datetime, timedelta
import httplib
import json
import numpy as np
from pytz import UTC, timezone
import random
from decimal import Decimal
import struct
import time
from time import sleep
import urllib
import urllib2
##################################
# Define Variables and Functions #
##################################
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--host')
args = arg_parser.parse_args()
HOST = args.host or '127.0.0.1'
AUTHENTICATION_URL = 'https://' + HOST + '/authenticate/app'
AUTHENTICATION_ROUTE = '/authenticate/app'
QUERY_MEASUREMENTS_ORIGINAL_URL = 'https://' + HOST + '/measurements_original?'
QUERY_MEASUREMENTS_CLEANSED_URL = 'https://' + HOST + '/measurements_cleansed?'
QUERY_MEASUREMENTS_SUMMARY_URL = 'https://' + HOST + '/measurements_summary?'
def get_time(time_string):
date_object = datetime.strptime(time_string, '%m/%d/%Y %H:%M:%S.%f')
return long(time.mktime(date_object.timetuple())
* 1e3 + date_object.microsecond / 1e3)
def add_time(time_string, delta):
date_object = datetime.strptime(
time_string, '%m/%d/%Y %H:%M:%S.%f') + timedelta(seconds=delta)
return long(time.mktime(date_object.timetuple())
* 1e3 + date_object.microsecond / 1e3)
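# Illustrative example: get_time('8/1/2017 00:00:00.000') returns that local
# datetime as a millisecond epoch timestamp (a long in this Python 2 script).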
current_time_string = datetime.now().strftime("%m/%d/%Y %H:%M:%S.%f")
current_time = get_time(current_time_string)
#####################
# EDIT THIS SECTION #
#####################
# Replace quoted string with API Token or GitHub Personal Access Token
# (REQUIRED)
ACCESS_TOKEN = 'API Token'
# Modify default values (OPTIONAL)
COMPANY = 'EpiData'
SITE = 'San_Jose'
STATION = 'WSN-1'
SENSOR = "Temperature_Probe"
#########################
# SKIP SSL VERIFICATION #
#########################
import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
#############################
# Authenticate with EpiData #
#############################
conn = httplib.HTTPSConnection(HOST)
# Authentication is achieved by posting to the AUTHENTICATION_URL.
url = AUTHENTICATION_URL
# An HTTP POST with JSON content requires the HTTP Content-type header.
json_header = {'Content-type': 'application/json'}
# The access token is povided via JSON.
json_body = json.dumps({'accessToken': ACCESS_TOKEN})
# Send the POST request and receive the HTTP response.
conn.request('POST', AUTHENTICATION_ROUTE, json_body, json_header)
post_response = conn.getresponse()
response_status = post_response.status
response_text = post_response.read()
# Check that the response's HTTP response code is 200 (OK).
assert response_status == 200
# Parse the JSON response.
response_json = json.loads(response_text)
# Retrieve the new session id from the JSON response.
session_id = response_json['sessionId']
# Construct the session cookie.
session_cookie = 'epidata=' + session_id
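# The session cookie carries the authenticated session id and is sent in the
# header of each measurement query below.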
###########################################
# Query Data from EpiData in a While Loop #
###########################################
print "Sending Query Request to EpiData ..."
iteration = 0
while (True):
try:
# Create instances that connect to the server
conn = httplib.HTTPSConnection(HOST)
# Specify measurement query parameters
begin_time = get_time("8/1/2017 00:00:00.000")
end_time = get_time("9/1/2017 00:00:00.000")
parameters = {
'company': COMPANY,
'site': SITE,
'station': STATION,
'sensor': SENSOR,
'beginTime': begin_time,
'endTime': end_time}
# Construct url with parameters
url = QUERY_MEASUREMENTS_ORIGINAL_URL + urllib.urlencode(parameters)
print url
json_header = {'Cookie': session_cookie, 'Accept': 'text/plain'}
# Send the GET request and receive the HTTP response.
conn.request('GET', url, "", json_header)
get_response = conn.getresponse()
response_status = get_response.status
response_text = get_response.read()
print response_status, response_text
# Check that the response's HTTP response code is 200 (OK) and read the
# response.
assert response_status == 200
response_json = json.loads(response_text)
print response_json
# increment iteration and current time
iteration += 1
# Exit the while loop
break
# Handle keyboard interrupt
except (KeyboardInterrupt, SystemExit):
print '\n...Program Stopped Manually!'
raise
break
################################
# End of Data Query Script #
################################
| apache-2.0 |
khink/xhtml2pdf | xhtml2pdf/w3c/cssDOMElementInterface.py | 79 | 4201 | #!/usr/bin/env python
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~ Copyright (C) 2002-2004 TechGame Networks, LLC.
##~
##~ This library is free software; you can redistribute it and/or
##~ modify it under the terms of the BSD style License as found in the
##~ LICENSE file included with this distribution.
##
## Modified by Dirk Holtwick <[email protected]>, 2007-2008
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Imports
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import css
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Definitions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSDOMElementInterface(css.CSSElementInterfaceAbstract):
"""An implementation of css.CSSElementInterfaceAbstract for xml.dom Element Nodes"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Constants / Variables / Etc.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
style = None
_pseudoStateHandlerLookup = {
'first-child':
lambda self: not bool(self.getPreviousSibling()),
'not-first-child':
lambda self: bool(self.getPreviousSibling()),
'last-child':
lambda self: not bool(self.getNextSibling()),
'not-last-child':
lambda self: bool(self.getNextSibling()),
'middle-child':
lambda self: not bool(self.getPreviousSibling()) and not bool(self.getNextSibling()),
'not-middle-child':
lambda self: bool(self.getPreviousSibling()) or bool(self.getNextSibling()),
# XXX 'first-line':
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Definitions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def __init__(self, domElement, cssParser=None):
self.domElement = domElement
# print self.domElement.attributes
if cssParser is not None:
self.onCSSParserVisit(cssParser)
def onCSSParserVisit(self, cssParser):
styleSrc = self.getStyleAttr()
if styleSrc:
style = cssParser.parseInline(styleSrc)
self.setInlineStyle(style)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def matchesNode(self, (namespace, tagName)):
if tagName not in ('*', self.domElement.tagName):
return False
if namespace in (None, '', '*'):
# matches any namespace
return True
else: # full compare
return namespace == self.domElement.namespaceURI
def getAttr(self, name, default=NotImplemented):
attrValue = self.domElement.attributes.get(name)
if attrValue is not None:
return attrValue.value
else:
return default
def getIdAttr(self):
return self.getAttr('id', '')
def getClassAttr(self):
return self.getAttr('class', '')
def getStyleAttr(self):
return self.getAttr('style', None)
def inPseudoState(self, name, params=()):
handler = self._pseudoStateHandlerLookup.get(name, lambda self: False)
return handler(self)
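    # Illustrative sketch of the pseudo-state handlers above (element name hypothetical):
    #   elem.inPseudoState('first-child')    # True iff no preceding element sibling
    #   elem.inPseudoState('last-child')     # True iff no following element sibling
    #   elem.inPseudoState('unknown-state')  # False via the default handler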
def iterXMLParents(self, includeSelf=False):
klass = self.__class__
current = self.domElement
if not includeSelf:
current = current.parentNode
while (current is not None) and (current.nodeType == current.ELEMENT_NODE):
yield klass(current)
current = current.parentNode
def getPreviousSibling(self):
sibling = self.domElement.previousSibling
while sibling:
if sibling.nodeType == sibling.ELEMENT_NODE:
return sibling
else:
sibling = sibling.previousSibling
return None
def getNextSibling(self):
sibling = self.domElement.nextSibling
while sibling:
if sibling.nodeType == sibling.ELEMENT_NODE:
return sibling
else:
sibling = sibling.nextSibling
return None
def getInlineStyle(self):
return self.style
def setInlineStyle(self, style):
self.style = style
| apache-2.0 |
unt-libraries/coda | coda/coda_mdstore/tests/test_urls.py | 1 | 3321 | from django.contrib import sitemaps
from django.urls import resolve
from django.conf import settings
import pytest
from coda_mdstore import resourcesync
from coda_mdstore import views
def test_index():
assert resolve('/').func == views.index
def test_all_bags():
assert resolve('/bag/').func == views.all_bags
def test_app_bag_no_parameters():
assert resolve('/APP/bag/').func == views.app_bag
def test_app_with_parameters():
assert resolve('/APP/bag/ark:/%d/coda2/' % settings.ARK_NAAN).func == views.app_bag
def test_bagHTML():
assert resolve('/bag/ark:/%d/coda2/' % settings.ARK_NAAN).func == views.bagHTML
def test_bagURLList():
assert resolve('/bag/ark:/%d/coda2.urls' % settings.ARK_NAAN).func == views.bagURLList
def test_bag_zip_download():
assert resolve('/bag/ark:/%d/coda2.zip' % settings.ARK_NAAN).func == views.bagDownload
def test_bag_links():
assert resolve('/bag/ark:/%d/coda2/links/' % settings.ARK_NAAN).func == views.bagURLLinks
def test_bagProxy():
assert resolve('/bag/ark:/%d/foo/bar' % settings.ARK_NAAN).func == views.bagProxy
def test_stats():
assert resolve('/stats/').func == views.stats
def test_json_stats():
assert resolve('/stats.json').func == views.json_stats
def test_app_node():
assert resolve('/APP/node/').func == views.app_node
def test_app_node_with_identifier():
assert resolve('/APP/node/coda-123/').func == views.app_node
def test_showNodeStatus():
assert resolve('/node/').func == views.showNodeStatus
def test_showNodeStatus_with_identifier():
assert resolve('/node/coda-123/').func == views.showNodeStatus
def test_externalIdentifierSearch_with_identifier():
url = resolve('/extidentifier/test_value/')
assert url.func == views.externalIdentifierSearch
def test_externalIdentifierSearch():
url = resolve('/extidentifier/')
assert url.func == views.externalIdentifierSearch
def test_externalIdentifierSearchJSON():
url = resolve('/extidentifier.json')
assert url.func == views.externalIdentifierSearchJSON
def test_bagFullTextSearchHTML():
url = resolve('/search/')
assert url.func == views.bagFullTextSearchHTML
def test_about():
url = resolve('/about/')
assert url.func == views.about
def test_robots():
url = resolve('/robots.txt')
assert url.func == views.shooRobot
def test_feed():
assert resolve('/feed/').func.__class__ == views.AtomSiteNewsFeed
@pytest.mark.django_db
def test_resourceindex(client):
assert resolve('/resourceindex.xml').func == sitemaps.views.index
# Verify correct arguments are being passed in urls.py.
assert client.get('/resourceindex.xml').status_code == 200
@pytest.mark.django_db
def test_resourcelist_section(client):
assert resolve('/resourcelist-001.xml').func == sitemaps.views.sitemap
# Verify correct arguments are being passed in urls.py.
assert client.get('/resourcelist-001.xml').status_code == 200
@pytest.mark.django_db
def test_changelist(client):
assert resolve('/changelist.xml').func == resourcesync.changelist
# Verify correct arguments are being passed in urls.py.
assert client.get('/changelist.xml').status_code == 200
def test_capabilitylist():
assert resolve('/capabilitylist.xml').func == resourcesync.capabilitylist
| bsd-3-clause |
eeshangarg/oh-mainline | vendor/packages/Django/django/contrib/auth/tests/models.py | 94 | 5106 | from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import (Group, User, SiteProfileNotAvailable,
UserManager)
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
@skipIfCustomUser
@override_settings(USE_TZ=False, AUTH_PROFILE_MODULE='')
class ProfileTestCase(TestCase):
def test_site_profile_not_available(self):
user = User.objects.create(username='testclient')
# calling get_profile without AUTH_PROFILE_MODULE set
del settings.AUTH_PROFILE_MODULE
with six.assertRaisesRegex(self, SiteProfileNotAvailable,
"You need to set AUTH_PROFILE_MODULE in your project"):
user.get_profile()
# Bad syntax in AUTH_PROFILE_MODULE:
settings.AUTH_PROFILE_MODULE = 'foobar'
with six.assertRaisesRegex(self, SiteProfileNotAvailable,
"app_label and model_name should be separated by a dot"):
user.get_profile()
# module that doesn't exist
settings.AUTH_PROFILE_MODULE = 'foo.bar'
with six.assertRaisesRegex(self, SiteProfileNotAvailable,
"Unable to load the profile model"):
user.get_profile()
@skipIfCustomUser
@override_settings(USE_TZ=False)
class NaturalKeysTestCase(TestCase):
fixtures = ['authtestdata.json']
def test_user_natural_key(self):
staff_user = User.objects.get(username='staff')
self.assertEqual(User.objects.get_by_natural_key('staff'), staff_user)
self.assertEqual(staff_user.natural_key(), ('staff',))
def test_group_natural_key(self):
users_group = Group.objects.create(name='users')
self.assertEqual(Group.objects.get_by_natural_key('users'), users_group)
@skipIfCustomUser
@override_settings(USE_TZ=False)
class LoadDataWithoutNaturalKeysTestCase(TestCase):
fixtures = ['regular.json']
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username='my_username')
group = Group.objects.get(name='my_group')
self.assertEqual(group, user.groups.get())
@skipIfCustomUser
@override_settings(USE_TZ=False)
class LoadDataWithNaturalKeysTestCase(TestCase):
fixtures = ['natural.json']
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username='my_username')
group = Group.objects.get(name='my_group')
self.assertEqual(group, user.groups.get())
@skipIfCustomUser
class UserManagerTestCase(TestCase):
def test_create_user(self):
email_lowercase = '[email protected]'
user = User.objects.create_user('user', email_lowercase)
self.assertEqual(user.email, email_lowercase)
self.assertEqual(user.username, 'user')
self.assertEqual(user.password, '!')
def test_create_user_email_domain_normalize_rfc3696(self):
# According to http://tools.ietf.org/html/rfc3696#section-3
# the "@" symbol can be part of the local part of an email address
returned = UserManager.normalize_email(r'Abc\@[email protected]')
self.assertEqual(returned, r'Abc\@[email protected]')
def test_create_user_email_domain_normalize(self):
returned = UserManager.normalize_email('[email protected]')
self.assertEqual(returned, '[email protected]')
def test_create_user_email_domain_normalize_with_whitespace(self):
returned = UserManager.normalize_email('email\ [email protected]')
self.assertEqual(returned, 'email\ [email protected]')
def test_empty_username(self):
self.assertRaisesMessage(ValueError,
'The given username must be set',
User.objects.create_user, username='')
class IsActiveTestCase(TestCase):
"""
Tests the behavior of the guaranteed is_active attribute
"""
@skipIfCustomUser
def test_builtin_user_isactive(self):
user = User.objects.create(username='foo', email='[email protected]')
# is_active is true by default
self.assertEqual(user.is_active, True)
user.is_active = False
user.save()
user_fetched = User.objects.get(pk=user.pk)
# the is_active flag is saved
self.assertFalse(user_fetched.is_active)
@override_settings(AUTH_USER_MODEL='auth.IsActiveTestUser1')
def test_is_active_field_default(self):
"""
tests that the default value for is_active is provided
"""
UserModel = get_user_model()
user = UserModel(username='foo')
self.assertEqual(user.is_active, True)
# you can set the attribute - but it will not save
user.is_active = False
# there should be no problem saving - but the attribute is not saved
user.save()
user_fetched = UserModel._default_manager.get(pk=user.pk)
# the attribute is always true for newly retrieved instance
self.assertEqual(user_fetched.is_active, True)
| agpl-3.0 |
Donkyhotay/MoonPy | zope/app/publisher/browser/tests/testi18nfileresource.py | 1 | 5545 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""I18n File-Resource Tests
$Id: testi18nfileresource.py 29143 2005-02-14 22:43:16Z srichter $
"""
from unittest import main, makeSuite
import os
from zope.publisher.interfaces import NotFound
from zope.app.testing.placelesssetup import PlacelessSetup
from zope.app.testing import ztapi
from zope.i18n.interfaces import IUserPreferredCharsets, IUserPreferredLanguages
from zope.publisher.http import IHTTPRequest, HTTPCharsets
from zope.publisher.browser import BrowserLanguages, TestRequest
from zope.app.publisher.browser.i18nfileresource import I18nFileResource
from zope.app.publisher.browser.i18nfileresource import I18nFileResourceFactory
from zope.app.publisher.fileresource import File
import zope.app.publisher.browser.tests as p
from zope.i18n.interfaces import INegotiator
from zope.i18n.negotiator import negotiator
from zope.i18n.tests.testii18naware import TestII18nAware
test_directory = os.path.dirname(p.__file__)
class Test(PlacelessSetup, TestII18nAware):
def setUp(self):
super(Test, self).setUp()
TestII18nAware.setUp(self)
ztapi.provideAdapter(IHTTPRequest, IUserPreferredCharsets,
HTTPCharsets)
ztapi.provideAdapter(IHTTPRequest, IUserPreferredLanguages,
BrowserLanguages)
# Setup the negotiator utility
ztapi.provideUtility(INegotiator, negotiator)
def _createObject(self):
obj = I18nFileResource({'en':None, 'lt':None, 'fr':None},
TestRequest(), 'fr')
return obj
def _createDict(self, filename1='test.pt', filename2='test2.pt'):
path1 = os.path.join(test_directory, 'testfiles', filename1)
path2 = os.path.join(test_directory, 'testfiles', filename2)
return { 'en': File(path1, filename1),
'fr': File(path2, filename2) }
def testNoTraversal(self):
resource = I18nFileResourceFactory(self._createDict(), 'en')\
(TestRequest())
self.assertRaises(NotFound,
resource.publishTraverse,
resource.request,
'_testData')
def testFileGET(self):
# case 1: no language preference, should get en
path = os.path.join(test_directory, 'testfiles', 'test.txt')
resource = I18nFileResourceFactory(self._createDict('test.txt'), 'en')\
(TestRequest())
self.assertEqual(resource.GET(), open(path, 'rb').read())
response = resource.request.response
self.assertEqual(response.getHeader('Content-Type'), 'text/plain')
# case 2: prefer lt, have only en and fr, should get en
resource = I18nFileResourceFactory(
self._createDict('test.txt'), 'en')\
(TestRequest(HTTP_ACCEPT_LANGUAGE='lt'))
self.assertEqual(resource.GET(), open(path, 'rb').read())
response = resource.request.response
self.assertEqual(response.getHeader('Content-Type'), 'text/plain')
# case 3: prefer fr, have it, should get fr
path = os.path.join(test_directory, 'testfiles', 'test2.pt')
resource = I18nFileResourceFactory(
self._createDict('test.pt', 'test2.pt'), 'en')\
(TestRequest(HTTP_ACCEPT_LANGUAGE='fr'))
self.assertEqual(resource.GET(), open(path, 'rb').read())
response = resource.request.response
self.assertEqual(response.getHeader('Content-Type'), 'text/html')
def testFileHEAD(self):
# case 1: no language preference, should get en
resource = I18nFileResourceFactory(self._createDict('test.txt'), 'en')\
(TestRequest())
self.assertEqual(resource.HEAD(), '')
response = resource.request.response
self.assertEqual(response.getHeader('Content-Type'), 'text/plain')
# case 2: prefer lt, have only en and fr, should get en
resource = I18nFileResourceFactory(
self._createDict('test.txt'), 'en')\
(TestRequest(HTTP_ACCEPT_LANGUAGE='lt'))
self.assertEqual(resource.HEAD(), '')
response = resource.request.response
self.assertEqual(response.getHeader('Content-Type'), 'text/plain')
# case 3: prefer fr, have it, should get fr
resource = I18nFileResourceFactory(
self._createDict('test.pt', 'test2.pt'), 'en')\
(TestRequest(HTTP_ACCEPT_LANGUAGE='fr'))
self.assertEqual(resource.HEAD(), '')
response = resource.request.response
self.assertEqual(response.getHeader('Content-Type'), 'text/html')
def test_suite():
return makeSuite(Test)
if __name__=='__main__':
main(defaultTest='test_suite')
| gpl-3.0 |
irblsensitivity/irblsensitivity | scripts/analysis/MWU_Project_EMSE.py | 1 | 9231 | #-*- coding: utf-8 -*-
'''
Created on 2017. 02. 12
Updated on 2017. 02. 12
'''
from __future__ import print_function
import os
import re
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from scipy.stats import mannwhitneyu, pearsonr
from ExpBase import ExpBase
import numpy as np
from commons import Subjects
class MWUTest(ExpBase):
techniques = ['BugLocator', 'BRTracer', 'BLUiR', 'AmaLgam', 'BLIA', 'Locus']
validDigits = {
'AvgLOC': 2, 'InvNSrc': 4, 'AvgCC': 4, 'SrcAvgDistTk': 2, 'SrcAvgNTk': 2, 'SrcRatioDict': 4, 'NSrc': 2, 'SrcNumCmt': 4, 'SrcNDistTk': 0, 'SrcLocalDistTk': 3, 'SrcRatioCmt': 4, 'SrcNumMhd': 4, 'RatioEnum': 4,
'RepAvgTk': 2, 'NReport': 0, 'RepNDistTk': 0, 'RepAvgDistTk': 3, 'RepAvgLocalTk':4, 'RepAvgCE': 4, 'RatioCode': 4, 'RatioSTrace': 4, '|STinterRT|': 0,
'AvgMinIRf': 4, 'AvgMaxIRf': 4, 'AvgMeanIRf': 4, 'KSDist': 4, 'AvgUIRf': 4, 'AvgProdIRf': 4, 'hasCE': 4,
'hasSTrace': 4, 'hasCR': 4, 'hasEnum': 4,
'NTk':2, 'NDistTk':3, 'NLocalTk':4, 'NDistCE':3
}
featureorders = {
'01': ['AvgLOC', 'AvgCC', 'SrcAvgNTk', 'SrcAvgDistTk', 'SrcLocalDistTk', 'SrcNDistTk', 'NSrc', 'InvNSrc',
'SrcNumMhd',
'SrcNumCmt', 'SrcRatioCmt', 'SrcRatioDict'],
'02': ['RatioEnum', 'RatioSTrace', 'RatioCode', 'RepNDistTk', 'RepAvgTk', 'RepAvgDistTk', 'RepAvgLocalTk', 'RepAvgCE',
'NReport'],
'03': ['|STinterRT|', 'KSDist', 'AvgProdIRf', 'AvgMinIRf', 'AvgMaxIRf', 'AvgMeanIRf', 'AvgUIRf'],
'04': ['hasEnum', 'hasSTrace', 'hasCR', 'hasCE'],
'05': ['NTk', 'NDistTk', 'NLocalTk', 'NDistCE']
}
def MWUtest(self, _dataA, _dataB, _bugsA=None, _bugsB=None):
'''
Mann-Whitney U Test between IRBL technique results
		:param _dataA: The results of Type A
		:param _dataB: The results of Type B
		:param _bugsA: the count of bugs for each technique
		:param _bugsB: the count of bugs for each technique
:return: {technique : pvalue, techinique: pvalue, ...}
'''
results = {}
for idx in range(len(self.techniques)):
filteredDataA = [items[idx] for items in _dataA.values()]
filteredDataB = [items[idx] for items in _dataB.values()]
#filteredDataA, labels = self.get_array_items(_dataA, idx)
#filteredDataB, labels = self.get_array_items(_dataB, idx)
if _bugsA is not None:
if isinstance(_bugsA, dict) is True:
filteredDataA += ([0] * (_bugsA[self.techniques[idx]] - len(filteredDataA)))
else:
filteredDataA += ([0] * (_bugsA - len(filteredDataA)))
if _bugsB is not None:
if isinstance(_bugsB, dict) is True:
filteredDataB += ([0] * (_bugsB[self.techniques[idx]] - len(filteredDataB)))
else:
filteredDataB += ([0] * (_bugsB - len(filteredDataB)))
#slope, intercept, r_value, p_value, stderr = stats.linregress(dataMAP, dataFeature)
t_statistic, t_pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='two-sided')
l_statistic, l_pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='less')
g_statistic, g_pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='greater')
pvalue = min(t_pvalue , l_pvalue, g_pvalue)
#statistic, pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='two-sided') # 'less', 'two-sided', 'greater'
results[self.techniques[idx]] = pvalue
return results
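	# Illustrative usage sketch (shapes taken from the docstring, values hypothetical):
	#   dataA = {'bug-1': [0.31, 0.28, 0.25, 0.27, 0.30, 0.26]}  # one value per technique
	#   dataB = {'bug-1': [0.29, 0.30, 0.24, 0.28, 0.31, 0.25]}
	#   pvalues = self.MWUtest(dataA, dataB, _bugsA=100, _bugsB=100)
	#   # -> {'BugLocator': <p-value>, 'BRTracer': <p-value>, ...}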
def get_technique_averages(self, _source, _counts):
'''
:param _source: project's bug results dict
		:param _counts: original bug counts for each technique
:return:
'''
results = {}
for idx in range(len(self.techniques)):
sumValue = 0
for itemID, item in _source.iteritems():
sumValue += item[idx]
results[self.techniques[idx]] = sumValue / float(_counts[self.techniques[idx]])
return results
def compare_single_results(self, _basepath):
'''
for Table 7 : single results
:param _basepath:
:return:
'''
techinques, CNTdata = self.load_results(os.path.join(_basepath, u'BugCNT.txt'), ['str'] * 2 + ['int'] * 6)
def get_averages(_itemType):
results = {}
for tData in ['Old', 'New_Single']:
filepath = os.path.join(_basepath, u'%s_%s.txt' % (tData, _itemType))
titles, data = self.load_results_items(filepath, ['str'] * 3 + ['float'] * 6)
for group in data:
if group not in results: results[group] = {}
for project in data[group]:
CNTs = dict(zip(titles, CNTdata[group][project]))
results[group][project] = self.get_technique_averages(data[group][project], CNTs)
return results
APresults = get_averages('AP')
TPresults = get_averages('TP')
features = self.extract_features(_basepath)
print(u'Technique Mann-Whitney U Test p-values')
print(u'\t' + u'\t\t'.join(self.techniques))
print(u'Subject\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR')
S = Subjects()
S.groups.append(u'Previous')
S.projects[u'Previous'] = [u'AspectJ', u'ZXing', u'PDE', u'JDT', u'SWT']
for group in S.groups:
for project in S.projects[group]:
text = u'%s' % project
APmax = self.techniques[0]
TPmax = self.techniques[0]
for tech in self.techniques:
if APresults[group][project][APmax] < APresults[group][project][tech]:
APmax = tech
if TPresults[group][project][TPmax] < TPresults[group][project][tech]:
TPmax = tech
for tech in self.techniques:
if APmax != tech: text += u' & %.4f' % APresults[group][project][tech]
else: text += u' & \\cellcolor{blue!25}\\textbf{%.4f}' % APresults[group][project][tech]
if TPmax != tech: text += u' & %.4f' % TPresults[group][project][tech]
else: text += u' & \\cellcolor{green!25}\\textbf{%.4f}' % TPresults[group][project][tech]
# if group in features:
# for fid in [u'RatioEnum', u'RatioSTrace', u'RatioCode', u'RepAvgTk']:
# text += u' & %.4f' % features[group][project][fid]
# text += u' \\\\'
# else:
# text += u' & & & & \\\\'
text += u' \\\\'
print(text)
pass
def compare_multi_results(self, _basepath):
'''
		for Table 7 : multiple results
:param _basepath:
:return:
'''
techinques, CNTdata = self.load_results(os.path.join(_basepath, u'BugCNT.txt'), ['str'] * 2 + ['int'] * 6)
def get_average_mwu(_itemType):
results = {}
multi = os.path.join(_basepath, u'New_Multiple_%s.txt' % _itemType)
titles, dataM = self.load_results_items(multi, ['str'] * 3 + ['float'] * 6)
# MWUresults = {}
# single = os.path.join(_basepath, u'New_Single_%s.txt' % _itemType)
# titles, dataS = self.load_results_items(single, ['str'] * 3 + ['float'] * 6)
for group in dataM:
if group not in results: results[group] = {}
#if group not in MWUresults: MWUresults[group] = {}
for project in dataM[group]:
CNTs = dict(zip(titles, CNTdata[group][project]))
results[group][project] = self.get_technique_averages(dataM[group][project], CNTs)
#MWUresults[group][project] = self.MWUtest(dataS[group][project], dataM[group][project], CNTs, CNTs)
return results #, MWUresults
APresults = get_average_mwu('AP')
TPresults = get_average_mwu('TP')
print(u'')
print(u'\t' + u'\t\t'.join(self.techniques))
print(u'Subject\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR')
S = Subjects()
for group in S.groups:
for project in S.projects[group]:
text = u'%s' % project
APmax = self.techniques[0]
TPmax = self.techniques[0]
for tech in self.techniques:
if APresults[group][project][APmax] < APresults[group][project][tech]:
APmax = tech
if TPresults[group][project][TPmax] < TPresults[group][project][tech]:
TPmax = tech
for tech in self.techniques:
if APmax != tech: text += u' & %.4f' % APresults[group][project][tech]
else: text += u' & \\cellcolor{blue!25}\\textbf{%.4f}' % APresults[group][project][tech]
if TPmax != tech: text += u' & %.4f ' % TPresults[group][project][tech]
else: text += u' & \\cellcolor{green!25}\\textbf{%.4f} ' % TPresults[group][project][tech]
print(text, end=u'')
print(u' \\\\')
pass
def extract_features(self, _basepath):
titles, data = self.load_results(os.path.join(_basepath, u'02_PW_Bug_Features.txt'), ['str'] * 2 + ['int'] + ['float'] * 3 + ['int', 'float'] )
for group in data:
for project in data[group]:
item = data[group][project]
data[group][project] = dict(zip([u'RatioEnum', u'RatioSTrace', u'RatioCode', u'RepAvgTk'], [item[1], item[2], item[3], item[5]]))
return data
###############################################################################################################
###############################################################################################################
if __name__ == "__main__":
basepath = u'/mnt/exp/Bug/analysis/'
obj = MWUTest()
obj.compare_multi_results(basepath)
obj.compare_single_results(basepath)
# obj.compare_test(basepath)
#obj.calc_pearson(basepath)
#obj.compare_dup_results(basepath)
| apache-2.0 |
epam/DLab | infrastructure-provisioning/src/general/lib/aws/meta_lib.py | 1 | 37481 | # *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import boto3
from botocore.client import Config
import json, urllib2
import time
import logging
import traceback
import sys
import backoff
import random
import string
from dlab.fab import *
import actions_lib
def get_instance_hostname(tag_name, instance_name):
try:
public = ''
private = ''
ec2 = boto3.resource('ec2')
instances = ec2.instances.filter(
Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [instance_name]},
{'Name': 'instance-state-name', 'Values': ['running']}])
for instance in instances:
public = getattr(instance, 'public_dns_name')
private = getattr(instance, 'private_dns_name')
if public:
return public
else:
return private
if public == '' and private == '':
raise Exception("Unable to find instance hostname with instance name: " + instance_name)
except Exception as err:
logging.error("Error with finding instance hostname with instance name: " + instance_name + " : " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with finding instance hostname", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_vpc_endpoints(vpc_id):
try:
# Returns LIST of Endpoint DICTIONARIES
ec2 = boto3.client('ec2')
endpoints = ec2.describe_vpc_endpoints(
Filters=[{
'Name': 'vpc-id',
'Values': [vpc_id]
}]
).get('VpcEndpoints')
return endpoints
except Exception as err:
logging.error("Error with getting VPC Endpoints: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting VPC Endpoints", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_route_tables(vpc, tags):
try:
ec2 = boto3.client('ec2')
tag_name = json.loads(tags).get('Key')
tag_value = json.loads(tags).get('Value')
rts = []
result = ec2.describe_route_tables(
Filters=[
{'Name': 'vpc-id', 'Values': [vpc]},
{'Name': 'tag-key', 'Values': [tag_name]},
{'Name': 'tag-value', 'Values': [tag_value]}
]
).get('RouteTables')
for i in result:
rts.append(i.get('RouteTableId'))
return rts
except Exception as err:
logging.error("Error with getting Route tables: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting Route tables", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_bucket_by_name(bucket_name):
try:
s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))
for bucket in s3.buckets.all():
if bucket.name == bucket_name:
return bucket.name
return ''
except Exception as err:
logging.error("Error with getting bucket by name: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting bucket by name", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_instance_ip_address(tag_name, instance_name):
try:
ec2 = boto3.resource('ec2')
instances = ec2.instances.filter(
Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [instance_name]},
{'Name': 'instance-state-name', 'Values': ['running']}])
ips = {}
for instance in instances:
public = getattr(instance, 'public_ip_address')
private = getattr(instance, 'private_ip_address')
ips = {'Public': public, 'Private': private}
if ips == {}:
raise Exception("Unable to find instance IP addresses with instance name: " + instance_name)
return ips
except Exception as err:
logging.error("Error with getting ip address by name: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting ip address by name", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_instance_ip_address_by_id(instance_id):
try:
ec2 = boto3.resource('ec2')
instances = ec2.instances.filter(
Filters = [{'Name': 'instance-id', 'Values': [instance_id]},
{'Name': 'instance-state-name', 'Values': ['running']}])
ips = {}
for instance in instances:
public = getattr(instance, 'public_ip_address')
private = getattr(instance, 'private_ip_address')
ips = {'Public': public, 'Private': private}
if ips == {}:
raise Exception("Unable to find instance IP addresses with instance id: " + instance_id)
return ips
except Exception as err:
logging.error("Error with getting ip address by id: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting ip address by id", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_instance_private_ip_address(tag_name, instance_name):
try:
actions_lib.create_aws_config_files()
return get_instance_ip_address(tag_name, instance_name).get('Private')
except Exception as err:
logging.error("Error with getting private ip address by name: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting private ip address by name", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
@backoff.on_predicate(backoff.fibo, max_tries=5)
def get_ami_id_by_name(ami_name, state="*"):
ec2 = boto3.resource('ec2')
try:
for image in ec2.images.filter(Filters=[{'Name': 'name', 'Values': [ami_name]}, {'Name': 'state', 'Values': [state]}]):
return image.id
except Exception as err:
logging.error("Error with getting AMI ID by name: " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting AMI ID by name",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
return ''
def get_ami_id_by_instance_name(instance_name):
ec2 = boto3.resource('ec2')
try:
for instance in ec2.instances.filter(Filters=[{'Name': 'tag:{}'.format('Name'), 'Values': [instance_name]}]):
return instance.image_id
except Exception as err:
logging.error("Error with getting AMI ID by instance name: " + str(
err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting AMI ID by instance name",
"error_message": str(
err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
return ''
def get_security_group_by_name(security_group_name):
try:
ec2 = boto3.resource('ec2')
for security_group in ec2.security_groups.filter(Filters=[{'Name': 'group-name', 'Values': [security_group_name]}]):
return security_group.id
except Exception as err:
logging.error("Error with getting Security Group ID by name: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting Security Group ID by name",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
return ''
return ''
def get_instance_attr(instance_id, attribute_name):
try:
ec2 = boto3.resource('ec2')
instances = ec2.instances.filter(
Filters=[{'Name': 'instance-id', 'Values': [instance_id]},
{'Name': 'instance-state-name', 'Values': ['running']}])
for instance in instances:
return getattr(instance, attribute_name)
return ''
except Exception as err:
logging.error("Error with getting instance attribute: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting instance attribute",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_instance_by_name(tag_name, instance_name):
try:
ec2 = boto3.resource('ec2')
instances = ec2.instances.filter(
Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': [instance_name]},
{'Name': 'instance-state-name', 'Values': ['running','pending','stopping','stopped']}])
for instance in instances:
return instance.id
return ''
except Exception as err:
logging.error("Error with getting instance ID by name: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting instance ID by name",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_role_by_name(role_name):
try:
iam = boto3.resource('iam')
for role in iam.roles.all():
if role.name == role_name:
return role.name
return ''
except Exception as err:
logging.error("Error with getting role by name: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting role by name",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_subnet_by_cidr(cidr, vpc_id=''):
try:
ec2 = boto3.resource('ec2')
if vpc_id:
for subnet in ec2.subnets.filter(Filters=[
{'Name': 'cidrBlock', 'Values': [cidr]},
{'Name': 'vpc-id', 'Values': [vpc_id]}
]):
return subnet.id
else:
for subnet in ec2.subnets.filter(Filters=[
{'Name': 'cidrBlock', 'Values': [cidr]}
]):
return subnet.id
return ''
except Exception as err:
logging.error("Error with getting Subnet ID by CIDR: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting Subnet ID by CIDR",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_subnet_by_tag(tag, subnet_id=False, vpc_id=''):
try:
ec2 = boto3.resource('ec2')
if vpc_id:
for subnet in ec2.subnets.filter(Filters=[
{'Name': 'tag-key', 'Values': [tag.get('Key')]},
{'Name': 'tag-value', 'Values': [tag.get('Value')]},
{'Name': 'vpc-id', 'Values': [vpc_id]}
]):
if subnet_id:
return subnet.id
else:
return subnet.cidr_block
else:
for subnet in ec2.subnets.filter(Filters=[
{'Name': 'tag-key', 'Values': [tag.get('Key')]},
{'Name': 'tag-value', 'Values': [tag.get('Value')]}
]):
if subnet_id:
return subnet.id
else:
return subnet.cidr_block
return ''
except Exception as err:
logging.error("Error with getting Subnet CIDR block by tag: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting Subnet CIDR block by tag",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_vpc_by_cidr(cidr):
try:
ec2 = boto3.resource('ec2')
for vpc in ec2.vpcs.filter(Filters=[{'Name': 'cidr', 'Values': [cidr]}]):
return vpc.id
return ''
except Exception as err:
logging.error("Error with getting VPC ID by CIDR: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting VPC ID by CIDR",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_vpc_by_tag(tag_name, tag_value):
try:
ec2 = boto3.resource('ec2')
for vpc in ec2.vpcs.filter(Filters=[{'Name': 'tag-key', 'Values': [tag_name]}, {'Name': 'tag-value', 'Values': [tag_value]}]):
return vpc.id
return ''
except Exception as err:
logging.error("Error with getting VPC ID by tag: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting VPC ID by tag",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_emr_info(id, key=''):
try:
emr = boto3.client('emr')
info = emr.describe_cluster(ClusterId=id)['Cluster']
if key:
try:
result = info[key]
except:
print("Cluster has no {} attribute".format(key))
result = info
else:
result = info
return result
except Exception as err:
logging.error("Error with getting EMR information: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting EMR information",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_emr_list(tag_name, type='Key', emr_count=False, emr_active=False):
try:
emr = boto3.client('emr')
if emr_count:
clusters = emr.list_clusters(
ClusterStates=['RUNNING', 'WAITING', 'STARTING', 'BOOTSTRAPPING', 'TERMINATING']
)
else:
clusters = emr.list_clusters(
ClusterStates=['RUNNING', 'WAITING', 'STARTING', 'BOOTSTRAPPING']
)
if emr_active:
clusters = emr.list_clusters(
ClusterStates=['RUNNING', 'STARTING', 'BOOTSTRAPPING', 'TERMINATING']
)
clusters = clusters.get('Clusters')
clusters_list = []
for i in clusters:
response = emr.describe_cluster(ClusterId=i.get('Id'))
time.sleep(5)
tag = response.get('Cluster').get('Tags')
for j in tag:
if tag_name in j.get(type):
clusters_list.append(i.get('Id'))
return clusters_list
except Exception as err:
logging.error("Error with getting EMR list: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting EMR list",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_not_configured_emr_list(tag_name, instance_name):
try:
emr = boto3.client('emr')
clusters = emr.list_clusters(ClusterStates=['WAITING'])
clusters = clusters.get('Clusters')
clusters_list = []
for i in clusters:
tags_found = 0
response = emr.describe_cluster(ClusterId=i.get('Id'))
time.sleep(5)
tag = response.get('Cluster').get('Tags')
for j in tag:
if tag_name in j.get('Key'):
tags_found += 1
if instance_name in j.get('Value'):
tags_found += 1
if tags_found >= 2:
clusters_list.append(i.get('Id'))
return clusters_list
except Exception as err:
logging.error("Error with getting not configured EMR list: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting not configured EMR list",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_not_configured_emr(tag_name, instance_name, return_name=False):
try:
emr = boto3.client('emr')
clusters_list = get_not_configured_emr_list(tag_name, instance_name)
if clusters_list:
for cluster_id in clusters_list:
response = emr.describe_cluster(ClusterId=cluster_id)
time.sleep(5)
tag = response.get('Cluster').get('Tags')
for j in tag:
if j.get('Value') == 'not-configured':
if return_name:
return response.get('Cluster').get('Name')
else:
return True
return False
else:
return False
except Exception as err:
logging.error("Error with getting not configured EMR list: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting not configured EMR list",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_emr_id_by_name(name):
try:
cluster_id = ''
emr = boto3.client('emr')
clusters = emr.list_clusters(
ClusterStates=['RUNNING', 'WAITING', 'STARTING', 'BOOTSTRAPPING']
)
clusters = clusters.get('Clusters')
for i in clusters:
response = emr.describe_cluster(ClusterId=i.get('Id'))
time.sleep(5)
if response.get('Cluster').get('Name') == name:
cluster_id = i.get('Id')
if cluster_id == '':
raise Exception("Unable to find EMR cluster by name: " + name)
return cluster_id
except Exception as err:
logging.error("Error with getting EMR ID by name: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting EMR ID by name",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_emr_instances_list(cluster_id, instance_type=''):
#instance_type 'MASTER' or 'CORE'
try:
emr = boto3.client('emr')
if instance_type != '':
instances = emr.list_instances(ClusterId=cluster_id, InstanceGroupTypes=[instance_type])
else:
instances = emr.list_instances(ClusterId=cluster_id)
return instances.get('Instances')
except Exception as err:
logging.error("Error with getting EMR instances list: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting EMR instances list",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_ec2_list(tag_name, value=''):
try:
ec2 = boto3.resource('ec2')
if value:
notebook_instances = ec2.instances.filter(
Filters=[{'Name': 'instance-state-name', 'Values': ['running', 'stopped']},
{'Name': 'tag:{}'.format(tag_name), 'Values': ['{}*'.format(value)]}])
else:
notebook_instances = ec2.instances.filter(
Filters=[{'Name': 'instance-state-name', 'Values': ['running', 'stopped']},
{'Name': 'tag:{}'.format(tag_name), 'Values': ['*nb*']}])
return notebook_instances
except Exception as err:
logging.error("Error with getting EC2 list: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting EC2 list",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def provide_index(resource_type, tag_name, tag_value=''):
try:
ids = []
if resource_type == 'EMR':
if tag_value:
list = get_emr_list(tag_value, 'Value', True)
else:
list = get_emr_list(tag_name, 'Key', True)
emr = boto3.client('emr')
for i in list:
response = emr.describe_cluster(ClusterId=i)
time.sleep(5)
number = response.get('Cluster').get('Name').split('-')[-1]
if number not in ids:
ids.append(int(number))
elif resource_type == 'EC2':
if tag_value:
list = get_ec2_list(tag_name, tag_value)
else:
list = get_ec2_list(tag_name)
for i in list:
for tag in i.tags:
if tag['Key'] == 'Name':
ids.append(int(tag['Value'].split('-')[-1]))
else:
print("Incorrect resource type!")
index = 1
while True:
if index not in ids:
break
else:
index += 1
return index
except Exception as err:
logging.error("Error with providing index: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with providing index",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
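# Illustrative: if the only matching instances are tagged 'nb-1' and 'nb-3',
# provide_index('EC2', tag_name) returns 2, the smallest unused index.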
def get_route_table_by_tag(tag_name, tag_value):
try:
client = boto3.client('ec2')
route_tables = client.describe_route_tables(
Filters=[{'Name': 'tag:{}'.format(tag_name), 'Values': ['{}'.format(tag_value)]}])
rt_id = route_tables.get('RouteTables')[0].get('RouteTableId')
return rt_id
except Exception as err:
logging.error("Error with getting Route table by tag: " + str(err) + "\n Traceback: " + traceback.print_exc(
file=sys.stdout))
append_result(str({"error": "Error with getting Route table by tag",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
@backoff.on_predicate(backoff.fibo, max_tries=4)
def get_ami_id(ami_name):
try:
client = boto3.client('ec2')
image_id = ''
response = client.describe_images(
Filters=[
{
'Name': 'name',
'Values': [ami_name]
},
{
'Name': 'virtualization-type', 'Values': ['hvm']
},
{
'Name': 'state', 'Values': ['available']
},
{
'Name': 'root-device-name', 'Values': ['/dev/sda1']
},
{
'Name': 'root-device-type', 'Values': ['ebs']
},
{
'Name': 'architecture', 'Values': ['x86_64']
}
])
response = response.get('Images')
for i in response:
image_id = i.get('ImageId')
if image_id == '':
raise Exception("Unable to find image id with name: " + ami_name)
return image_id
except Exception as err:
logging.error("Failed to find AMI: " + ami_name + " : " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Unable to find AMI", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_iam_profile(profile_name, count=0):
client = boto3.client('iam')
iam_profile = ''
try:
if count < 10:
response = client.get_instance_profile(InstanceProfileName=profile_name)
iam_profile = response.get('InstanceProfile').get('InstanceProfileName')
time.sleep(5)
print('IAM profile checked. Creating instance...')
else:
print("Unable to find IAM profile by name: {}".format(profile_name))
return False
except:
count += 1
print('IAM profile is not available yet. Waiting...')
time.sleep(5)
        return get_iam_profile(profile_name, count)
print(iam_profile)
return iam_profile
def check_security_group(security_group_name, count=0):
try:
ec2 = boto3.resource('ec2')
if count < 20:
for security_group in ec2.security_groups.filter(Filters=[{'Name': 'group-name', 'Values': [security_group_name]}]):
while security_group.id == '':
count = count + 1
time.sleep(10)
print("Security group is not available yet. Waiting...")
check_security_group(security_group_name, count)
if security_group.id == '':
raise Exception("Unable to check Security group by name: " + security_group_name)
return security_group.id
except Exception as err:
logging.error("Error with checking Security group by name: " + security_group_name + " : " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with checking Security group by name", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def emr_waiter(tag_name):
    if len(get_emr_list(tag_name, 'Value', False, True)) > 0 or os.path.exists('/response/.emr_creating_' + os.environ['exploratory_name']) or get_not_configured_emr(tag_name):
with hide('stderr', 'running', 'warnings'):
local("echo 'Some EMR cluster is still being created/terminated, waiting..'")
time.sleep(60)
emr_waiter(tag_name)
else:
return True
def get_spark_version(cluster_name):
spark_version = ''
emr = boto3.client('emr')
clusters = emr.list_clusters(ClusterStates=['WAITING'])
clusters = clusters.get('Clusters')
for i in clusters:
response = emr.describe_cluster(ClusterId=i.get('Id'))
time.sleep(5)
if response.get("Cluster").get("Name") == cluster_name:
response = response.get("Cluster").get("Applications")
for j in response:
if j.get("Name") == 'Spark':
spark_version = j.get("Version")
return spark_version
def get_hadoop_version(cluster_name):
hadoop_version = ''
emr = boto3.client('emr')
clusters = emr.list_clusters(ClusterStates=['WAITING'])
clusters = clusters.get('Clusters')
for i in clusters:
response = emr.describe_cluster(ClusterId=i.get('Id'))
time.sleep(5)
if response.get("Cluster").get("Name") == cluster_name:
response = response.get("Cluster").get("Applications")
for j in response:
if j.get("Name") == 'Hadoop':
hadoop_version = j.get("Version")
return hadoop_version[0:3]
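# Illustrative: an EMR Hadoop version string of '2.7.3' is truncated to '2.7'.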
def get_instance_status(tag_name, instance_name):
client = boto3.client('ec2')
response = client.describe_instances(Filters=[
{'Name': 'tag:{}'.format(tag_name), 'Values': [instance_name]}]).get('Reservations')
for i in response:
if len(response) > 1:
inst = i.get('Instances')
for j in inst:
if j.get('State').get('Name') == 'running':
return j.get('State').get('Name')
else:
inst = i.get('Instances')
for j in inst:
return j.get('State').get('Name')
return 'not-running'
def get_list_instance_statuses(instance_ids):
data = []
client = boto3.client('ec2')
for h in instance_ids:
host = {}
try:
response = client.describe_instances(InstanceIds=[h.get('id')]).get('Reservations')
for i in response:
inst = i.get('Instances')
for j in inst:
host['id'] = j.get('InstanceId')
host['status'] = j.get('State').get('Name')
data.append(host)
except:
host['id'] = h.get('id')
host['status'] = 'terminated'
data.append(host)
return data
def get_list_cluster_statuses(cluster_ids, data=None):
    # Avoid a shared mutable default argument; results would otherwise accumulate across calls.
    if data is None:
        data = []
client = boto3.client('emr')
for i in cluster_ids:
host = {}
try:
response = client.describe_cluster(ClusterId=i.get('id')).get('Cluster')
host['id'] = i.get('id')
if response.get('Status').get('State').lower() == 'waiting':
host['status'] = 'running'
elif response.get('Status').get('State').lower() == 'running':
host['status'] = 'configuring'
else:
host['status'] = response.get('Status').get('State').lower()
data.append(host)
except:
host['id'] = i.get('id')
host['status'] = 'terminated'
data.append(host)
return data
def get_allocation_id_by_elastic_ip(elastic_ip):
try:
client = boto3.client('ec2')
response = client.describe_addresses(PublicIps=[elastic_ip]).get('Addresses')
for i in response:
return i.get('AllocationId')
except Exception as err:
logging.error("Error with getting allocation id by elastic ip: " + elastic_ip + " : " + str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting allocation id by elastic ip", "error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_ec2_price(instance_shape, region):
try:
regions = {'us-west-2': 'Oregon', 'us-east-1': 'N. Virginia', 'us-east-2': 'Ohio', 'us-west-1': 'N. California',
'ca-central-1': 'Central', 'eu-west-1': 'Ireland', 'eu-central-1': 'Frankfurt',
'eu-west-2': 'London', 'ap-northeast-1': 'Tokyo', 'ap-northeast-2': 'Seoul',
'ap-southeast-1': 'Singapore', 'ap-southeast-2': 'Sydney', 'ap-south-1': 'Mumbai',
'sa-east-1': 'Sao Paulo'}
response = urllib2.urlopen(
'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json')
price_json = response.read()
pricing = json.loads(price_json)
for i in pricing.get('products'):
if pricing.get('products').get(i).get('attributes').get('instanceType') == instance_shape\
and pricing.get('products').get(i).get('attributes').get('operatingSystem') == 'Linux' \
and regions.get(region) in pricing.get('products').get(i).get('attributes').get('location') \
and pricing.get('products').get(i).get('attributes').get('tenancy') == 'Shared':
for j in pricing.get('terms').get('OnDemand').get(i):
for h in pricing.get('terms').get('OnDemand').get(i).get(j).get('priceDimensions'):
return float(pricing.get('terms').get('OnDemand').get(i).get(j).get('priceDimensions').get(h).
get('pricePerUnit').get('USD'))
except Exception as err:
logging.error("Error with getting EC2 price: " + str(err) + "\n Traceback: " +
traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting EC2 price",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
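# Illustrative usage (requires network access to the public AWS pricing index):
#   hourly_usd = get_ec2_price('t2.medium', 'us-east-1')  # e.g. a float like 0.0464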
def get_spot_instances_status(cluster_id):
try:
ec2 = boto3.client('ec2')
emr = boto3.client('emr')
ec2_ids = emr.list_instances(ClusterId=cluster_id).get('Instances')
ids_list = []
for ins in ec2_ids:
ids_list.append(ins.get('Ec2InstanceId'))
response = ec2.describe_spot_instance_requests(Filters=[
{'Name': 'instance-id', 'Values': ids_list}]).get('SpotInstanceRequests')
if response:
for i in response:
if i.get('Status').get('Code') != 'fulfilled':
return False, i.get('Status').get('Code'), i.get('Status').get('Message')
return True, i.get('Status').get('Code'), "Spot instances have been successfully created!"
return False, None, "Spot instances status weren't received for cluster id {}".format(cluster_id)
except Exception as err:
logging.error("Error with getting Spot instances status: " + str(err) + "\n Traceback: " +
traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with getting Spot instances status",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def node_count(cluster_name):
try:
ec2 = boto3.client('ec2')
node_list = ec2.describe_instances(Filters=[
{'Name': 'instance-state-name', 'Values': ['running']},
{'Name': 'tag:Name', 'Values': [cluster_name + '*']}]).get('Reservations')
result = len(node_list)
return result
except Exception as err:
logging.error("Error with counting nodes in cluster: " + str(err) + "\n Traceback: " +
traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error with counting nodes in cluster",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
def get_list_private_ip_by_conf_type_and_id(conf_type, instance_id):
try:
private_list_ip = []
if conf_type == 'edge_node':
private_list_ip.append(
get_instance_ip_address_by_id(
instance_id).get('Private'))
elif conf_type == 'exploratory':
private_list_ip.append(
get_instance_ip_address('Name', instance_id).get('Private'))
elif conf_type == 'computational_resource':
group_tag_name = os.environ['conf_service_base_name'] + ':' + instance_id
print(group_tag_name)
instance_list = get_ec2_list('user:tag', group_tag_name)
for instance in instance_list:
private_list_ip.append(
get_instance_ip_address_by_id(instance.id).get('Private'))
return private_list_ip
except Exception as err:
logging.error("Error getting private ip by conf_type and id: " + str(err) + "\n Traceback: " +
traceback.print_exc(file=sys.stdout))
append_result(str({"error": "Error getting private ip by conf_type and id",
"error_message": str(err) + "\n Traceback: " + traceback.print_exc(file=sys.stdout)}))
traceback.print_exc(file=sys.stdout)
| apache-2.0 |
bestvibes/neo4j-social-network | env/lib/python2.7/encodings/mac_turkish.py | 593 | 13769 | """ Python Character Mapping Codec mac_turkish generated from 'MAPPINGS/VENDORS/APPLE/TURKISH.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-turkish',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
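### Usage sketch (illustrative; not part of the generated module)
#
#   import codecs
#   codecs.register(lambda name: getregentry() if name == 'mac-turkish' else None)
#   assert u'\xdc'.encode('mac-turkish') == '\x86'  # U+00DC maps to 0x86 below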
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u011e' # 0xDA -> LATIN CAPITAL LETTER G WITH BREVE
u'\u011f' # 0xDB -> LATIN SMALL LETTER G WITH BREVE
u'\u0130' # 0xDC -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u0131' # 0xDD -> LATIN SMALL LETTER DOTLESS I
u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u015f' # 0xDF -> LATIN SMALL LETTER S WITH CEDILLA
u'\u2021' # 0xE0 -> DOUBLE DAGGER
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\uf8a0' # 0xF5 -> undefined1
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
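# Usage sketch (assumes this module is importable under the standard
# `encodings` package search path, as it is for the stdlib copy of this
# codec; the sample bytes below are read off the decoding table above):
#
# >>> u'\u011f\u0130'.encode('mac-turkish')
# '\xdb\xdc'
# >>> '\xdb\xdc'.decode('mac-turkish')
# u'\u011f\u0130'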
| mit |
icic13/IntroductiontoComputerScienceandProgrammingSpring2011 | Problem Set 1: Paying Off Credit Card Debt/Problem Set 1.py | 1 | 1075 | #Rana
principal = float(
raw_input('Enter the outstanding balance on your credit card: '))
print(principal)
interest_rate = float(
raw_input('Enter the annual credit card interest rate as decimal: '))
print(interest_rate)
min_monthly_payment_rate = float(
raw_input('Enter the minimal monthly payment rate as decimal: '))
print(min_monthly_payment_rate)
total_paid = 0
for month in range(1, 13):
    print('Month: ' + str(month))
    minimum_payment = min_monthly_payment_rate * principal
    total_paid += minimum_payment
print('Minimum monthly payment: $' + str(round(minimum_payment,2)))
interest_paid = interest_rate / 12.0 * principal
principal_paid = minimum_payment - interest_paid
print('Principal paid: $' + str(round(principal_paid,2)))
remaining_balance = principal - principal_paid
print('Remaining balance: $' + str(round(remaining_balance,2)))
principal = remaining_balance
print('RESULT')
print('Total amount paid: $'+str(round(total_paid,2)))
print('Remaining balance: $'+str(round(remaining_balance,2)))
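# Worked example of one loop iteration (illustrative inputs, not part of the
# assignment): with principal = 1200.00, interest_rate = 0.18 and
# min_monthly_payment_rate = 0.02,
#   minimum_payment = 0.02 * 1200.00 = 24.00
#   interest_paid = 0.18 / 12.0 * 1200.00 = 18.00
#   principal_paid = 24.00 - 18.00 = 6.00
#   remaining_balance = 1200.00 - 6.00 = 1194.00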
| gpl-3.0 |
acq4/acq4 | acq4/analysis/modules/pbm_ImageAnalysis/ctrlPhysiologyTemplate.py | 3 | 10355 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'acq4/analysis/modules/pbm_ImageAnalysis/ctrlPhysiologyTemplate.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from acq4.util import Qt
try:
_fromUtf8 = Qt.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = Qt.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return Qt.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return Qt.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(315, 410)
sizePolicy = Qt.QSizePolicy(Qt.QSizePolicy.Preferred, Qt.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
font = Qt.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
Form.setFont(font)
self.gridLayout = Qt.QGridLayout(Form)
self.gridLayout.setMargin(0)
self.gridLayout.setHorizontalSpacing(10)
self.gridLayout.setVerticalSpacing(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.groupBox = Qt.QGroupBox(Form)
sizePolicy = Qt.QSizePolicy(Qt.QSizePolicy.Preferred, Qt.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setAlignment(Qt.Qt.AlignCenter)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_2 = Qt.QGridLayout(self.groupBox)
self.gridLayout_2.setSizeConstraint(Qt.QLayout.SetNoConstraint)
self.gridLayout_2.setMargin(0)
self.gridLayout_2.setHorizontalSpacing(5)
self.gridLayout_2.setVerticalSpacing(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.line = Qt.QFrame(self.groupBox)
self.line.setFrameShape(Qt.QFrame.VLine)
self.line.setFrameShadow(Qt.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.gridLayout_2.addWidget(self.line, 1, 1, 2, 1)
self.line_2 = Qt.QFrame(self.groupBox)
self.line_2.setFrameShape(Qt.QFrame.HLine)
self.line_2.setFrameShadow(Qt.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.gridLayout_2.addWidget(self.line_2, 0, 0, 1, 1)
self.widget_2 = Qt.QWidget(self.groupBox)
self.widget_2.setObjectName(_fromUtf8("widget_2"))
self.groupBox_2 = Qt.QGroupBox(self.widget_2)
self.groupBox_2.setGeometry(Qt.QRect(10, 5, 286, 156))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.layoutWidget = Qt.QWidget(self.groupBox_2)
self.layoutWidget.setGeometry(Qt.QRect(10, 25, 266, 66))
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.gridLayout_4 = Qt.QGridLayout(self.layoutWidget)
self.gridLayout_4.setMargin(9)
self.gridLayout_4.setSpacing(6)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.label_7 = Qt.QLabel(self.layoutWidget)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_4.addWidget(self.label_7, 0, 0, 1, 1)
self.label_4 = Qt.QLabel(self.layoutWidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_4.addWidget(self.label_4, 1, 0, 1, 1)
self.ImagePhys_PhysThresh = Qt.QDoubleSpinBox(self.layoutWidget)
self.ImagePhys_PhysThresh.setAlignment(Qt.Qt.AlignRight|Qt.Qt.AlignTrailing|Qt.Qt.AlignVCenter)
self.ImagePhys_PhysThresh.setDecimals(1)
self.ImagePhys_PhysThresh.setMinimum(-1998.0)
self.ImagePhys_PhysThresh.setMaximum(2000.0)
self.ImagePhys_PhysThresh.setSingleStep(5.0)
self.ImagePhys_PhysThresh.setProperty("value", -50.0)
self.ImagePhys_PhysThresh.setObjectName(_fromUtf8("ImagePhys_PhysThresh"))
self.gridLayout_4.addWidget(self.ImagePhys_PhysThresh, 1, 1, 1, 1)
self.ImagePhys_PhysLPF = Qt.QDoubleSpinBox(self.layoutWidget)
self.ImagePhys_PhysLPF.setAlignment(Qt.Qt.AlignRight|Qt.Qt.AlignTrailing|Qt.Qt.AlignVCenter)
self.ImagePhys_PhysLPF.setMinimum(-5000.0)
self.ImagePhys_PhysLPF.setMaximum(50000.0)
self.ImagePhys_PhysLPF.setProperty("value", 2500.0)
self.ImagePhys_PhysLPF.setObjectName(_fromUtf8("ImagePhys_PhysLPF"))
self.gridLayout_4.addWidget(self.ImagePhys_PhysLPF, 0, 1, 1, 1)
self.ImagePhys_DetectSpikes = Qt.QPushButton(self.groupBox_2)
self.ImagePhys_DetectSpikes.setGeometry(Qt.QRect(75, 100, 137, 32))
self.ImagePhys_DetectSpikes.setMinimumSize(Qt.QSize(5, 0))
self.ImagePhys_DetectSpikes.setObjectName(_fromUtf8("ImagePhys_DetectSpikes"))
self.groupBox_3 = Qt.QGroupBox(self.widget_2)
self.groupBox_3.setGeometry(Qt.QRect(15, 160, 281, 221))
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.layoutWidget1 = Qt.QWidget(self.groupBox_3)
self.layoutWidget1.setGeometry(Qt.QRect(15, 25, 236, 97))
self.layoutWidget1.setObjectName(_fromUtf8("layoutWidget1"))
self.gridLayout_3 = Qt.QGridLayout(self.layoutWidget1)
self.gridLayout_3.setMargin(9)
self.gridLayout_3.setSpacing(6)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.label_5 = Qt.QLabel(self.layoutWidget1)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout_3.addWidget(self.label_5, 2, 0, 1, 2)
self.ImagePhys_burstISI = Qt.QDoubleSpinBox(self.layoutWidget1)
font = Qt.QFont()
font.setPointSize(12)
self.ImagePhys_burstISI.setFont(font)
self.ImagePhys_burstISI.setAlignment(Qt.Qt.AlignRight|Qt.Qt.AlignTrailing|Qt.Qt.AlignVCenter)
self.ImagePhys_burstISI.setDecimals(1)
self.ImagePhys_burstISI.setMinimum(1.0)
self.ImagePhys_burstISI.setMaximum(1000.0)
self.ImagePhys_burstISI.setSingleStep(10.0)
self.ImagePhys_burstISI.setProperty("value", 100.0)
self.ImagePhys_burstISI.setObjectName(_fromUtf8("ImagePhys_burstISI"))
self.gridLayout_3.addWidget(self.ImagePhys_burstISI, 0, 2, 1, 1)
self.ImagePhys_minBurstSpikes = Qt.QSpinBox(self.layoutWidget1)
font = Qt.QFont()
font.setPointSize(12)
self.ImagePhys_minBurstSpikes.setFont(font)
self.ImagePhys_minBurstSpikes.setAlignment(Qt.Qt.AlignRight|Qt.Qt.AlignTrailing|Qt.Qt.AlignVCenter)
self.ImagePhys_minBurstSpikes.setMinimum(2)
self.ImagePhys_minBurstSpikes.setMaximum(20)
self.ImagePhys_minBurstSpikes.setProperty("value", 3)
self.ImagePhys_minBurstSpikes.setObjectName(_fromUtf8("ImagePhys_minBurstSpikes"))
self.gridLayout_3.addWidget(self.ImagePhys_minBurstSpikes, 2, 2, 1, 1)
self.label_9 = Qt.QLabel(self.layoutWidget1)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout_3.addWidget(self.label_9, 0, 0, 1, 2)
self.ImagePhys_withinBurstISI = Qt.QDoubleSpinBox(self.layoutWidget1)
font = Qt.QFont()
font.setPointSize(12)
self.ImagePhys_withinBurstISI.setFont(font)
self.ImagePhys_withinBurstISI.setAlignment(Qt.Qt.AlignRight|Qt.Qt.AlignTrailing|Qt.Qt.AlignVCenter)
self.ImagePhys_withinBurstISI.setDecimals(1)
self.ImagePhys_withinBurstISI.setMinimum(1.0)
self.ImagePhys_withinBurstISI.setMaximum(1000.0)
self.ImagePhys_withinBurstISI.setSingleStep(2.0)
self.ImagePhys_withinBurstISI.setProperty("value", 40.0)
self.ImagePhys_withinBurstISI.setObjectName(_fromUtf8("ImagePhys_withinBurstISI"))
self.gridLayout_3.addWidget(self.ImagePhys_withinBurstISI, 1, 2, 1, 1)
self.label_8 = Qt.QLabel(self.layoutWidget1)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_3.addWidget(self.label_8, 1, 0, 1, 1)
self.ImagePhys_RevSTA = Qt.QPushButton(self.groupBox_3)
self.ImagePhys_RevSTA.setEnabled(False)
self.ImagePhys_RevSTA.setGeometry(Qt.QRect(35, 185, 93, 32))
self.ImagePhys_RevSTA.setObjectName(_fromUtf8("ImagePhys_RevSTA"))
self.ImagePhys_BTA = Qt.QPushButton(self.groupBox_3)
self.ImagePhys_BTA.setGeometry(Qt.QRect(35, 155, 195, 32))
self.ImagePhys_BTA.setObjectName(_fromUtf8("ImagePhys_BTA"))
self.ImagePhys_STA = Qt.QPushButton(self.groupBox_3)
self.ImagePhys_STA.setGeometry(Qt.QRect(35, 125, 197, 32))
self.ImagePhys_STA.setObjectName(_fromUtf8("ImagePhys_STA"))
self.gridLayout_2.addWidget(self.widget_2, 1, 0, 2, 1)
self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 1)
self.retranslateUi(Form)
Qt.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "Physiology Analysis Functions", None))
self.groupBox_2.setTitle(_translate("Form", "Physiology", None))
self.label_7.setText(_translate("Form", "LPF", None))
self.label_4.setText(_translate("Form", "Event Thresh", None))
self.ImagePhys_PhysThresh.setSuffix(_translate("Form", " pA", None))
self.ImagePhys_DetectSpikes.setText(_translate("Form", "Detect Spikes", None))
self.groupBox_3.setTitle(_translate("Form", "Spike Triggered Averages", None))
self.label_5.setText(_translate("Form", "Minimum # spikes/burst", None))
self.label_9.setText(_translate("Form", "Min Interburst Interval", None))
self.label_8.setText(_translate("Form", "Max burst ISI (msec)", None))
self.ImagePhys_RevSTA.setText(_translate("Form", "Rev STA", None))
self.ImagePhys_BTA.setText(_translate("Form", "Burst-triggered Average", None))
self.ImagePhys_STA.setText(_translate("Form", "Spike-triggered Average", None))
| mit |
minzhang28/docker-py | tests/helpers.py | 1 | 3669 | import os
import os.path
import shutil
import tarfile
import tempfile
import unittest
import docker
import six
BUSYBOX = 'busybox:buildroot-2014.02'
EXEC_DRIVER = []
def make_tree(dirs, files):
base = tempfile.mkdtemp()
for path in dirs:
os.makedirs(os.path.join(base, path))
for path in files:
with open(os.path.join(base, path), 'w') as f:
f.write("content")
return base
def simple_tar(path):
f = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w', fileobj=f)
abs_path = os.path.abspath(path)
t.add(abs_path, arcname=os.path.basename(path), recursive=False)
t.close()
f.seek(0)
return f
def untar_file(tardata, filename):
with tarfile.open(mode='r', fileobj=tardata) as t:
f = t.extractfile(filename)
result = f.read()
f.close()
return result
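# Usage sketch for the tar helpers above (the path is illustrative):
#
# tar_stream = simple_tar('/tmp/hello.txt')   # tar holding just hello.txt
# untar_file(tar_stream, 'hello.txt')         # -> the original file's bytes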
def docker_client(**kwargs):
return docker.Client(**docker_client_kwargs(**kwargs))
def docker_client_kwargs(**kwargs):
client_kwargs = docker.utils.kwargs_from_env(assert_hostname=False)
client_kwargs.update(kwargs)
return client_kwargs
class BaseTestCase(unittest.TestCase):
tmp_imgs = []
tmp_containers = []
tmp_folders = []
tmp_volumes = []
def setUp(self):
if six.PY2:
self.assertRegex = self.assertRegexpMatches
self.assertCountEqual = self.assertItemsEqual
self.client = docker_client(timeout=60)
self.tmp_imgs = []
self.tmp_containers = []
self.tmp_folders = []
self.tmp_volumes = []
self.tmp_networks = []
def tearDown(self):
for img in self.tmp_imgs:
try:
self.client.remove_image(img)
except docker.errors.APIError:
pass
for container in self.tmp_containers:
try:
self.client.stop(container, timeout=1)
self.client.remove_container(container)
except docker.errors.APIError:
pass
for network in self.tmp_networks:
try:
self.client.remove_network(network)
except docker.errors.APIError:
pass
for folder in self.tmp_folders:
shutil.rmtree(folder)
for volume in self.tmp_volumes:
try:
self.client.remove_volume(volume)
except docker.errors.APIError:
pass
self.client.close()
def run_container(self, *args, **kwargs):
container = self.client.create_container(*args, **kwargs)
self.tmp_containers.append(container)
self.client.start(container)
exitcode = self.client.wait(container)
if exitcode != 0:
output = self.client.logs(container)
raise Exception(
"Container exited with code {}:\n{}"
.format(exitcode, output))
return container
def create_and_start(self, image='busybox', command='top', **kwargs):
container = self.client.create_container(
image=image, command=command, **kwargs)
self.tmp_containers.append(container)
self.client.start(container)
return container
def execute(self, container, cmd, exit_code=0, **kwargs):
exc = self.client.exec_create(container, cmd, **kwargs)
output = self.client.exec_start(exc)
actual_exit_code = self.client.exec_inspect(exc)['ExitCode']
msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format(
" ".join(cmd), exit_code, actual_exit_code, output)
assert actual_exit_code == exit_code, msg
| apache-2.0 |
morpheby/levelup-by | common/lib/xmodule/xmodule/modulestore/tests/test_xml.py | 16 | 1779 | import os.path
from nose.tools import assert_raises, assert_equals # pylint: disable=E0611
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.modulestore import XML_MODULESTORE_TYPE
from .test_modulestore import check_path_to_location
from xmodule.tests import DATA_DIR
class TestXMLModuleStore(object):
def test_path_to_location(self):
"""Make sure that path_to_location works properly"""
print "Starting import"
modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])
print "finished import"
check_path_to_location(modulestore)
def test_xml_modulestore_type(self):
store = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])
assert_equals(store.get_modulestore_type('foo/bar/baz'), XML_MODULESTORE_TYPE)
def test_unicode_chars_in_xml_content(self):
# edX/full/6.002_Spring_2012 has non-ASCII chars, and during
# uniquification of names, would raise a UnicodeError. It no longer does.
# Ensure that there really is a non-ASCII character in the course.
with open(os.path.join(DATA_DIR, "toy/sequential/vertical_sequential.xml")) as xmlf:
xml = xmlf.read()
with assert_raises(UnicodeDecodeError):
xml.decode('ascii')
# Load the course, but don't make error modules. This will succeed,
# but will record the errors.
modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy'], load_error_modules=False)
# Look up the errors during load. There should be none.
location = CourseDescriptor.id_to_location("edX/toy/2012_Fall")
errors = modulestore.get_item_errors(location)
assert errors == []
| agpl-3.0 |
iABC2XYZ/abc | DM_RFGAP/GenPartilces.py | 1 | 3025 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 26 12:23:48 2017
@author: A
"""
import tensorflow as tf
import numpy as np
from BetaGammaC import *
from RFCal import *
from ConstPhysics import *
def GenGammaT5Twiss(alphaT2D,betaT2D):
gammaT2D=tf.div(1.+tf.square(alphaT2D),betaT2D)
return gammaT2D
def Gen2DPart5Twiss(emitG2D,alphaT2D,betaT2D,numPart):
x=tf.random_normal([numPart])
xp=tf.random_normal([numPart])
gammaT2D=GenGammaT5Twiss(alphaT2D,betaT2D)
X=tf.sqrt(emitG2D/gammaT2D)*(x-alphaT2D*xp)
XP=tf.sqrt(emitG2D*gammaT2D)*xp
return X,XP
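# Note (sketch of why the transform above works): with x, xp ~ N(0, 1)
# independent and gammaT2D = (1 + alphaT2D^2) / betaT2D, the transform gives
#   <X^2>  = emitG2D * (1 + alphaT2D^2) / gammaT2D = emitG2D * betaT2D
#   <XP^2> = emitG2D * gammaT2D
#   <X*XP> = -emitG2D * alphaT2D
# i.e. the second moments of a Gaussian beam with the requested Twiss
# parameters and rms emittance emitG2D. The same algebra applies to the
# _1N variant below.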
def Gen2DPart5Twiss_1N(emitG2D,alphaT2D,betaT2D,numPart):
x=tf.random_normal([1,numPart])
xp=tf.random_normal([1,numPart])
gammaT2D=GenGammaT5Twiss(alphaT2D,betaT2D)
X=tf.sqrt(emitG2D/gammaT2D)*(x-alphaT2D*xp)
XP=tf.sqrt(emitG2D*gammaT2D)*xp
return X,XP
def Gen6DPart5Twiss(emitG6D,alphaT6D,betaT6D,numPart):
X,XP=Gen2DPart5Twiss(emitG6D[0],alphaT6D[0],betaT6D[0],numPart)
Y,YP=Gen2DPart5Twiss(emitG6D[1],alphaT6D[1],betaT6D[1],numPart)
Z,ZP=Gen2DPart5Twiss(emitG6D[2],alphaT6D[2],betaT6D[2],numPart)
return X,XP,Y,YP,Z,ZP
def Gen2DPart5TwissMat(emitG2D,alphaT2D,betaT2D,numPart):
X,XP=Gen2DPart5Twiss_1N(emitG2D,alphaT2D,betaT2D,numPart)
disPart=tf.concat([X,XP],0)
return disPart
def Gen6DPart5TwissMat(emitG6D,alphaT6D,betaT6D,numPart):
X,XP=Gen2DPart5Twiss_1N(emitG6D[0],alphaT6D[0],betaT6D[0],numPart)
Y,YP=Gen2DPart5Twiss_1N(emitG6D[1],alphaT6D[1],betaT6D[1],numPart)
Z,ZP=Gen2DPart5Twiss_1N(emitG6D[2],alphaT6D[2],betaT6D[2],numPart)
disPart=tf.concat([X,XP,Y,YP,Z,ZP],0)
return disPart
def Gen6DPart5Twiss4RFgap(emitG6D,alphaT6D,betaT6D,energySyn,freqMHz,numPart):
X,XP=Gen2DPart5Twiss_1N(emitG6D[0],alphaT6D[0],betaT6D[0],numPart)
Y,YP=Gen2DPart5Twiss_1N(emitG6D[1],alphaT6D[1],betaT6D[1],numPart)
Z,ZP=Gen2DPart5Twiss_1N(emitG6D[2],alphaT6D[2],betaT6D[2],numPart) # Emit: z-dp_p [mm - mrad]
betaLAmbdaM=FreqMHz2BetaLambdaM(freqMHz,energySyn)
disPhiPi=Z/1000./betaLAmbdaM*Pi*2.
betaGammaCSyn=Energy2BetaGammaC(energySyn)
betaGammaC=(1.+ZP/1000.)*betaGammaCSyn
disEnergy=BetaGammaC2Energy(betaGammaC)
disTrans=tf.concat([X,XP,Y,YP],0)
return disTrans,disPhiPi,disEnergy
def Gen6D4RFgap(emitG6D,alphaT6D,betaT6D,energySyn,freqMHz,numPart):
X,XP=Gen2DPart5Twiss_1N(emitG6D[0],alphaT6D[0],betaT6D[0],numPart)
Y,YP=Gen2DPart5Twiss_1N(emitG6D[1],alphaT6D[1],betaT6D[1],numPart)
Z,ZP=Gen2DPart5Twiss_1N(emitG6D[2],alphaT6D[2],betaT6D[2],numPart) # Emit: z-dp_p [mm - mrad]
betaLAmbdaM=FreqMHz2BetaLambdaM(freqMHz,energySyn)
disPhiPi=Z/1000./betaLAmbdaM*Pi*2.
betaGammaCSyn=Energy2BetaGammaC(energySyn)
betaGammaC=(1.+ZP/1000.)*betaGammaCSyn
disEnergy=BetaGammaC2Energy(betaGammaC)
disX=X
disXP=XP
disY=Y
disYP=YP
return disX,disXP,disY,disYP,disPhiPi,disEnergy
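# Usage sketch (TF1-style session API, matching the tf.random_normal calls
# above; the emittance/Twiss values are illustrative placeholders):
#
# dis = Gen6DPart5TwissMat([1e-6, 1e-6, 1e-6], [0., 0., 0.], [1., 1., 1.], 1024)
# with tf.Session() as sess:
#     particles = sess.run(dis)   # ndarray of shape (6, 1024)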
| gpl-3.0 |
redhat-openstack/neutron | neutron/agent/linux/ovs_lib.py | 2 | 22922 | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import operator
from oslo.config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
# Default timeout for ovs-vsctl command
DEFAULT_OVS_VSCTL_TIMEOUT = 10
# Special return value for an invalid OVS ofport
INVALID_OFPORT = '-1'
# OVS bridge fail modes
FAILMODE_SECURE = 'secure'
OPTS = [
cfg.IntOpt('ovs_vsctl_timeout',
default=DEFAULT_OVS_VSCTL_TIMEOUT,
help=_('Timeout in seconds for ovs-vsctl commands')),
]
cfg.CONF.register_opts(OPTS)
LOG = logging.getLogger(__name__)
class VifPort:
def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
self.port_name = port_name
self.ofport = ofport
self.vif_id = vif_id
self.vif_mac = vif_mac
self.switch = switch
def __str__(self):
return ("iface-id=" + self.vif_id + ", vif_mac=" +
self.vif_mac + ", port_name=" + self.port_name +
", ofport=" + str(self.ofport) + ", bridge_name=" +
self.switch.br_name)
class BaseOVS(object):
def __init__(self, root_helper):
self.root_helper = root_helper
self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout
def run_vsctl(self, args, check_error=False):
full_args = ["ovs-vsctl", "--timeout=%d" % self.vsctl_timeout] + args
try:
return utils.execute(full_args, root_helper=self.root_helper)
except Exception as e:
with excutils.save_and_reraise_exception() as ctxt:
LOG.error(_("Unable to execute %(cmd)s. "
"Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
if not check_error:
ctxt.reraise = False
def add_bridge(self, bridge_name, secure_mode=False):
cmd = ["--", "--may-exist", "add-br", bridge_name]
if secure_mode:
cmd += ["--", "set-fail-mode", bridge_name, FAILMODE_SECURE]
self.run_vsctl(cmd)
return OVSBridge(bridge_name, self.root_helper)
def delete_bridge(self, bridge_name):
self.run_vsctl(["--", "--if-exists", "del-br", bridge_name])
def bridge_exists(self, bridge_name):
try:
self.run_vsctl(['br-exists', bridge_name], check_error=True)
except RuntimeError as e:
with excutils.save_and_reraise_exception() as ctxt:
if 'Exit code: 2\n' in str(e):
ctxt.reraise = False
return False
return True
def get_bridge_name_for_port_name(self, port_name):
try:
return self.run_vsctl(['port-to-br', port_name], check_error=True)
except RuntimeError as e:
with excutils.save_and_reraise_exception() as ctxt:
if 'Exit code: 1\n' in str(e):
ctxt.reraise = False
def port_exists(self, port_name):
return bool(self.get_bridge_name_for_port_name(port_name))
class OVSBridge(BaseOVS):
def __init__(self, br_name, root_helper):
super(OVSBridge, self).__init__(root_helper)
self.br_name = br_name
def set_controller(self, controller_names):
vsctl_command = ['--', 'set-controller', self.br_name]
vsctl_command.extend(controller_names)
self.run_vsctl(vsctl_command, check_error=True)
def del_controller(self):
self.run_vsctl(['--', 'del-controller', self.br_name],
check_error=True)
def get_controller(self):
res = self.run_vsctl(['--', 'get-controller', self.br_name],
check_error=True)
if res:
return res.strip().split('\n')
return res
def set_secure_mode(self):
self.run_vsctl(['--', 'set-fail-mode', self.br_name, FAILMODE_SECURE],
check_error=True)
def set_protocols(self, protocols):
self.run_vsctl(['--', 'set', 'bridge', self.br_name,
"protocols=%s" % protocols],
check_error=True)
def create(self, secure_mode=False):
self.add_bridge(self.br_name, secure_mode)
def destroy(self):
self.delete_bridge(self.br_name)
def reset_bridge(self, secure_mode=False):
self.destroy()
self.create(secure_mode)
def add_port(self, port_name):
self.run_vsctl(["--", "--may-exist", "add-port", self.br_name,
port_name])
return self.get_port_ofport(port_name)
def delete_port(self, port_name):
self.run_vsctl(["--", "--if-exists", "del-port", self.br_name,
port_name])
def set_db_attribute(self, table_name, record, column, value):
args = ["set", table_name, record, "%s=%s" % (column, value)]
self.run_vsctl(args)
def clear_db_attribute(self, table_name, record, column):
args = ["clear", table_name, record, column]
self.run_vsctl(args)
def run_ofctl(self, cmd, args, process_input=None):
full_args = ["ovs-ofctl", cmd, self.br_name] + args
try:
return utils.execute(full_args, root_helper=self.root_helper,
process_input=process_input)
except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
def count_flows(self):
flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:]
return len(flow_list) - 1
def remove_all_flows(self):
self.run_ofctl("del-flows", [])
def get_port_ofport(self, port_name):
ofport = self.db_get_val("Interface", port_name, "ofport")
        # This can return a non-integer string, like '[]', so normalize
        # any failure to a single invalid value
try:
int(ofport)
return ofport
except (ValueError, TypeError):
return INVALID_OFPORT
def get_datapath_id(self):
return self.db_get_val('Bridge',
self.br_name, 'datapath_id').strip('"')
def do_action_flows(self, action, kwargs_list):
flow_strs = [_build_flow_expr_str(kw, action) for kw in kwargs_list]
self.run_ofctl('%s-flows' % action, ['-'], '\n'.join(flow_strs))
def add_flow(self, **kwargs):
self.do_action_flows('add', [kwargs])
def mod_flow(self, **kwargs):
self.do_action_flows('mod', [kwargs])
def delete_flows(self, **kwargs):
self.do_action_flows('del', [kwargs])
def dump_flows_for_table(self, table):
retval = None
flow_str = "table=%s" % table
flows = self.run_ofctl("dump-flows", [flow_str])
if flows:
retval = '\n'.join(item for item in flows.splitlines()
if 'NXST' not in item)
return retval
def deferred(self, **kwargs):
return DeferredOVSBridge(self, **kwargs)
def add_tunnel_port(self, port_name, remote_ip, local_ip,
tunnel_type=constants.TYPE_GRE,
vxlan_udp_port=constants.VXLAN_UDP_PORT,
dont_fragment=True):
vsctl_command = ["--", "--may-exist", "add-port", self.br_name,
port_name]
vsctl_command.extend(["--", "set", "Interface", port_name,
"type=%s" % tunnel_type])
if tunnel_type == constants.TYPE_VXLAN:
# Only set the VXLAN UDP port if it's not the default
if vxlan_udp_port != constants.VXLAN_UDP_PORT:
vsctl_command.append("options:dst_port=%s" % vxlan_udp_port)
vsctl_command.append(("options:df_default=%s" %
bool(dont_fragment)).lower())
vsctl_command.extend(["options:remote_ip=%s" % remote_ip,
"options:local_ip=%s" % local_ip,
"options:in_key=flow",
"options:out_key=flow"])
self.run_vsctl(vsctl_command)
ofport = self.get_port_ofport(port_name)
if (tunnel_type == constants.TYPE_VXLAN and
ofport == INVALID_OFPORT):
LOG.error(_('Unable to create VXLAN tunnel port. Please ensure '
'that an openvswitch version that supports VXLAN is '
'installed.'))
return ofport
def add_patch_port(self, local_name, remote_name):
self.run_vsctl(["add-port", self.br_name, local_name,
"--", "set", "Interface", local_name,
"type=patch", "options:peer=%s" % remote_name])
return self.get_port_ofport(local_name)
def db_get_map(self, table, record, column, check_error=False):
output = self.run_vsctl(["get", table, record, column], check_error)
if output:
output_str = output.rstrip("\n\r")
return self.db_str_to_map(output_str)
return {}
def db_get_val(self, table, record, column, check_error=False):
output = self.run_vsctl(["get", table, record, column], check_error)
if output:
return output.rstrip("\n\r")
def db_str_to_map(self, full_str):
list = full_str.strip("{}").split(", ")
ret = {}
for e in list:
if e.find("=") == -1:
continue
arr = e.split("=")
ret[arr[0]] = arr[1].strip("\"")
return ret
def get_port_name_list(self):
res = self.run_vsctl(["list-ports", self.br_name], check_error=True)
if res:
return res.strip().split("\n")
return []
def get_port_stats(self, port_name):
return self.db_get_map("Interface", port_name, "statistics")
def get_xapi_iface_id(self, xs_vif_uuid):
args = ["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid]
try:
return utils.execute(args, root_helper=self.root_helper).strip()
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Unable to execute %(cmd)s. "
"Exception: %(exception)s"),
{'cmd': args, 'exception': e})
# returns a VIF object for each VIF port
def get_vif_ports(self):
edge_ports = []
port_names = self.get_port_name_list()
for name in port_names:
external_ids = self.db_get_map("Interface", name, "external_ids",
check_error=True)
ofport = self.db_get_val("Interface", name, "ofport",
check_error=True)
if "iface-id" in external_ids and "attached-mac" in external_ids:
p = VifPort(name, ofport, external_ids["iface-id"],
external_ids["attached-mac"], self)
edge_ports.append(p)
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not automatically
# synced to OVS from XAPI, we grab it from XAPI directly
iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
p = VifPort(name, ofport, iface_id,
external_ids["attached-mac"], self)
edge_ports.append(p)
return edge_ports
def get_vif_port_set(self):
port_names = self.get_port_name_list()
edge_ports = set()
args = ['--format=json', '--', '--columns=name,external_ids,ofport',
'list', 'Interface']
result = self.run_vsctl(args, check_error=True)
if not result:
return edge_ports
for row in jsonutils.loads(result)['data']:
name = row[0]
if name not in port_names:
continue
external_ids = dict(row[1][1])
# Do not consider VIFs which aren't yet ready
# This can happen when ofport values are either [] or ["set", []]
# We will therefore consider only integer values for ofport
ofport = row[2]
try:
int_ofport = int(ofport)
except (ValueError, TypeError):
LOG.warn(_("Found not yet ready openvswitch port: %s"), row)
else:
if int_ofport > 0:
if ("iface-id" in external_ids and
"attached-mac" in external_ids):
edge_ports.add(external_ids['iface-id'])
elif ("xs-vif-uuid" in external_ids and
"attached-mac" in external_ids):
# if this is a xenserver and iface-id is not
# automatically synced to OVS from XAPI, we grab it
# from XAPI directly
iface_id = self.get_xapi_iface_id(
external_ids["xs-vif-uuid"])
edge_ports.add(iface_id)
else:
LOG.warn(_("Found failed openvswitch port: %s"), row)
return edge_ports
def get_port_tag_dict(self):
"""Get a dict of port names and associated vlan tags.
e.g. the returned dict is of the following form::
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
The TAG ID is only available in the "Port" table and is not available
in the "Interface" table queried by the get_vif_port_set() method.
"""
port_names = self.get_port_name_list()
args = ['--format=json', '--', '--columns=name,tag', 'list', 'Port']
result = self.run_vsctl(args, check_error=True)
port_tag_dict = {}
if not result:
return port_tag_dict
for name, tag in jsonutils.loads(result)['data']:
if name not in port_names:
continue
# 'tag' can be [u'set', []] or an integer
if isinstance(tag, list):
tag = tag[1]
port_tag_dict[name] = tag
return port_tag_dict
def get_vif_port_by_id(self, port_id):
args = ['--format=json', '--', '--columns=external_ids,name,ofport',
'find', 'Interface',
'external_ids:iface-id="%s"' % port_id]
result = self.run_vsctl(args)
if not result:
return
json_result = jsonutils.loads(result)
try:
# Retrieve the indexes of the columns we're looking for
headings = json_result['headings']
ext_ids_idx = headings.index('external_ids')
name_idx = headings.index('name')
ofport_idx = headings.index('ofport')
            # If the data attribute is missing or empty, the line below will
            # raise an exception which will be captured in this block.
            # We won't deal with the possibility of ovs-vsctl returning
            # multiple rows, since the interface identifier is unique
data = json_result['data'][0]
port_name = data[name_idx]
switch = get_bridge_for_iface(self.root_helper, port_name)
if switch != self.br_name:
LOG.info(_("Port: %(port_name)s is on %(switch)s,"
" not on %(br_name)s"), {'port_name': port_name,
'switch': switch,
'br_name': self.br_name})
return
ofport = data[ofport_idx]
# ofport must be integer otherwise return None
if not isinstance(ofport, int) or ofport == -1:
LOG.warn(_("ofport: %(ofport)s for VIF: %(vif)s is not a "
"positive integer"), {'ofport': ofport,
'vif': port_id})
return
# Find VIF's mac address in external ids
ext_id_dict = dict((item[0], item[1]) for item in
data[ext_ids_idx][1])
vif_mac = ext_id_dict['attached-mac']
return VifPort(port_name, ofport, port_id, vif_mac, self)
except Exception as e:
LOG.warn(_("Unable to parse interface details. Exception: %s"), e)
return
def delete_ports(self, all_ports=False):
if all_ports:
port_names = self.get_port_name_list()
else:
port_names = (port.port_name for port in self.get_vif_ports())
for port_name in port_names:
self.delete_port(port_name)
def get_local_port_mac(self):
"""Retrieve the mac of the bridge's local port."""
address = ip_lib.IPDevice(self.br_name, self.root_helper).link.address
if address:
return address
else:
msg = _('Unable to determine mac address for %s') % self.br_name
raise Exception(msg)
def __enter__(self):
self.create()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.destroy()
class DeferredOVSBridge(object):
'''Deferred OVSBridge.
    This class wraps add_flow, mod_flow and delete_flows calls to an OVSBridge
    and defers their application until apply_flows is called, so that they can
    be issued as bulk calls. It also wraps the ALLOWED_PASSTHROUGHS calls to
    avoid mixing OVSBridge and DeferredOVSBridge uses.
    This class can be used as a context manager, in which case apply_flows is
    called on __exit__ unless an exception is raised.
    This class is not thread-safe; a new instance must be created for every
    use.
'''
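    # Usage sketch (bridge and flow arguments are illustrative placeholders):
    #
    #     with br.deferred() as deferred_br:
    #         deferred_br.add_flow(priority=2, actions='drop')
    #         deferred_br.mod_flow(in_port=1, actions='normal')
    #     # all queued flows are applied in bulk on a clean exit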
ALLOWED_PASSTHROUGHS = 'add_port', 'add_tunnel_port', 'delete_port'
def __init__(self, br, full_ordered=False,
order=('add', 'mod', 'del')):
'''Constructor.
:param br: wrapped bridge
:param full_ordered: Optional, disable flow reordering (slower)
:param order: Optional, define in which order flow are applied
'''
self.br = br
self.full_ordered = full_ordered
self.order = order
if not self.full_ordered:
self.weights = dict((y, x) for x, y in enumerate(self.order))
self.action_flow_tuples = []
def __getattr__(self, name):
if name in self.ALLOWED_PASSTHROUGHS:
return getattr(self.br, name)
raise AttributeError(name)
def add_flow(self, **kwargs):
self.action_flow_tuples.append(('add', kwargs))
def mod_flow(self, **kwargs):
self.action_flow_tuples.append(('mod', kwargs))
def delete_flows(self, **kwargs):
self.action_flow_tuples.append(('del', kwargs))
def apply_flows(self):
action_flow_tuples = self.action_flow_tuples
self.action_flow_tuples = []
if not action_flow_tuples:
return
if not self.full_ordered:
action_flow_tuples.sort(key=lambda af: self.weights[af[0]])
grouped = itertools.groupby(action_flow_tuples,
key=operator.itemgetter(0))
itemgetter_1 = operator.itemgetter(1)
for action, action_flow_list in grouped:
flows = map(itemgetter_1, action_flow_list)
self.br.do_action_flows(action, flows)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
self.apply_flows()
else:
LOG.exception(_("OVS flows could not be applied on bridge %s"),
self.br.br_name)
def get_bridge_for_iface(root_helper, iface):
args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout,
"iface-to-br", iface]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Interface %s not found."), iface)
return None
def get_bridges(root_helper):
args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout,
"list-br"]
try:
return utils.execute(args, root_helper=root_helper).strip().split("\n")
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.exception(_("Unable to retrieve bridges. Exception: %s"), e)
def get_bridge_external_bridge_id(root_helper, bridge):
args = ["ovs-vsctl", "--timeout=2", "br-get-external-id",
bridge, "bridge-id"]
try:
return utils.execute(args, root_helper=root_helper).strip()
except Exception:
LOG.exception(_("Bridge %s not found."), bridge)
return None
def _build_flow_expr_str(flow_dict, cmd):
flow_expr_arr = []
actions = None
if cmd == 'add':
flow_expr_arr.append("hard_timeout=%s" %
flow_dict.pop('hard_timeout', '0'))
flow_expr_arr.append("idle_timeout=%s" %
flow_dict.pop('idle_timeout', '0'))
flow_expr_arr.append("priority=%s" %
flow_dict.pop('priority', '1'))
elif 'priority' in flow_dict:
msg = _("Cannot match priority on flow deletion or modification")
raise exceptions.InvalidInput(error_message=msg)
if cmd != 'del':
if "actions" not in flow_dict:
msg = _("Must specify one or more actions on flow addition"
" or modification")
raise exceptions.InvalidInput(error_message=msg)
actions = "actions=%s" % flow_dict.pop('actions')
for key, value in flow_dict.iteritems():
if key == 'proto':
flow_expr_arr.append(value)
else:
flow_expr_arr.append("%s=%s" % (key, str(value)))
if actions:
flow_expr_arr.append(actions)
return ','.join(flow_expr_arr)
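# Example of the string built above (illustrative arguments; note the
# remaining match fields are emitted in dict iteration order):
#
# _build_flow_expr_str({'priority': 2, 'in_port': 1, 'actions': 'drop'}, 'add')
# -> 'hard_timeout=0,idle_timeout=0,priority=2,in_port=1,actions=drop'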
| apache-2.0 |
cjaymes/pyscap | src/scap/model/oval_5/defs/linux/SlackwarePkgInfoObjectElement.py | 1 | 1047 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.defs.linux.ObjectType import ObjectType
logger = logging.getLogger(__name__)
class SlackwarePkgInfoObjectElement(ObjectType):
MODEL_MAP = {
'tag_name': 'slackwarepkginfo_object',
'elements': [
{'tag_name': 'name', 'class': 'scap.model.oval_5.defs.EntityObjectType', 'min': 0},
],
}
| gpl-3.0 |
alanfranz/duplicity | duplicity/collections.py | 3 | 43704 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <[email protected]>
# Copyright 2007 Kenneth Loafman <[email protected]>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Classes and functions on collections of backup volumes"""
import types
import gettext
from duplicity import log
from duplicity import file_naming
from duplicity import path
from duplicity import util
from duplicity import dup_time
from duplicity import globals
from duplicity import manifest
from duplicity.gpg import GPGError
class CollectionsError(Exception):
pass
class BackupSet:
"""
Backup set - the backup information produced by one session
"""
def __init__(self, backend):
"""
Initialize new backup set, only backend is required at first
"""
self.backend = backend
self.info_set = False # true if fields are set
self.volume_name_dict = {} # dict from volume number to filename
self.remote_manifest_name = None # full name of remote manifest
self.local_manifest_path = None # full path to local manifest
self.time = None # will be set if is full backup set
self.start_time = None # will be set if inc
self.end_time = None # will be set if inc
self.partial = False # true if a partial backup
self.encrypted = False # true if an encrypted backup
def is_complete(self):
"""
        Assume the set is complete if a manifest file was found
"""
return self.remote_manifest_name
def add_filename(self, filename):
"""
Add a filename to given set. Return true if it fits.
The filename will match the given set if it has the right
times and is of the right type. The information will be set
from the first filename given.
@param filename: name of file to add
@type filename: string
"""
pr = file_naming.parse(filename)
if not pr or not (pr.type == "full" or pr.type == "inc"):
return False
if not self.info_set:
self.set_info(pr)
else:
if pr.type != self.type:
return False
if pr.time != self.time:
return False
if (pr.start_time != self.start_time or
pr.end_time != self.end_time):
return False
if bool(pr.encrypted) != bool(self.encrypted):
if self.partial and pr.encrypted:
self.encrypted = pr.encrypted
if pr.manifest:
self.set_manifest(filename)
else:
assert pr.volume_number is not None
assert not self.volume_name_dict.has_key(pr.volume_number), \
(self.volume_name_dict, filename)
self.volume_name_dict[pr.volume_number] = filename
return True
def set_info(self, pr):
"""
Set BackupSet information from ParseResults object
@param pr: parse results
        @type pr: ParseResults
"""
assert not self.info_set
self.type = pr.type
self.time = pr.time
self.start_time = pr.start_time
self.end_time = pr.end_time
self.time = pr.time
self.partial = pr.partial
self.encrypted = bool(pr.encrypted)
self.info_set = True
def set_manifest(self, remote_filename):
"""
Add local and remote manifest filenames to backup set
"""
assert not self.remote_manifest_name, (self.remote_manifest_name,
remote_filename)
self.remote_manifest_name = remote_filename
for local_filename in globals.archive_dir.listdir():
pr = file_naming.parse(local_filename)
if (pr and pr.manifest
and pr.type == self.type
and pr.time == self.time
and pr.start_time == self.start_time
and pr.end_time == self.end_time):
self.local_manifest_path = \
globals.archive_dir.append(local_filename)
break
def delete(self):
"""
Remove all files in set, both local and remote
"""
rfn = self.get_filenames()
rfn.reverse()
try:
self.backend.delete(rfn)
except Exception:
log.Debug(_("BackupSet.delete: missing %s") % map(util.ufn, rfn))
pass
for lfn in globals.archive_dir.listdir():
pr = file_naming.parse(lfn)
if (pr
and pr.time == self.time
and pr.start_time == self.start_time
and pr.end_time == self.end_time):
try:
globals.archive_dir.append(lfn).delete()
except Exception:
log.Debug(_("BackupSet.delete: missing %s") % map(util.ufn, lfn))
pass
util.release_lockfile()
def __unicode__(self):
"""
For now just list files in set
"""
filelist = []
if self.remote_manifest_name:
filelist.append(self.remote_manifest_name)
filelist.extend(self.volume_name_dict.values())
return u"[%s]" % u", ".join(map(util.ufn, filelist))
def get_timestr(self):
"""
Return time string suitable for log statements
"""
return dup_time.timetopretty(self.time or self.end_time)
def check_manifests(self):
"""
Make sure remote manifest is equal to local one
"""
if not self.remote_manifest_name and not self.local_manifest_path:
log.FatalError(_("Fatal Error: No manifests found for most recent backup"),
log.ErrorCode.no_manifests)
assert self.remote_manifest_name, "if only one, should be remote"
remote_manifest = self.get_remote_manifest()
if self.local_manifest_path:
local_manifest = self.get_local_manifest()
if remote_manifest and self.local_manifest_path and local_manifest:
if remote_manifest != local_manifest:
log.FatalError(_("Fatal Error: Remote manifest does not match "
"local one. Either the remote backup set or "
"the local archive directory has been corrupted."),
log.ErrorCode.mismatched_manifests)
if not remote_manifest:
if self.local_manifest_path:
remote_manifest = local_manifest
else:
log.FatalError(_("Fatal Error: Neither remote nor local "
"manifest is readable."),
log.ErrorCode.unreadable_manifests)
remote_manifest.check_dirinfo()
def get_local_manifest(self):
"""
Return manifest object by reading local manifest file
"""
assert self.local_manifest_path
manifest_buffer = self.local_manifest_path.get_data()
return manifest.Manifest().from_string(manifest_buffer)
def get_remote_manifest(self):
"""
Return manifest by reading remote manifest on backend
"""
assert self.remote_manifest_name
# Following by MDR. Should catch if remote encrypted with
# public key w/o secret key
try:
manifest_buffer = self.backend.get_data(self.remote_manifest_name)
except GPGError, message:
#TODO: We check for gpg v1 and v2 messages, should be an error code.
if ("secret key not available" in message.args[0] or
"No secret key" in message.args[0]):
return None
else:
raise
return manifest.Manifest().from_string(manifest_buffer)
def get_manifest(self):
"""
Return manifest object, showing preference for local copy
"""
if self.local_manifest_path:
return self.get_local_manifest()
else:
return self.get_remote_manifest()
def get_filenames(self):
"""
Return sorted list of (remote) filenames of files in set
"""
assert self.info_set
volume_num_list = self.volume_name_dict.keys()
volume_num_list.sort()
volume_filenames = map(lambda x: self.volume_name_dict[x],
volume_num_list)
if self.remote_manifest_name:
# For convenience of implementation for restart support, we treat
# local partial manifests as this set's remote manifest. But
# when specifically asked for a list of remote filenames, we
# should not include it.
pr = file_naming.parse(self.remote_manifest_name)
if not pr or not pr.partial:
volume_filenames.append(self.remote_manifest_name)
return volume_filenames
def get_time(self):
"""
Return time if full backup, or end_time if incremental
"""
if self.time:
return self.time
if self.end_time:
return self.end_time
assert 0, "Neither self.time nor self.end_time set"
def __len__(self):
"""
Return the number of volumes in the set
"""
return len(self.volume_name_dict.keys())
class BackupChain:
"""
BackupChain - a number of linked BackupSets
A BackupChain always starts with a full backup set and continues
with incremental ones.
"""
def __init__(self, backend):
"""
Initialize new chain, only backend is required at first
"""
self.backend = backend
self.fullset = None
self.incset_list = [] # sorted list of BackupSets
self.start_time, self.end_time = None, None
def set_full(self, fullset):
"""
Add full backup set
"""
assert not self.fullset and isinstance(fullset, BackupSet)
self.fullset = fullset
assert fullset.time
self.start_time, self.end_time = fullset.time, fullset.time
def add_inc(self, incset):
"""
Add incset to self. Return False if incset does not match
"""
if self.end_time == incset.start_time:
self.incset_list.append(incset)
else:
if (self.incset_list
and incset.start_time == self.incset_list[-1].start_time
and incset.end_time > self.incset_list[-1]):
log.Info(_("Preferring Backupset over previous one!"))
self.incset_list[-1] = incset
else:
log.Info(_("Ignoring incremental Backupset (start_time: %s; needed: %s)") %
(dup_time.timetopretty(incset.start_time),
dup_time.timetopretty(self.end_time)))
return False
self.end_time = incset.end_time
log.Info(_("Added incremental Backupset (start_time: %s / end_time: %s)") %
(dup_time.timetopretty(incset.start_time),
dup_time.timetopretty(incset.end_time)))
assert self.end_time
return True
def delete(self, keep_full=False):
"""
Delete all sets in chain, in reverse order
"""
for i in range(len(self.incset_list)-1, -1, -1):
self.incset_list[i].delete()
if self.fullset and not keep_full:
self.fullset.delete()
def get_sets_at_time(self, time):
"""
Return a list of sets in chain earlier or equal to time
"""
older_incsets = filter(lambda s: s.end_time <= time, self.incset_list)
return [self.fullset] + older_incsets
def get_last(self):
"""
Return last BackupSet in chain
"""
if self.incset_list:
return self.incset_list[-1]
else:
return self.fullset
def get_first(self):
"""
Return first BackupSet in chain (ie the full backup)
"""
return self.fullset
def short_desc(self):
"""
Return a short one-line description of the chain,
suitable for log messages.
"""
return "[%s]-[%s]" % (dup_time.timetopretty(self.start_time),
dup_time.timetopretty(self.end_time))
def to_log_info(self, prefix=''):
"""
Return summary, suitable for printing to log
"""
l = []
for s in self.get_all_sets():
if s.time:
type = "full"
time = s.time
else:
type = "inc"
time = s.end_time
if s.encrypted:
enc = "enc"
else:
enc = "noenc"
l.append("%s%s %s %d %s" % (prefix, type, dup_time.timetostring(time), (len(s)), enc))
return l
def __str__(self):
"""
Return string representation, for testing purposes
"""
set_schema = "%20s %30s %15s"
l = ["-------------------------",
_("Chain start time: ") + dup_time.timetopretty(self.start_time),
_("Chain end time: ") + dup_time.timetopretty(self.end_time),
_("Number of contained backup sets: %d") %
(len(self.incset_list)+1,),
_("Total number of contained volumes: %d") %
(self.get_num_volumes(),),
set_schema % (_("Type of backup set:"), _("Time:"), _("Num volumes:"))]
for s in self.get_all_sets():
if s.time:
type = _("Full")
time = s.time
else:
type = _("Incremental")
time = s.end_time
l.append(set_schema % (type, dup_time.timetopretty(time), len(s)))
l.append("-------------------------")
return "\n".join(l)
def get_num_volumes(self):
"""
Return the total number of volumes in the chain
"""
n = 0
for s in self.get_all_sets():
n += len(s)
return n
def get_all_sets(self):
"""
Return list of all backup sets in chain
"""
if self.fullset:
return [self.fullset] + self.incset_list
else:
return self.incset_list
class SignatureChain:
"""
A number of linked SignatureSets
Analog to BackupChain - start with a full-sig, and continue with
new-sigs.
"""
def __init__(self, local, location):
"""
Return new SignatureChain.
local should be true iff the signature chain resides in
globals.archive_dir and false if the chain is in
globals.backend.
@param local: True if sig chain in globals.archive_dir
@type local: Boolean
@param location: Where the sig chain is located
@type location: globals.archive_dir or globals.backend
"""
if local:
self.archive_dir, self.backend = location, None
else:
self.archive_dir, self.backend = None, location
self.fullsig = None # filename of full signature
self.inclist = [] # list of filenames of incremental signatures
self.start_time, self.end_time = None, None
def __str__(self):
"""
Local or Remote and List of files in the set
"""
if self.archive_dir:
place = _("local")
else:
place = _("remote")
filelist = []
if self.fullsig:
filelist.append(self.fullsig)
filelist.extend(self.inclist)
return "%s: [%s]" % (place, ", ".join(filelist))
def check_times(self, time_list):
"""
Check to make sure times are in whole seconds
"""
for time in time_list:
if type(time) not in (types.LongType, types.IntType):
assert 0, "Time %s in %s wrong type" % (time, time_list)
def islocal(self):
"""
Return true if represents a signature chain in archive_dir
"""
if self.archive_dir:
return True
else:
return False
def add_filename(self, filename, pr = None):
"""
Add new sig filename to current chain. Return true if fits
"""
if not pr:
pr = file_naming.parse(filename)
if not pr:
return None
if self.fullsig:
if pr.type != "new-sig":
return None
if pr.start_time != self.end_time:
return None
self.inclist.append(filename)
self.check_times([pr.end_time])
self.end_time = pr.end_time
return 1
else:
if pr.type != "full-sig":
return None
self.fullsig = filename
self.check_times([pr.time, pr.time])
self.start_time, self.end_time = pr.time, pr.time
return 1
def get_fileobjs(self, time = None):
"""
Return ordered list of signature fileobjs opened for reading,
optionally at a certain time
"""
assert self.fullsig
if self.archive_dir: # local
def filename_to_fileobj(filename):
"""Open filename in archive_dir, return filtered fileobj"""
sig_dp = path.DupPath(self.archive_dir.name, (filename,))
return sig_dp.filtered_open("rb")
else:
filename_to_fileobj = self.backend.get_fileobj_read
return map(filename_to_fileobj, self.get_filenames(time))
def delete(self, keep_full=False):
"""
Remove all files in signature set
"""
# Try to delete in opposite order, so something useful even if aborted
if self.archive_dir:
for i in range(len(self.inclist)-1, -1, -1):
self.archive_dir.append(self.inclist[i]).delete()
if not keep_full:
self.archive_dir.append(self.fullsig).delete()
else:
assert self.backend
inclist_copy = self.inclist[:]
inclist_copy.reverse()
if not keep_full:
inclist_copy.append(self.fullsig)
self.backend.delete(inclist_copy)
def get_filenames(self, time = None):
"""
Return ordered list of filenames in set, up to a provided time
"""
if self.fullsig:
l = [self.fullsig]
else:
l = []
inclist = self.inclist
if time:
inclist = filter(lambda n: file_naming.parse(n).end_time <= time,
inclist)
l.extend(inclist)
return l
class CollectionsStatus:
"""
Hold information about available chains and sets
"""
def __init__(self, backend, archive_dir):
"""
Make new object. Does not set values
"""
self.backend = backend
self.archive_dir = archive_dir
# Will hold (signature chain, backup chain) pair of active
# (most recent) chains
self.matched_chain_pair = None
# These should be sorted by end_time
self.all_backup_chains = None
self.other_backup_chains = None
self.all_sig_chains = None
# Other misc paths and sets which shouldn't be there
self.local_orphaned_sig_names = []
self.remote_orphaned_sig_names = []
self.orphaned_backup_sets = None
self.incomplete_backup_sets = None
# True if set_values() below has run
self.values_set = None
def to_log_info(self):
"""
Return summary of the collection, suitable for printing to log
"""
l = ["backend %s" % (self.backend.__class__.__name__,),
"archive-dir %s" % (self.archive_dir,)]
for i in range(len(self.other_backup_chains)):
# A bit of a misnomer. Chain might have a sig.
l.append("chain-no-sig %d" % (i,))
l += self.other_backup_chains[i].to_log_info(' ')
if self.matched_chain_pair:
l.append("chain-complete")
l += self.matched_chain_pair[1].to_log_info(' ')
l.append("orphaned-sets-num %d" % (len(self.orphaned_backup_sets),))
l.append("incomplete-sets-num %d" % (len(self.incomplete_backup_sets),))
return l
def __unicode__(self):
"""
Return string summary of the collection
"""
l = [_("Collection Status"),
u"-----------------",
_("Connecting with backend: %s") %
(self.backend.__class__.__name__,),
_("Archive dir: %s") % (util.ufn(self.archive_dir.name),)]
l.append("\n" +
ngettext("Found %d secondary backup chain.",
"Found %d secondary backup chains.",
len(self.other_backup_chains))
% len(self.other_backup_chains))
for i in range(len(self.other_backup_chains)):
l.append(_("Secondary chain %d of %d:") %
(i+1, len(self.other_backup_chains)))
l.append(unicode(self.other_backup_chains[i]))
l.append("")
if self.matched_chain_pair:
l.append("\n" + _("Found primary backup chain with matching "
"signature chain:"))
l.append(unicode(self.matched_chain_pair[1]))
else:
l.append(_("No backup chains with active signatures found"))
if self.orphaned_backup_sets or self.incomplete_backup_sets:
l.append(ngettext("Also found %d backup set not part of any chain,",
"Also found %d backup sets not part of any chain,",
len(self.orphaned_backup_sets))
% (len(self.orphaned_backup_sets),))
l.append(ngettext("and %d incomplete backup set.",
"and %d incomplete backup sets.",
len(self.incomplete_backup_sets))
% (len(self.incomplete_backup_sets),))
# TRANSL: "cleanup" is a hard-coded command, so do not translate it
l.append(_('These may be deleted by running duplicity with the '
'"cleanup" command.'))
else:
l.append(_("No orphaned or incomplete backup sets found."))
return u"\n".join(l)
def set_values(self, sig_chain_warning = 1):
"""
Set values from archive_dir and backend.
Returns self for convenience. If sig_chain_warning is set to None,
do not warn about unnecessary sig chains. This is because there may
        naturally be some unnecessary ones after a full backup.
"""
self.values_set = 1
# get remote filename list
backend_filename_list = self.backend.list()
log.Debug(ngettext("%d file exists on backend",
"%d files exist on backend",
len(backend_filename_list)) %
len(backend_filename_list))
# get local filename list
local_filename_list = self.archive_dir.listdir()
log.Debug(ngettext("%d file exists in cache",
"%d files exist in cache",
len(local_filename_list)) %
len(local_filename_list))
# check for partial backups
partials = []
for local_filename in local_filename_list:
pr = file_naming.parse(local_filename)
if pr and pr.partial:
partials.append(local_filename)
# get various backup sets and chains
(backup_chains, self.orphaned_backup_sets,
self.incomplete_backup_sets) = \
self.get_backup_chains(partials + backend_filename_list)
backup_chains = self.get_sorted_chains(backup_chains)
self.all_backup_chains = backup_chains
assert len(backup_chains) == len(self.all_backup_chains), "get_sorted_chains() did something more than re-ordering"
local_sig_chains, self.local_orphaned_sig_names = \
self.get_signature_chains(True)
remote_sig_chains, self.remote_orphaned_sig_names = \
self.get_signature_chains(False, filelist = backend_filename_list)
self.set_matched_chain_pair(local_sig_chains + remote_sig_chains,
backup_chains)
self.warn(sig_chain_warning)
return self
def set_matched_chain_pair(self, sig_chains, backup_chains):
"""
Set self.matched_chain_pair and self.other_sig/backup_chains
The latest matched_chain_pair will be set. If there are both
remote and local signature chains capable of matching the
latest backup chain, use the local sig chain (it does not need
to be downloaded).
"""
sig_chains = sig_chains and self.get_sorted_chains(sig_chains)
self.all_sig_chains = sig_chains
self.other_backup_chains = backup_chains[:]
self.matched_chain_pair = None
if sig_chains and backup_chains:
latest_backup_chain = backup_chains[-1]
for i in range(len(sig_chains)-1, -1, -1):
if sig_chains[i].end_time == latest_backup_chain.end_time:
pass
# See if the set before last matches:
elif (len(latest_backup_chain.get_all_sets()) >= 2 and
sig_chains[i].end_time == latest_backup_chain.get_all_sets()[-2].end_time):
# It matches, remove the last backup set:
log.Warn(_("Warning, discarding last backup set, because "
"of missing signature file."))
self.incomplete_backup_sets.append(latest_backup_chain.incset_list[-1])
latest_backup_chain.incset_list = latest_backup_chain.incset_list[:-1]
else:
continue
# Found a matching pair:
if self.matched_chain_pair == None:
self.matched_chain_pair = (sig_chains[i], latest_backup_chain)
break
if self.matched_chain_pair:
self.other_backup_chains.remove(self.matched_chain_pair[1])
def warn(self, sig_chain_warning):
"""
        Log various error messages if incomplete/orphaned files are found
"""
assert self.values_set
if self.local_orphaned_sig_names:
log.Warn(ngettext("Warning, found the following local orphaned "
"signature file:",
"Warning, found the following local orphaned "
"signature files:",
len(self.local_orphaned_sig_names))
+ u"\n" + u"\n".join(map(util.ufn, self.local_orphaned_sig_names)),
log.WarningCode.orphaned_sig)
if self.remote_orphaned_sig_names:
log.Warn(ngettext("Warning, found the following remote orphaned "
"signature file:",
"Warning, found the following remote orphaned "
"signature files:",
len(self.remote_orphaned_sig_names))
+ u"\n" + u"\n".join(map(util.ufn, self.remote_orphaned_sig_names)),
log.WarningCode.orphaned_sig)
if self.all_sig_chains and sig_chain_warning and not self.matched_chain_pair:
log.Warn(_("Warning, found signatures but no corresponding "
"backup files"), log.WarningCode.unmatched_sig)
if self.incomplete_backup_sets:
log.Warn(_("Warning, found incomplete backup sets, probably left "
"from aborted session"), log.WarningCode.incomplete_backup)
if self.orphaned_backup_sets:
log.Warn(ngettext("Warning, found the following orphaned "
"backup file:",
"Warning, found the following orphaned "
"backup files:",
len(self.orphaned_backup_sets))
+ u"\n" + u"\n".join(map(unicode, self.orphaned_backup_sets)),
log.WarningCode.orphaned_backup)
def get_backup_chains(self, filename_list):
"""
Split given filename_list into chains
Return value will be tuple (list of chains, list of sets, list
of incomplete sets), where the list of sets will comprise sets
not fitting into any chain, and the incomplete sets are sets
missing files.
"""
log.Debug(_("Extracting backup chains from list of files: %s")
% map(util.ufn, filename_list))
# First put filenames in set form
sets = []
def add_to_sets(filename):
"""
Try adding filename to existing sets, or make new one
"""
for set in sets:
if set.add_filename(filename):
log.Debug(_("File %s is part of known set") % (util.ufn(filename),))
break
else:
log.Debug(_("File %s is not part of a known set; creating new set") % (util.ufn(filename),))
new_set = BackupSet(self.backend)
if new_set.add_filename(filename):
sets.append(new_set)
else:
log.Debug(_("Ignoring file (rejected by backup set) '%s'") % util.ufn(filename))
map(add_to_sets, filename_list)
sets, incomplete_sets = self.get_sorted_sets(sets)
chains, orphaned_sets = [], []
def add_to_chains(set):
"""
Try adding set to existing chains, or make new one
"""
if set.type == "full":
new_chain = BackupChain(self.backend)
new_chain.set_full(set)
chains.append(new_chain)
log.Debug(_("Found backup chain %s") % (new_chain.short_desc()))
else:
assert set.type == "inc"
for chain in chains:
if chain.add_inc(set):
log.Debug(_("Added set %s to pre-existing chain %s") % (set.get_timestr(),
chain.short_desc()))
break
else:
log.Debug(_("Found orphaned set %s") % (set.get_timestr(),))
orphaned_sets.append(set)
map(add_to_chains, sets)
return (chains, orphaned_sets, incomplete_sets)
def get_sorted_sets(self, set_list):
"""
Sort set list by end time, return (sorted list, incomplete)
"""
time_set_pairs, incomplete_sets = [], []
for set in set_list:
if not set.is_complete():
incomplete_sets.append(set)
elif set.type == "full":
time_set_pairs.append((set.time, set))
else:
time_set_pairs.append((set.end_time, set))
time_set_pairs.sort()
return (map(lambda p: p[1], time_set_pairs), incomplete_sets)
def get_signature_chains(self, local, filelist = None):
"""
Find chains in archive_dir (if local is true) or backend
Use filelist if given, otherwise regenerate. Return value is
pair (list of chains, list of signature paths not in any
chains).
"""
def get_filelist():
if filelist is not None:
return filelist
elif local:
return self.archive_dir.listdir()
else:
return self.backend.list()
def get_new_sigchain():
"""
Return new empty signature chain
"""
if local:
return SignatureChain(True, self.archive_dir)
else:
return SignatureChain(False, self.backend)
# Build initial chains from full sig filenames
chains, new_sig_filenames = [], []
for filename in get_filelist():
pr = file_naming.parse(filename)
if pr:
if pr.type == "full-sig":
new_chain = get_new_sigchain()
assert new_chain.add_filename(filename, pr)
chains.append(new_chain)
elif pr.type == "new-sig":
new_sig_filenames.append(filename)
# compare by file time
def by_start_time(a, b):
return int(file_naming.parse(a).start_time) - int(file_naming.parse(b).start_time)
# Try adding new signatures to existing chains
orphaned_filenames = []
new_sig_filenames.sort(by_start_time)
for sig_filename in new_sig_filenames:
for chain in chains:
if chain.add_filename(sig_filename):
break
else:
orphaned_filenames.append(sig_filename)
return (chains, orphaned_filenames)
def get_sorted_chains(self, chain_list):
"""
Return chains sorted by end_time. If tie, local goes last
"""
# Build dictionary from end_times to lists of corresponding chains
endtime_chain_dict = {}
for chain in chain_list:
if endtime_chain_dict.has_key(chain.end_time):
endtime_chain_dict[chain.end_time].append(chain)
else:
endtime_chain_dict[chain.end_time] = [chain]
# Use dictionary to build final sorted list
sorted_end_times = endtime_chain_dict.keys()
sorted_end_times.sort()
sorted_chain_list = []
for end_time in sorted_end_times:
chain_list = endtime_chain_dict[end_time]
if len(chain_list) == 1:
sorted_chain_list.append(chain_list[0])
else:
assert len(chain_list) == 2
if chain_list[0].backend: # is remote, goes first
sorted_chain_list.append(chain_list[0])
sorted_chain_list.append(chain_list[1])
else: # is local, goes second
sorted_chain_list.append(chain_list[1])
sorted_chain_list.append(chain_list[0])
return sorted_chain_list
def get_backup_chain_at_time(self, time):
"""
Return backup chain covering specified time
Tries to find the backup chain covering the given time. If
there is none, return the earliest chain before, and failing
that, the earliest chain.
"""
if not self.all_backup_chains:
raise CollectionsError("No backup chains found")
covering_chains = filter(lambda c: c.start_time <= time <= c.end_time,
self.all_backup_chains)
if len(covering_chains) > 1:
raise CollectionsError("Two chains cover the given time")
elif len(covering_chains) == 1:
return covering_chains[0]
old_chains = filter(lambda c: c.end_time < time,
self.all_backup_chains)
if old_chains:
return old_chains[-1]
else:
return self.all_backup_chains[0] # no chains are old enough
def get_signature_chain_at_time(self, time):
"""
Return signature chain covering specified time
Tries to find the signature chain covering the given time. If
there is none, return the earliest chain before, and failing
that, the earliest chain.
"""
if not self.all_sig_chains:
raise CollectionsError("No signature chains found")
covering_chains = filter(lambda c: c.start_time <= time <= c.end_time,
self.all_sig_chains)
if covering_chains:
return covering_chains[-1] # prefer local if multiple sig chains
old_chains = filter(lambda c: c.end_time < time,
self.all_sig_chains)
if old_chains:
return old_chains[-1]
else:
# no chains are old enough, give oldest and warn user
oldest = self.all_sig_chains[0]
if time < oldest.start_time:
log.Warn(_("No signature chain for the requested time. Using oldest available chain, starting at time %s.") % dup_time.timetopretty(oldest.start_time), log.WarningCode.no_sig_for_time, dup_time.timetostring(oldest.start_time))
return oldest
def get_extraneous(self, extra_clean):
"""
Return list of the names of extraneous duplicity files
A duplicity file is considered extraneous if it is
recognizable as a duplicity file, but isn't part of some
complete backup set, or current signature chain.
"""
assert self.values_set
local_filenames = []
remote_filenames = []
ext_containers = self.orphaned_backup_sets + self.incomplete_backup_sets
if extra_clean:
old_sig_chains = self.all_sig_chains[:]
if self.matched_chain_pair:
matched_sig_chain = self.matched_chain_pair[0]
for sig_chain in self.all_sig_chains:
if (sig_chain.start_time == matched_sig_chain.start_time and
sig_chain.end_time == matched_sig_chain.end_time):
old_sig_chains.remove(sig_chain)
ext_containers += old_sig_chains
for set_or_chain in ext_containers:
if set_or_chain.backend:
remote_filenames.extend(set_or_chain.get_filenames())
else:
local_filenames.extend(set_or_chain.get_filenames())
local_filenames += self.local_orphaned_sig_names
remote_filenames += self.remote_orphaned_sig_names
return local_filenames, remote_filenames
def sort_sets(self, setlist):
"""Return new list containing same elems of setlist, sorted by time"""
pairs = map(lambda s: (s.get_time(), s), setlist)
pairs.sort()
return map(lambda p: p[1], pairs)
def get_chains_older_than(self, t):
"""
Returns a list of backup chains older than the given time t
All of the times will be associated with an intact chain.
Furthermore, none of the times will be of a chain which a newer
set may depend on. For instance, if set A is a full set older
than t, and set B is an incremental based on A which is newer
than t, then the time of set A will not be returned.
"""
assert self.values_set
old_chains = []
for chain in self.all_backup_chains:
if chain.end_time < t and (
not self.matched_chain_pair or
chain is not self.matched_chain_pair[1]):
# don't delete the active (matched) chain
old_chains.append(chain)
return old_chains
def get_signature_chains_older_than(self, t):
"""
Returns a list of signature chains older than the given time t
All of the times will be associated with an intact chain.
Furthermore, none of the times will be of a chain which a newer
set may depend on. For instance, if set A is a full set older
than t, and set B is an incremental based on A which is newer
than t, then the time of set A will not be returned.
"""
assert self.values_set
old_chains = []
for chain in self.all_sig_chains:
if chain.end_time < t and (
not self.matched_chain_pair or
chain is not self.matched_chain_pair[0]):
# don't delete the active (matched) chain
old_chains.append(chain)
return old_chains
def get_last_full_backup_time(self):
"""
Return the time of the last full backup,
or 0 if there is none.
"""
return self.get_nth_last_full_backup_time(1)
def get_nth_last_full_backup_time(self, n):
"""
Return the time of the nth to last full backup,
or 0 if there is none.
"""
chain = self.get_nth_last_backup_chain(n)
if chain is None:
return 0
else:
return chain.get_first().time
def get_last_backup_chain(self):
"""
Return the last full backup of the collection,
or None if there is no full backup chain.
"""
return self.get_nth_last_backup_chain(1)
def get_nth_last_backup_chain(self,n):
"""
Return the nth-to-last full backup of the collection,
        or None if there are fewer than n backup chains.
NOTE: n = 1 -> time of latest available chain (n = 0 is not
a valid input). Thus the second-to-last is obtained with n=2
rather than n=1.
"""
def mycmp(x, y):
return cmp(x.get_first().time, y.get_first().time)
assert self.values_set
assert n > 0
if len(self.all_backup_chains) < n:
return None
sorted = self.all_backup_chains[:]
sorted.sort(mycmp)
sorted.reverse()
return sorted[n - 1]
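    # Illustrative sketch (hypothetical chain times, not from this module):
    # with three chains whose full backups start at times t1 < t2 < t3,
    # get_nth_last_backup_chain(1) returns the chain starting at t3 and
    # get_nth_last_backup_chain(2) the one starting at t2.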
def get_older_than(self, t):
"""
Returns a list of backup sets older than the given time t
All of the times will be associated with an intact chain.
Furthermore, none of the times will be of a set which a newer
set may depend on. For instance, if set A is a full set older
than t, and set B is an incremental based on A which is newer
than t, then the time of set A will not be returned.
"""
old_sets = []
for chain in self.get_chains_older_than(t):
old_sets.extend(chain.get_all_sets())
return self.sort_sets(old_sets)
def get_older_than_required(self, t):
"""
Returns list of old backup sets required by new sets
This function is similar to the previous one, but it only
returns the times of sets which are old but part of the chains
where the newer end of the chain is newer than t.
"""
assert self.values_set
new_chains = filter(lambda c: c.end_time >= t, self.all_backup_chains)
result_sets = []
for chain in new_chains:
old_sets = filter(lambda s: s.get_time() < t, chain.get_all_sets())
result_sets.extend(old_sets)
return self.sort_sets(result_sets)
| gpl-2.0 |
fmfn/UnbalancedDataset | examples/under-sampling/plot_illustration_tomek_links.py | 2 | 3180 | """
==============================================
Illustration of the definition of a Tomek link
==============================================
This example illustrates what a Tomek link is.
"""
# Authors: Guillaume Lemaitre <[email protected]>
# License: MIT
# %%
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# This function helps make nice plots
# %%
def make_plot_despine(ax):
sns.despine(ax=ax, offset=10)
ax.set_xlim([0, 3])
ax.set_ylim([0, 3])
ax.set_xlabel(r"$X_1$")
ax.set_ylabel(r"$X_2$")
ax.legend(loc="lower right")
# %% [markdown]
# We will generate some toy data that illustrates how
# :class:`~imblearn.under_sampling.TomekLinks` is used to clean a dataset.
# %%
import numpy as np
rng = np.random.RandomState(18)
X_minority = np.transpose(
[[1.1, 1.3, 1.15, 0.8, 0.55, 2.1], [1.0, 1.5, 1.7, 2.5, 0.55, 1.9]]
)
X_majority = np.transpose(
[
[2.1, 2.12, 2.13, 2.14, 2.2, 2.3, 2.5, 2.45],
[1.5, 2.1, 2.7, 0.9, 1.0, 1.4, 2.4, 2.9],
]
)
# %% [markdown]
# In the figure below, the samples highlighted in green form a Tomek link since
# they are of different classes and are nearest neighbors of each other.
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(
X_minority[:, 0],
X_minority[:, 1],
label="Minority class",
s=200,
marker="_",
)
ax.scatter(
X_majority[:, 0],
X_majority[:, 1],
label="Majority class",
s=200,
marker="+",
)
# highlight the samples of interest
ax.scatter(
[X_minority[-1, 0], X_majority[1, 0]],
[X_minority[-1, 1], X_majority[1, 1]],
label="Tomek link",
s=200,
alpha=0.3,
)
make_plot_despine(ax)
fig.suptitle("Illustration of a Tomek link")
fig.tight_layout()
# %% [markdown]
# We can run the :class:`~imblearn.under_sampling.TomekLinks` sampling to
# remove the corresponding samples. If `sampling_strategy='auto'` only the
# sample from the majority class will be removed. If `sampling_strategy='all'`
# both samples will be removed.
# %%
from imblearn.under_sampling import TomekLinks
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 8))
samplers = {
"Removing only majority samples": TomekLinks(sampling_strategy="auto"),
"Removing all samples": TomekLinks(sampling_strategy="all"),
}
for ax, (title, sampler) in zip(axs, samplers.items()):
X_res, y_res = sampler.fit_resample(
np.vstack((X_minority, X_majority)),
np.array([0] * X_minority.shape[0] + [1] * X_majority.shape[0]),
)
ax.scatter(
X_res[y_res == 0][:, 0],
X_res[y_res == 0][:, 1],
label="Minority class",
s=200,
marker="_",
)
ax.scatter(
X_res[y_res == 1][:, 0],
X_res[y_res == 1][:, 1],
label="Majority class",
s=200,
marker="+",
)
# highlight the samples of interest
ax.scatter(
[X_minority[-1, 0], X_majority[1, 0]],
[X_minority[-1, 1], X_majority[1, 1]],
label="Tomek link",
s=200,
alpha=0.3,
)
ax.set_title(title)
make_plot_despine(ax)
fig.tight_layout()
plt.show()
| mit |
junh1024/Reaper-Surround | Scripts/mutagen/_util.py | 2 | 14731 | # Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Utility classes for Mutagen.
You should not rely on the interfaces here being stable. They are
intended for internal use in Mutagen only.
"""
import struct
import codecs
from fnmatch import fnmatchcase
from ._compat import chr_, text_type, PY2, iteritems, iterbytes, integer_types
class MutagenError(Exception):
"""Base class for all custom exceptions in mutagen
.. versionadded:: 1.25
"""
def total_ordering(cls):
assert "__eq__" in cls.__dict__
assert "__lt__" in cls.__dict__
cls.__le__ = lambda self, other: self == other or self < other
cls.__gt__ = lambda self, other: not (self == other or self < other)
cls.__ge__ = lambda self, other: not self < other
cls.__ne__ = lambda self, other: not self.__eq__(other)
return cls
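# Illustrative usage sketch (hypothetical class, not part of mutagen):
#
#     @total_ordering
#     class Version(object):
#         def __init__(self, n):
#             self.n = n
#         def __eq__(self, other):
#             return self.n == other.n
#         def __lt__(self, other):
#             return self.n < other.n
#
#     Version(1) <= Version(2)   # True; __le__ was derived above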
def hashable(cls):
"""Makes sure the class is hashable.
Needs a working __eq__ and __hash__ and will add a __ne__.
"""
# py2
assert "__hash__" in cls.__dict__
# py3
assert cls.__dict__["__hash__"] is not None
assert "__eq__" in cls.__dict__
cls.__ne__ = lambda self, other: not self.__eq__(other)
return cls
def enum(cls):
assert cls.__bases__ == (object,)
d = dict(cls.__dict__)
new_type = type(cls.__name__, (int,), d)
new_type.__module__ = cls.__module__
map_ = {}
for key, value in iteritems(d):
if key.upper() == key and isinstance(value, integer_types):
value_instance = new_type(value)
setattr(new_type, key, value_instance)
map_[value] = key
def repr_(self):
if self in map_:
return "%s.%s" % (type(self).__name__, map_[self])
else:
return "%s(%s)" % (type(self).__name__, self)
setattr(new_type, "__repr__", repr_)
return new_type
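# Illustrative usage sketch (hypothetical enum, not part of mutagen):
#
#     @enum
#     class Color(object):
#         RED = 0
#         GREEN = 1
#
#     Color.RED == 0     # True; members are int subclasses
#     repr(Color.RED)    # "Color.RED"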
@total_ordering
class DictMixin(object):
"""Implement the dict API using keys() and __*item__ methods.
Similar to UserDict.DictMixin, this takes a class that defines
__getitem__, __setitem__, __delitem__, and keys(), and turns it
into a full dict-like object.
UserDict.DictMixin is not suitable for this purpose because it's
an old-style class.
This class is not optimized for very large dictionaries; many
functions have linear memory requirements. I recommend you
override some of these functions if speed is required.
"""
def __iter__(self):
return iter(self.keys())
def __has_key(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
if PY2:
has_key = __has_key
__contains__ = __has_key
if PY2:
iterkeys = lambda self: iter(self.keys())
def values(self):
return [self[k] for k in self.keys()]
if PY2:
itervalues = lambda self: iter(self.values())
def items(self):
return list(zip(self.keys(), self.values()))
if PY2:
iteritems = lambda s: iter(s.items())
def clear(self):
for key in list(self.keys()):
self.__delitem__(key)
def pop(self, key, *args):
if len(args) > 1:
raise TypeError("pop takes at most two arguments")
try:
value = self[key]
except KeyError:
if args:
return args[0]
else:
raise
del(self[key])
return value
def popitem(self):
for key in self.keys():
break
else:
raise KeyError("dictionary is empty")
return key, self.pop(key)
def update(self, other=None, **kwargs):
if other is None:
self.update(kwargs)
other = {}
try:
for key, value in other.items():
self.__setitem__(key, value)
except AttributeError:
for key, value in other:
self[key] = value
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(dict(self.items()))
def __eq__(self, other):
return dict(self.items()) == other
def __lt__(self, other):
return dict(self.items()) < other
__hash__ = object.__hash__
def __len__(self):
return len(self.keys())
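# Illustrative note: a subclass only has to supply __getitem__, __setitem__,
# __delitem__ and keys() -- as DictProxy below does -- to inherit get(),
# pop(), setdefault(), update() and the rest of the dict API from DictMixin.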
class DictProxy(DictMixin):
def __init__(self, *args, **kwargs):
self.__dict = {}
super(DictProxy, self).__init__(*args, **kwargs)
def __getitem__(self, key):
return self.__dict[key]
def __setitem__(self, key, value):
self.__dict[key] = value
def __delitem__(self, key):
del(self.__dict[key])
def keys(self):
return self.__dict.keys()
class cdata(object):
"""C character buffer to Python numeric type conversions."""
from struct import error
error = error
short_le = staticmethod(lambda data: struct.unpack('<h', data)[0])
ushort_le = staticmethod(lambda data: struct.unpack('<H', data)[0])
short_be = staticmethod(lambda data: struct.unpack('>h', data)[0])
ushort_be = staticmethod(lambda data: struct.unpack('>H', data)[0])
int_le = staticmethod(lambda data: struct.unpack('<i', data)[0])
uint_le = staticmethod(lambda data: struct.unpack('<I', data)[0])
int_be = staticmethod(lambda data: struct.unpack('>i', data)[0])
uint_be = staticmethod(lambda data: struct.unpack('>I', data)[0])
longlong_le = staticmethod(lambda data: struct.unpack('<q', data)[0])
ulonglong_le = staticmethod(lambda data: struct.unpack('<Q', data)[0])
longlong_be = staticmethod(lambda data: struct.unpack('>q', data)[0])
ulonglong_be = staticmethod(lambda data: struct.unpack('>Q', data)[0])
to_short_le = staticmethod(lambda data: struct.pack('<h', data))
to_ushort_le = staticmethod(lambda data: struct.pack('<H', data))
to_short_be = staticmethod(lambda data: struct.pack('>h', data))
to_ushort_be = staticmethod(lambda data: struct.pack('>H', data))
to_int_le = staticmethod(lambda data: struct.pack('<i', data))
to_uint_le = staticmethod(lambda data: struct.pack('<I', data))
to_int_be = staticmethod(lambda data: struct.pack('>i', data))
to_uint_be = staticmethod(lambda data: struct.pack('>I', data))
to_longlong_le = staticmethod(lambda data: struct.pack('<q', data))
to_ulonglong_le = staticmethod(lambda data: struct.pack('<Q', data))
to_longlong_be = staticmethod(lambda data: struct.pack('>q', data))
to_ulonglong_be = staticmethod(lambda data: struct.pack('>Q', data))
bitswap = b''.join(chr_(sum(((val >> i) & 1) << (7 - i) for i in range(8)))
for val in range(256))
test_bit = staticmethod(lambda value, n: bool((value >> n) & 1))
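# Illustrative conversions (sketch; example byte strings are hypothetical):
#
#     cdata.ushort_le(b"\x01\x02")   # 513 (0x0201, little-endian)
#     cdata.to_uint_be(1)            # b"\x00\x00\x00\x01"
#     cdata.test_bit(0b0100, 2)      # True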
def lock(fileobj):
"""Lock a file object 'safely'.
That means a failure to lock because the platform doesn't
support fcntl or filesystem locks is not considered a
failure. This call does block.
Returns whether or not the lock was successful, or
raises an exception in more extreme circumstances (full
lock table, invalid file).
"""
try:
import fcntl
except ImportError:
return False
else:
try:
fcntl.lockf(fileobj, fcntl.LOCK_EX)
except IOError:
# FIXME: There's possibly a lot of complicated
# logic that needs to go here in case the IOError
# is EACCES or EAGAIN.
return False
else:
return True
def unlock(fileobj):
"""Unlock a file object.
Don't call this on a file object unless a call to lock()
returned true.
"""
# If this fails there's a mismatched lock/unlock pair,
# so we definitely don't want to ignore errors.
import fcntl
fcntl.lockf(fileobj, fcntl.LOCK_UN)
def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
"""Insert size bytes of empty space starting at offset.
fobj must be an open file object, open rb+ or
equivalent. Mutagen tries to use mmap to resize the file, but
falls back to a significantly slower method if mmap fails.
"""
assert 0 < size
assert 0 <= offset
locked = False
fobj.seek(0, 2)
filesize = fobj.tell()
movesize = filesize - offset
fobj.write(b'\x00' * size)
fobj.flush()
try:
try:
import mmap
file_map = mmap.mmap(fobj.fileno(), filesize + size)
try:
file_map.move(offset + size, offset, movesize)
finally:
file_map.close()
except (ValueError, EnvironmentError, ImportError):
# handle broken mmap scenarios
locked = lock(fobj)
fobj.truncate(filesize)
fobj.seek(0, 2)
padsize = size
# Don't generate an enormous string if we need to pad
# the file out several megs.
while padsize:
addsize = min(BUFFER_SIZE, padsize)
fobj.write(b"\x00" * addsize)
padsize -= addsize
fobj.seek(filesize, 0)
while movesize:
# At the start of this loop, fobj is pointing at the end
# of the data we need to move, which is of movesize length.
thismove = min(BUFFER_SIZE, movesize)
# Seek back however much we're going to read this frame.
fobj.seek(-thismove, 1)
nextpos = fobj.tell()
# Read it, so we're back at the end.
data = fobj.read(thismove)
# Seek back to where we need to write it.
fobj.seek(-thismove + size, 1)
# Write it.
fobj.write(data)
# And seek back to the end of the unmoved data.
fobj.seek(nextpos)
movesize -= thismove
fobj.flush()
finally:
if locked:
unlock(fobj)
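# Illustrative usage sketch (hypothetical file name, shown for clarity only):
#
#     with open("tags.bin", "rb+") as f:
#         insert_bytes(f, 10, 0)   # make room for a 10-byte header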
def delete_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
"""Delete size bytes of empty space starting at offset.
fobj must be an open file object, open rb+ or
equivalent. Mutagen tries to use mmap to resize the file, but
falls back to a significantly slower method if mmap fails.
"""
locked = False
assert 0 < size
assert 0 <= offset
fobj.seek(0, 2)
filesize = fobj.tell()
movesize = filesize - offset - size
assert 0 <= movesize
try:
if movesize > 0:
fobj.flush()
try:
import mmap
file_map = mmap.mmap(fobj.fileno(), filesize)
try:
file_map.move(offset, offset + size, movesize)
finally:
file_map.close()
except (ValueError, EnvironmentError, ImportError):
# handle broken mmap scenarios
locked = lock(fobj)
fobj.seek(offset + size)
buf = fobj.read(BUFFER_SIZE)
while buf:
fobj.seek(offset)
fobj.write(buf)
offset += len(buf)
fobj.seek(offset + size)
buf = fobj.read(BUFFER_SIZE)
fobj.truncate(filesize - size)
fobj.flush()
finally:
if locked:
unlock(fobj)
def dict_match(d, key, default=None):
"""Like __getitem__ but works as if the keys() are all filename patterns.
Returns the value of any dict key that matches the passed key.
"""
if key in d and "[" not in key:
return d[key]
else:
for pattern, value in iteritems(d):
if fnmatchcase(key, pattern):
return value
return default
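# Illustrative sketch (hypothetical pattern mapping):
#
#     patterns = {"*.mp3": "audio", "cover.*": "image"}
#     dict_match(patterns, "song.mp3")    # "audio" (keys act as fnmatch patterns)
#     dict_match(patterns, "notes.txt")   # None (the default)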
def decode_terminated(data, encoding, strict=True):
"""Returns the decoded data until the first NULL terminator
and all data after it.
In case the data can't be decoded raises UnicodeError.
In case the encoding is not found raises LookupError.
In case the data isn't null terminated (even if it is encoded correctly)
raises ValueError except if strict is False, then the decoded string
will be returned anyway.
"""
codec_info = codecs.lookup(encoding)
# normalize encoding name so we can compare by name
encoding = codec_info.name
# fast path
if encoding in ("utf-8", "iso8859-1"):
index = data.find(b"\x00")
if index == -1:
# make sure we raise UnicodeError first, like in the slow path
res = data.decode(encoding), b""
if strict:
raise ValueError("not null terminated")
else:
return res
return data[:index].decode(encoding), data[index + 1:]
# slow path
decoder = codec_info.incrementaldecoder()
r = []
for i, b in enumerate(iterbytes(data)):
c = decoder.decode(b)
if c == u"\x00":
return u"".join(r), data[i + 1:]
r.append(c)
else:
# make sure the decoder is finished
r.append(decoder.decode(b"", True))
if strict:
raise ValueError("not null terminated")
return u"".join(r), b""
def split_escape(string, sep, maxsplit=None, escape_char="\\"):
"""Like unicode/str/bytes.split but allows for the separator to be escaped
If passed unicode/str/bytes will only return list of unicode/str/bytes.
"""
assert len(sep) == 1
assert len(escape_char) == 1
if isinstance(string, bytes):
if isinstance(escape_char, text_type):
escape_char = escape_char.encode("ascii")
iter_ = iterbytes
else:
iter_ = iter
if maxsplit is None:
maxsplit = len(string)
empty = string[:0]
result = []
current = empty
escaped = False
for char in iter_(string):
if escaped:
if char != escape_char and char != sep:
current += escape_char
current += char
escaped = False
else:
if char == escape_char:
escaped = True
elif char == sep and len(result) < maxsplit:
result.append(current)
current = empty
else:
current += char
result.append(current)
return result
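# Illustrative sketch (hypothetical inputs):
#
#     split_escape(u"a,b\\,c", u",")             # [u"a", u"b,c"]
#     split_escape(u"a,b,c", u",", maxsplit=1)   # [u"a", u"b,c"]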
| gpl-3.0 |
prutseltje/ansible | lib/ansible/executor/action_write_locks.py | 140 | 1911 | # (c) 2016 - Red Hat, Inc. <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from multiprocessing import Lock
from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
if 'action_write_locks' not in globals():
    # Do not initialize this more than once, because doing so clobbers
    # the existing dict -- multiprocessing appears to reload the module
    # when it forks.
action_write_locks = dict()
# Below is a Lock for use when we weren't expecting a named module. It gets used when an action
# plugin invokes a module whose name does not match with the action's name. Slightly less
# efficient as all processes with unexpected module names will wait on this lock
action_write_locks[None] = Lock()
# These plugins are known to be called directly by action plugins with names differing from the
# action plugin name. We precreate them here as an optimization.
# If a list of service managers is created in the future we can do the same for them.
mods = set(p['name'] for p in PKG_MGRS)
mods.update(('copy', 'file', 'setup', 'slurp', 'stat'))
for mod_name in mods:
action_write_locks[mod_name] = Lock()
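# Illustrative usage sketch (hypothetical caller, not part of this file):
#
#     lock = action_write_locks.get('copy', action_write_locks[None])
#     with lock:
#         pass  # write out the module file without racing other workers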
| gpl-3.0 |
MakMukhi/grpc | src/python/grpcio/grpc/framework/common/__init__.py | 901 | 1528 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
mparus/android_kernel_huawei_msm8916-caf | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
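	# Illustrative sketch: with the default zoom of 0.5,
	# us_to_px(2000) == 1.0 and px_to_us(1) == 2000.0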
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
imcomking/Convolutional-GRU-keras-extension- | examples/mnist_mlp.py | 7 | 1724 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
'''
Train a simple deep NN on the MNIST dataset.
Get to 98.30% test accuracy after 20 epochs (there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a GRID K520 GPU.
'''
batch_size = 128
nb_classes = 10
nb_epoch = 20
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(128, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
| mit |
ddayguerrero/blogme | flask/lib/python3.4/site-packages/whoosh/query/__init__.py | 96 | 1843 | # Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.query.qcore import *
from whoosh.query.terms import *
from whoosh.query.compound import *
from whoosh.query.positional import *
from whoosh.query.ranges import *
from whoosh.query.wrappers import *
from whoosh.query.nested import *
from whoosh.query.qcolumns import *
from whoosh.query.spans import *
| mit |
CyanogenMod/android_kernel_motorola_msm8974 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
justathoughtor2/atomicApe | cygwin/lib/python2.7/optparse.py | 47 | 61205 | """A powerful, extensible, and easy-to-use option parser.
By Greg Ward <[email protected]>
Originally distributed as Optik.
For support, use the [email protected] mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
Simple usage example:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import types
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
from gettext import gettext
except ImportError:
def gettext(message):
return message
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid option is seen on the command line.
"""
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
"""
Raised if an ambiguous option is seen on the command line.
"""
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
by default. Set to false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.help_position = self.max_help_position = \
min(max_help_position, max(width - 20, indent_increment * 2))
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError, "subclasses must implement"
def format_heading(self, heading):
raise NotImplementedError, "subclasses must implement"
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = max(self.width - self.current_indent, 11)
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = textwrap.wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = max(self.width - self.help_position, 11)
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
def _parse_int(val):
return _parse_num(val, int)
def _parse_long(val):
return _parse_num(val, long)
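# For example (illustrative, not part of the original module):
#     _parse_int("0x1f")  -> 31   (hexadecimal)
#     _parse_int("0b101") -> 5    (binary)
#     _parse_int("010")   -> 8    (octal)
#     _parse_int("42")    -> 42   (decimal)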
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_long, _("long integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. which may consume an argument from the command line.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of actions which *require* a value type, ie. that
# always consume an argument from the command line.
ALWAYS_TYPED_ACTIONS = ("store",
"append")
# The set of actions which take a 'const' attribute.
CONST_ACTIONS = ("store_const",
"append_const")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__(self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = filter(None, opts)
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attr in attrs:
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
attrs = attrs.keys()
attrs.sort()
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs),
self)
# -- Constructor validation methods --------------------------------
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
# Allow type objects or builtin type conversion functions
# (int, str, etc.) as an alternative to their names. (The
# complicated check of __builtin__ is only necessary for
# Python 2.1 and earlier, and is short-circuited by the
# first check on modern Pythons.)
import __builtin__
if ( type(self.type) is types.TypeType or
(hasattr(self.type, "__name__") and
getattr(__builtin__, self.type.__name__, None) is self.type) ):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif type(self.choices) not in (types.TupleType, types.ListType):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not hasattr(self.callback, '__call__'):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
type(self.callback_args) is not types.TupleType):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
type(self.callback_kwargs) is not types.DictType):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise ValueError("unknown action %r" % self.action)
return 1
# class Option
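# Illustrative sketch (not part of the original module): the CHECK_METHODS
# run in the constructor, so invalid attribute combinations fail early:
#
#     Option("-x", action="store_true")     # OK; dest gleaned -> "x"
#     Option("--out-file", type="string")   # OK; dest gleaned -> "out_file"
#     Option("-y", action="bogus")          # raises OptionError (invalid action)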
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
try:
basestring
except NameError:
def isbasestring(x):
return isinstance(x, (types.StringType, types.UnicodeType))
else:
def isbasestring(x):
return isinstance(x, basestring)
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __cmp__(self, other):
if isinstance(other, Values):
return cmp(self.__dict__, other.__dict__)
elif isinstance(other, types.DictType):
return cmp(self.__dict__, other)
else:
return -1
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if attr in dict:
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError, "invalid update mode: %r" % mode
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
vars = {}
execfile(filename, vars)
self._update(vars, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
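# Illustrative sketch (not part of the original module): Values objects are
# plain attribute bags with merge helpers:
#
#     vals = Values({"verbose": False})
#     vals.ensure_value("history", []).append("start")  # creates attr on demand
#     vals._update_careful({"verbose": True, "bogus": 1})  # "bogus" is ignored
#     vals.verbose   # -> True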
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
      has multiple short option strings, it will appear in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the master
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError, "invalid conflict_resolution value %r" % handler
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
"""see OptionParser.destroy()."""
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if opt in self._short_opt:
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if opt in self._long_opt:
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if type(args[0]) in types.StringTypes:
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError, "not an Option instance: %r" % option
else:
raise TypeError, "invalid arguments"
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (opt_str in self._short_opt or
opt_str in self._long_opt)
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
            if option.help is not SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
"""see OptionParser.destroy()."""
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
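# Illustrative sketch (not part of the original module): option groups only
# affect help output, never parsing:
#
#     group = OptionGroup(parser, "Debug Options", "Use these with care.")
#     group.add_option("-g", action="store_true", dest="debug")
#     parser.add_option_group(group)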
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
description : string
A paragraph of text giving a brief overview of your program.
optparse reformats this paragraph to fit the current terminal
width and prints it when the user requests help (after usage,
but before the list of options).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
"""Set parsing to not stop on the first non-option, allowing
interspersing switches with command arguments. This is the
default behavior. See also disable_interspersed_args() and the
class documentation description of the attribute
allow_interspersed_args."""
self.allow_interspersed_args = True
def disable_interspersed_args(self):
"""Set parsing to stop on the first non-option. Use this if
you have a command processor which runs another command that
has options of its own and you want to make sure these options
don't get confused.
"""
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isbasestring(default):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if type(args[0]) is types.StringType:
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError, "not an OptionGroup instance: %r" % group
if group.parser is not self:
raise ValueError, "invalid OptionGroup (wrong parser)"
else:
raise TypeError, "invalid arguments"
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
        (values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError), err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
'file' (default stdout). Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print >>file, self.get_usage()
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
'file' (default stdout). As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print >>file, self.get_version()
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
# used by test suite
def _get_encoding(self, file):
encoding = getattr(file, "encoding", None)
if not encoding:
encoding = sys.getdefaultencoding()
return encoding
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
encoding = self._get_encoding(file)
file.write(self.format_help().encode(encoding, "replace"))
# class OptionParser
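# Illustrative sketch (not part of the original module): end-to-end usage,
# including "%default" expansion in help text:
#
#     parser = OptionParser(usage="%prog [options] file")
#     parser.add_option("-v", "--verbose", action="store_true", default=False,
#                       help="print status messages")
#     parser.add_option("-n", "--num", type="int", default=1,
#                       help="repeat count [default: %default]")
#     options, args = parser.parse_args(["-v", "-n", "3", "input.txt"])
#     # options.verbose == True, options.num == 3, args == ["input.txt"]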
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
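# For example (illustrative): with wordmap keys "--apple", "--apricot" and
# "--banana":
#     _match_abbrev("--ban", wordmap)   -> "--banana"
#     _match_abbrev("--ap", wordmap)    raises AmbiguousOptionError
#     _match_abbrev("--pear", wordmap)  raises BadOptionError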
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
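# e.g. (illustrative): option_list = [make_option("-f", "--file", dest="filename")]
# can be passed straight to OptionParser(option_list=option_list).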
| gpl-3.0 |
taedla01/MissionPlanner | Lib/encodings/cp866.py | 93 | 35350 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP866.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp866',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
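# Usage sketch (illustrative, not part of the generated file): the encodings
# package registers this codec under the name 'cp866', so:
#
#     u'\u0410\u0411\u0412'.encode('cp866')   # -> '\x80\x81\x82'
#     '\xf8'.decode('cp866')                  # -> u'\xb0' (DEGREE SIGN)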
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI
0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x2116, # NUMERO SIGN
0x00fd: 0x00a4, # CURRENCY SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O
u'\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E
u'\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA
u'\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
u'\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO
u'\u0404' # 0x00f2 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0454' # 0x00f3 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0407' # 0x00f4 -> CYRILLIC CAPITAL LETTER YI
u'\u0457' # 0x00f5 -> CYRILLIC SMALL LETTER YI
u'\u040e' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0x00f7 -> CYRILLIC SMALL LETTER SHORT U
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u2116' # 0x00fc -> NUMERO SIGN
u'\xa4' # 0x00fd -> CURRENCY SIGN
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00fd, # CURRENCY SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x0401: 0x00f0, # CYRILLIC CAPITAL LETTER IO
0x0404: 0x00f2, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0407: 0x00f4, # CYRILLIC CAPITAL LETTER YI
0x040e: 0x00f6, # CYRILLIC CAPITAL LETTER SHORT U
0x0410: 0x0080, # CYRILLIC CAPITAL LETTER A
0x0411: 0x0081, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x0082, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x0083, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x0084, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x0085, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x0086, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x0087, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x0088, # CYRILLIC CAPITAL LETTER I
0x0419: 0x0089, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x008a, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x008b, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x008c, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x008d, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x008e, # CYRILLIC CAPITAL LETTER O
0x041f: 0x008f, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x0090, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x0091, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x0092, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x0093, # CYRILLIC CAPITAL LETTER U
0x0424: 0x0094, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x0095, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x0096, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x0097, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x0098, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x0099, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x009b, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x009c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x009d, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009e, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x009f, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a1, # CYRILLIC SMALL LETTER BE
0x0432: 0x00a2, # CYRILLIC SMALL LETTER VE
0x0433: 0x00a3, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a4, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a5, # CYRILLIC SMALL LETTER IE
0x0436: 0x00a6, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00a7, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00a8, # CYRILLIC SMALL LETTER I
0x0439: 0x00a9, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00aa, # CYRILLIC SMALL LETTER KA
0x043b: 0x00ab, # CYRILLIC SMALL LETTER EL
0x043c: 0x00ac, # CYRILLIC SMALL LETTER EM
0x043d: 0x00ad, # CYRILLIC SMALL LETTER EN
0x043e: 0x00ae, # CYRILLIC SMALL LETTER O
0x043f: 0x00af, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e0, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e1, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e2, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e3, # CYRILLIC SMALL LETTER U
0x0444: 0x00e4, # CYRILLIC SMALL LETTER EF
0x0445: 0x00e5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00e6, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00e7, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00e8, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00e9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x00ea, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00eb, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ec, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00ed, # CYRILLIC SMALL LETTER E
0x044e: 0x00ee, # CYRILLIC SMALL LETTER YU
0x044f: 0x00ef, # CYRILLIC SMALL LETTER YA
0x0451: 0x00f1, # CYRILLIC SMALL LETTER IO
0x0454: 0x00f3, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0457: 0x00f5, # CYRILLIC SMALL LETTER YI
0x045e: 0x00f7, # CYRILLIC SMALL LETTER SHORT U
0x2116: 0x00fc, # NUMERO SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| gpl-3.0 |
aaronr/Quantum-GIS | python/plugins/fTools/tools/doGeoprocessing.py | 1 | 64763 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------
#
# fTools
# Copyright (C) 2008-2011 Carson Farmer
# EMAIL: carson.farmer (at) gmail.com
# WEB : http://www.ftools.ca/fTools.html
#
# A collection of data management and analysis tools for vector data
#
# Geoprocessing functions adapted from 'Geoprocessing Plugin',
# (C) 2008 by Dr. Horst Duester, Stefan Ziegler
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from ui_frmGeoprocessing import Ui_Dialog
import ftools_utils
import sys
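# The dialog below collects the input/overlay layers and parameters, then runs
# the chosen operation in a background geoprocessingThread; progress and
# results come back to the GUI through the runStatus/runRange/runFinished
# Qt signals.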
class GeoprocessingDialog( QDialog, Ui_Dialog ):
def __init__( self, iface, function ):
QDialog.__init__( self )
self.iface = iface
self.setupUi( self )
self.param.setValidator(QDoubleValidator(self.param))
self.myFunction = function
QObject.connect( self.btnBrowse, SIGNAL( "clicked()" ), self.outFile )
QObject.connect( self.inShapeA, SIGNAL( "currentIndexChanged(QString)" ), self.checkA )
QObject.connect( self.inShapeB, SIGNAL( "currentIndexChanged(QString)" ), self.checkB )
if function == 4 or function == 1 or function == 2:
QObject.connect( self.inShapeA, SIGNAL( "currentIndexChanged(QString)" ), self.update )
self.manageGui()
self.success = False
self.cancel_close = self.buttonBox_2.button( QDialogButtonBox.Close )
self.buttonOk = self.buttonBox_2.button( QDialogButtonBox.Ok )
self.progressBar.setValue (0 )
def checkA( self ):
inputLayer = unicode( self.inShapeA.currentText() )
if inputLayer != "":
changedLayer = ftools_utils.getVectorLayerByName( inputLayer )
if changedLayer.selectedFeatureCount() != 0:
self.useSelectedA.setCheckState( Qt.Checked )
else:
self.useSelectedA.setCheckState( Qt.Unchecked )
def checkB( self ):
inputLayer = unicode( self.inShapeB.currentText() )
if inputLayer != "":
changedLayer = ftools_utils.getVectorLayerByName( inputLayer )
if changedLayer.selectedFeatureCount() != 0:
self.useSelectedB.setCheckState( Qt.Checked )
else:
self.useSelectedB.setCheckState( Qt.Unchecked )
def update( self ):
self.attrib.clear()
inputLayer = unicode( self.inShapeA.currentText() )
if inputLayer != "":
changedLayer = ftools_utils.getVectorLayerByName( inputLayer )
changedField = changedLayer.dataProvider().fields()
for i in changedField:
self.attrib.addItem( unicode( changedField[i].name() ) )
if self.myFunction == 4:
self.attrib.addItem( "--- " + self.tr( "Dissolve all" ) + " ---" )
def accept( self ):
if self.inShapeA.currentText() == "":
QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Please specify an input layer" ) )
elif self.inShapeB.isVisible() and self.inShapeB.currentText() == "":
QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Please specify a difference/intersect/union layer" ) )
elif self.param.isEnabled() and self.param.isVisible() and self.param.text() == "":
QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Please specify valid buffer value" ) )
elif self.attrib.isEnabled() and self.attrib.isVisible() and self.attrib.currentText() == "":
QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Please specify dissolve field" ) )
elif self.outShape.text() == "":
QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Please specify output shapefile" ) )
else:
changedLayerA = ftools_utils.getVectorLayerByName( self.inShapeA.currentText() )
changedLayerB = ftools_utils.getVectorLayerByName( self.inShapeB.currentText() )
# check for selection in layer A
if self.useSelectedA.isChecked() and changedLayerA.selectedFeatureCount() == 0:
QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "No features selected, please uncheck 'Use selected' or make a selection" ) )
# check for selection in layer B
elif self.inShapeB.isVisible() and self.useSelectedB.isChecked() and changedLayerB.selectedFeatureCount() == 0:
QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "No features selected, please uncheck 'Use selected' or make a selection" ) )
else:
self.outShape.clear()
if self.attrib.isEnabled():
self.geoprocessing( self.inShapeA.currentText(), self.inShapeB.currentText(),
unicode( self.attrib.currentText() ), self.mergeOutput.checkState(), self.useSelectedA.checkState(),
self.useSelectedB.checkState(), self.spnSegments.value() )
else:
if self.param.isEnabled() and self.param.isVisible():
parameter = float( self.param.text() )
else:
parameter = None
self.geoprocessing( self.inShapeA.currentText(), self.inShapeB.currentText(),
parameter, self.mergeOutput.checkState(), self.useSelectedA.checkState(), self.useSelectedB.checkState(), self.spnSegments.value() )
def outFile( self ):
self.outShape.clear()
( self.shapefileName, self.encoding ) = ftools_utils.saveDialog( self )
if self.shapefileName is None or self.encoding is None:
return
self.outShape.setText( QString( self.shapefileName ) )
def manageGui( self ):
if self.myFunction == 1: # Buffer
self.label_2.hide()
self.inShapeB.hide()
self.useSelectedB.hide()
self.label_4.hide()
self.setWindowTitle( self.tr( "Buffer(s)" ) )
elif self.myFunction == 2: # Convex hull
self.label_2.hide()
self.inShapeB.hide()
self.useSelectedB.hide()
self.rdoBuffer.setText( self.tr( "Create single minimum convex hull" ) )
self.rdoField.setText( self.tr( "Create convex hulls based on input field" ) )
self.label_4.hide()
self.param.hide()
self.lblSegments.hide()
self.spnSegments.hide()
self.setWindowTitle( self.tr( "Convex hull(s)" ) )
self.mergeOutput.hide()
elif self.myFunction == 4: # Dissolve
self.label_2.hide()
self.inShapeB.hide()
self.useSelectedB.hide()
self.rdoBuffer.hide()
self.attrib.setEnabled( True )
self.param.hide()
self.rdoField.hide()
self.mergeOutput.hide()
self.lblSegments.hide()
self.spnSegments.hide()
self.setWindowTitle( self.tr( "Dissolve" ) )
else:
self.rdoBuffer.hide()
self.param.hide()
self.label_4.hide()
self.rdoField.hide()
self.attrib.hide()
self.mergeOutput.hide()
self.lblSegments.hide()
self.spnSegments.hide()
if self.myFunction == 3: # Difference
self.label_2.setText( self.tr( "Difference layer" ) )
self.setWindowTitle( self.tr( "Difference" ) )
elif self.myFunction == 5: # Intersect
self.label_2.setText( self.tr( "Intersect layer" ) )
self.setWindowTitle( self.tr( "Intersect" ) )
elif self.myFunction == 7: # Symmetrical difference
self.label_2.setText( self.tr( "Difference layer" ) )
self.setWindowTitle( self.tr( "Symmetrical difference" ) )
elif self.myFunction == 8: # Clip
self.label_2.setText( self.tr( "Clip layer" ) )
self.setWindowTitle( self.tr( "Clip" ) )
else: # Union
self.label_2.setText( self.tr( "Union layer" ) )
self.setWindowTitle( self.tr( "Union" ) )
self.resize(381, 100)
myListA = []
myListB = []
self.inShapeA.clear()
self.inShapeB.clear()
if self.myFunction == 4:
myListA = ftools_utils.getLayerNames( [ QGis.Polygon ] )
myListB = []
else:
myListA = ftools_utils.getLayerNames( [ QGis.Point, QGis.Line, QGis.Polygon ] )
myListB = ftools_utils.getLayerNames( [ QGis.Point, QGis.Line, QGis.Polygon ] )
self.inShapeA.addItems( myListA )
self.inShapeB.addItems( myListB )
return
#1: Buffer
#2: Convex Hull
#3: Difference
#4: Dissolve
#5: Intersection
#6: Union
#7: Symmetrical Difference
#8: Clip
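# The integer above is the 'function' argument of the dialog constructor; it
# selects both the GUI layout (manageGui) and the routine executed by the
# worker thread (run). For example, GeoprocessingDialog( iface, 8 ) opens the
# Clip dialog.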
def geoprocessing( self, myLayerA, myLayerB, myParam, myMerge, mySelectionA, mySelectionB, mySegments ):
check = QFile( self.shapefileName )
if check.exists():
if not QgsVectorFileWriter.deleteShapeFile( self.shapefileName ):
QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Unable to delete existing shapefile." ) )
return
self.buttonOk.setEnabled( False )
self.testThread = geoprocessingThread( self.iface.mainWindow(), self, self.myFunction, myLayerA,
myLayerB, myParam, myMerge, mySelectionA, mySelectionB, mySegments, self.shapefileName, self.encoding )
QObject.connect( self.testThread, SIGNAL( "runFinished(PyQt_PyObject)" ), self.runFinishedFromThread )
QObject.connect( self.testThread, SIGNAL( "runStatus(PyQt_PyObject)" ), self.runStatusFromThread )
QObject.connect( self.testThread, SIGNAL( "runRange(PyQt_PyObject)" ), self.runRangeFromThread )
self.cancel_close.setText( self.tr("Cancel") )
QObject.connect( self.cancel_close, SIGNAL( "clicked()" ), self.cancelThread )
self.testThread.start()
return True
def cancelThread( self ):
self.testThread.stop()
self.buttonOk.setEnabled( True )
def runFinishedFromThread( self, results ):
self.testThread.stop()
self.buttonOk.setEnabled( True )
self.cancel_close.setText( self.tr("Close") )
QObject.disconnect( self.cancel_close, SIGNAL( "clicked()" ), self.cancelThread )
out_text = ""
if results[3] is not None:
QMessageBox.warning( self, self.tr( "Geoprocessing" ),
self.tr( "No output created. File creation error:\n%1" )
.arg( results[3] ) )
return
if (not results[2] is None and not results[2]) or not results[1] or not results[0]:
out_text = self.tr( "\nWarnings:" )
end_text = self.tr( "\nSome output geometries may be missing or invalid.\n\nWould you like to add the new layer anyway?" )
else:
out_text = "\n"
end_text = self.tr( "\n\nWould you like to add the new layer to the TOC?" )
if not results[2] is None:
if not results[2]:
out_text = out_text + self.tr( "\nInput CRS error: Different input coordinate reference systems detected, results may not be as expected.")
else:
out_text = out_text + self.tr( "\nInput CRS error: One or more input layers missing coordinate reference information, results may not be as expected.")
if not results[1]:
out_text = out_text + self.tr( "\nFeature geometry error: One or more output features ignored due to invalid geometry.")
if not results[0]:
out_text = out_text + self.tr( "\nGEOS geoprocessing error: One or more input features have invalid geometry.")
addToTOC = QMessageBox.question( self, self.tr("Geoprocessing"), self.tr( "Created output shapefile:\n%1\n%2%3" ).arg( unicode( self.shapefileName ) ).arg( out_text ).arg( end_text ), QMessageBox.Yes, QMessageBox.No, QMessageBox.NoButton )
if addToTOC == QMessageBox.Yes:
if not ftools_utils.addShapeToCanvas( unicode( self.shapefileName ) ):
QMessageBox.warning( self, self.tr("Geoprocessing"), self.tr( "Error loading output shapefile:\n%1" ).arg( unicode( self.shapefileName ) ))
def runStatusFromThread( self, status ):
self.progressBar.setValue( status )
def runRangeFromThread( self, range_vals ):
self.progressBar.setRange( range_vals[ 0 ], range_vals[ 1 ] )
class geoprocessingThread( QThread ):
def __init__( self, parentThread, parentObject, function, myLayerA, myLayerB,
myParam, myMerge, mySelectionA, mySelectionB, mySegments, myName, myEncoding ):
QThread.__init__( self, parentThread )
self.parent = parentObject
self.running = False
self.myFunction = function
self.myLayerA = myLayerA
self.myLayerB = myLayerB
self.myParam = myParam
self.myMerge = myMerge
self.mySelectionA = mySelectionA
self.mySelectionB = mySelectionB
self.mySegments = int( mySegments )
self.myName = myName
self.myEncoding = myEncoding
def run( self ):
self.running = True
self.vlayerA = ftools_utils.getVectorLayerByName( self.myLayerA )
error = None
if self.myFunction == 1 or self.myFunction == 2 or self.myFunction == 4:
( self.myParam, useField ) = self.checkParameter( self.vlayerA, self.myParam )
if not self.myParam is None:
if self.myFunction == 1:
geos, feature, match, error = self.buffering( useField )
elif self.myFunction == 2:
geos, feature, match, error = self.convex_hull( useField )
elif self.myFunction == 4:
geos, feature, match, error = self.dissolve( useField )
else:
self.vlayerB = ftools_utils.getVectorLayerByName( self.myLayerB )
if self.myFunction == 3:
geos, feature, match, error = self.difference()
elif self.myFunction == 5:
geos, feature, match, error = self.intersect()
elif self.myFunction == 6:
geos, feature, match, error = self.union()
elif self.myFunction == 7:
geos, feature, match, error = self.symetrical_difference()
elif self.myFunction == 8:
geos, feature, match, error = self.clip()
self.emit( SIGNAL( "runFinished(PyQt_PyObject)" ), (geos, feature, match, error) )
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
def stop(self):
self.running = False
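# Each routine below returns a 4-tuple (geos_ok, feature_ok, crs_match, error):
# geos_ok is False if a GEOS operation raised, feature_ok is False if an output
# feature could not be written, crs_match flags mismatched (False) or missing
# (None) input CRS information, and error carries a file-writer message or
# None. runFinishedFromThread() turns these flags into user warnings.
# buffering() buffers each feature by a fixed distance or by a per-feature
# field value, optionally dissolving all buffers into a single geometry.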
def buffering( self, useField ):
GEOS_EXCEPT = True
FEATURE_EXCEPT = True
vproviderA = self.vlayerA.dataProvider()
allAttrs = vproviderA.attributeIndexes()
vproviderA.select( allAttrs )
fields = vproviderA.fields()
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, QGis.WKBPolygon, vproviderA.crs() )
# check if writer was created properly, if not, return with error
if writer.hasError():
return GEOS_EXCEPT, FEATURE_EXCEPT, True, writer.errorMessage()
outFeat = QgsFeature()
inFeat = QgsFeature()
inGeom = QgsGeometry()
outGeom = QgsGeometry()
nElement = 0
# there is selection in input layer
if self.mySelectionA:
nFeat = self.vlayerA.selectedFeatureCount()
selectionA = self.vlayerA.selectedFeatures()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
# with dissolve
if self.myMerge:
first = True
for inFeat in selectionA:
atMap = inFeat.attributeMap()
if useField:
value = atMap[ self.myParam ].toDouble()[ 0 ]
else:
value = self.myParam
inGeom = QgsGeometry( inFeat.geometry() )
try:
outGeom = inGeom.buffer( float( value ), self.mySegments )
if first:
tempGeom = QgsGeometry( outGeom )
first = False
else:
try:
tempGeom = tempGeom.combine( outGeom )
except:
GEOS_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
try:
outFeat.setGeometry( tempGeom )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
# without dissolve
else:
for inFeat in selectionA:
atMap = inFeat.attributeMap()
if useField:
value = atMap[ self.myParam ].toDouble()[ 0 ]
else:
value = self.myParam
inGeom = QgsGeometry( inFeat.geometry() )
try:
outGeom = inGeom.buffer( float( value ), self.mySegments )
try:
outFeat.setGeometry( outGeom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
# there is no selection in input layer
else:
nFeat = vproviderA.featureCount()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
# with dissolve
if self.myMerge:
first = True
while vproviderA.nextFeature( inFeat ):
atMap = inFeat.attributeMap()
if useField:
value = atMap[ self.myParam ].toDouble()[ 0 ]
else:
value = self.myParam
inGeom = QgsGeometry( inFeat.geometry() )
try:
outGeom = inGeom.buffer( float( value ), self.mySegments )
if first:
tempGeom = QgsGeometry( outGeom )
first = False
else:
try:
tempGeom = tempGeom.combine( outGeom )
except:
GEOS_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
try:
outFeat.setGeometry( tempGeom )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
# without dissolve
else:
vproviderA.rewind()
while vproviderA.nextFeature( inFeat ):
atMap = inFeat.attributeMap()
if useField:
value = atMap[ self.myParam ].toDouble()[ 0 ]
else:
value = self.myParam
inGeom = QgsGeometry( inFeat.geometry() )
try:
outGeom = inGeom.buffer( float( value ), self.mySegments )
try:
outFeat.setGeometry( outGeom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
del writer
return GEOS_EXCEPT, FEATURE_EXCEPT, True, None
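# convex_hull() builds one hull over all (selected) features or, when useField
# is set, one hull per unique value of the chosen attribute; points are
# collected with ftools_utils.extractPoints() and closed with
# QgsGeometry.convexHull().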
def convex_hull(self, useField ):
GEOS_EXCEPT = True
FEATURE_EXCEPT = True
vproviderA = self.vlayerA.dataProvider()
allAttrsA = vproviderA.attributeIndexes()
vproviderA.select(allAttrsA)
fields = vproviderA.fields()
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, QGis.WKBPolygon, vproviderA.crs() )
if writer.hasError():
return GEOS_EXCEPT, FEATURE_EXCEPT, True, writer.errorMessage()
inFeat = QgsFeature()
outFeat = QgsFeature()
inGeom = QgsGeometry()
outGeom = QgsGeometry()
nElement = 0
# there is selection in input layer
if self.mySelectionA:
nFeat = self.vlayerA.selectedFeatureCount()
selectionA = self.vlayerA.selectedFeatures()
if useField:
unique = ftools_utils.getUniqueValues( vproviderA, self.myParam )
nFeat = nFeat * len( unique )
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
for i in unique:
hull = []
first = True
outID = 0
for inFeat in selectionA:
atMap = inFeat.attributeMap()
idVar = atMap[ self.myParam ]
if idVar.toString().trimmed() == i.toString().trimmed():
if first:
outID = idVar
first = False
inGeom = QgsGeometry( inFeat.geometry() )
points = ftools_utils.extractPoints( inGeom )
hull.extend( points )
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
if len( hull ) >= 3:
tmpGeom = QgsGeometry( outGeom.fromMultiPoint( hull ) )
try:
outGeom = tmpGeom.convexHull()
outFeat.setGeometry( outGeom )
(area, perim) = self.simpleMeasure( outGeom )
outFeat.addAttribute( 0, QVariant( outID ) )
outFeat.addAttribute( 1, QVariant( area ) )
outFeat.addAttribute( 2, QVariant( perim ) )
writer.addFeature( outFeat )
except:
GEOS_EXCEPT = False
continue
else:
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
hull = []
for inFeat in selectionA:
inGeom = QgsGeometry( inFeat.geometry() )
points = ftools_utils.extractPoints( inGeom )
hull.extend( points )
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
tmpGeom = QgsGeometry( outGeom.fromMultiPoint( hull ) )
try:
outGeom = tmpGeom.convexHull()
outFeat.setGeometry( outGeom )
writer.addFeature( outFeat )
except:
GEOS_EXCEPT = False
# there is no selection in input layer
else:
rect = self.vlayerA.extent()
nFeat = vproviderA.featureCount()
if useField:
unique = ftools_utils.getUniqueValues( vproviderA, self.myParam )
nFeat = nFeat * len( unique )
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
for i in unique:
hull = []
first = True
outID = 0
vproviderA.select( allAttrsA )#, rect )
#vproviderA.rewind()
while vproviderA.nextFeature( inFeat ):
atMap = inFeat.attributeMap()
idVar = atMap[ self.myParam ]
if idVar.toString().trimmed() == i.toString().trimmed():
if first:
outID = idVar
first = False
inGeom = QgsGeometry( inFeat.geometry() )
points = ftools_utils.extractPoints( inGeom )
hull.extend( points )
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
if len( hull ) >= 3:
tmpGeom = QgsGeometry( outGeom.fromMultiPoint( hull ) )
try:
outGeom = tmpGeom.convexHull()
outFeat.setGeometry( outGeom )
(area, perim) = self.simpleMeasure( outGeom )
outFeat.addAttribute( 0, QVariant( outID ) )
outFeat.addAttribute( 1, QVariant( area ) )
outFeat.addAttribute( 2, QVariant( perim ) )
writer.addFeature( outFeat )
except:
GEOS_EXCEPT = False
continue
else:
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
hull = []
#vproviderA.rewind()
vproviderA.select(allAttrsA)
while vproviderA.nextFeature( inFeat ):
inGeom = QgsGeometry( inFeat.geometry() )
points = ftools_utils.extractPoints( inGeom )
hull.extend( points )
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
tmpGeom = QgsGeometry( outGeom.fromMultiPoint( hull ) )
try:
outGeom = tmpGeom.convexHull()
outFeat.setGeometry( outGeom )
writer.addFeature( outFeat )
except:
GEOS_EXCEPT = False
del writer
return GEOS_EXCEPT, FEATURE_EXCEPT, True, None
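# dissolve() merges geometries with QgsGeometry.combine(): either all features
# into one, or one merged feature per unique value of the dissolve field.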
def dissolve( self, useField ):
GEOS_EXCEPT = True
FEATURE_EXCEPT = True
vproviderA = self.vlayerA.dataProvider()
allAttrsA = vproviderA.attributeIndexes()
fields = vproviderA.fields()
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, vproviderA.geometryType(), vproviderA.crs() )
if writer.hasError():
return GEOS_EXCEPT, FEATURE_EXCEPT, True, writer.errorMessage()
inFeat = QgsFeature()
outFeat = QgsFeature()
vproviderA.rewind()
nElement = 0
# there is selection in input layer
if self.mySelectionA:
nFeat = self.vlayerA.selectedFeatureCount()
selectionA = self.vlayerA.selectedFeatures()
if not useField:
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
first = True
for inFeat in selectionA:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
if first:
attrs = inFeat.attributeMap()
tmpInGeom = QgsGeometry( inFeat.geometry() )
outFeat.setGeometry( tmpInGeom )
first = False
else:
tmpInGeom = QgsGeometry( inFeat.geometry() )
tmpOutGeom = QgsGeometry( outFeat.geometry() )
try:
tmpOutGeom = QgsGeometry( tmpOutGeom.combine( tmpInGeom ) )
outFeat.setGeometry( tmpOutGeom )
except:
GEOS_EXCEPT = False
continue
outFeat.setAttributeMap( attrs )
writer.addFeature( outFeat )
else:
unique = vproviderA.uniqueValues( int( self.myParam ) )
nFeat = nFeat * len( unique )
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
for item in unique:
first = True
add = False
vproviderA.select( allAttrsA )
vproviderA.rewind()
for inFeat in selectionA:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
atMap = inFeat.attributeMap()
tempItem = atMap[ self.myParam ]
if tempItem.toString().trimmed() == item.toString().trimmed():
add = True
if first:
QgsGeometry( inFeat.geometry() )
tmpInGeom = QgsGeometry( inFeat.geometry() )
outFeat.setGeometry( tmpInGeom )
first = False
attrs = inFeat.attributeMap()
else:
tmpInGeom = QgsGeometry( inFeat.geometry() )
tmpOutGeom = QgsGeometry( outFeat.geometry() )
try:
tmpOutGeom = QgsGeometry( tmpOutGeom.combine( tmpInGeom ) )
outFeat.setGeometry( tmpOutGeom )
except:
GEOS_EXCEPT = False
add = False
if add:
outFeat.setAttributeMap( attrs )
writer.addFeature( outFeat )
# there is no selection in input layer
else:
nFeat = vproviderA.featureCount()
if not useField:
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
first = True
while vproviderA.nextFeature( inFeat ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
if first:
attrs = inFeat.attributeMap()
tmpInGeom = QgsGeometry( inFeat.geometry() )
outFeat.setGeometry( tmpInGeom )
first = False
else:
tmpInGeom = QgsGeometry( inFeat.geometry() )
tmpOutGeom = QgsGeometry( outFeat.geometry() )
try:
tmpOutGeom = QgsGeometry( tmpOutGeom.combine( tmpInGeom ) )
outFeat.setGeometry( tmpOutGeom )
except:
GEOS_EXCEPT = False
continue
outFeat.setAttributeMap( attrs )
writer.addFeature( outFeat )
else:
unique = vproviderA.uniqueValues( int( self.myParam ) )
nFeat = nFeat * len( unique )
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
for item in unique:
first = True
add = True
vproviderA.select( allAttrsA )
vproviderA.rewind()
while vproviderA.nextFeature( inFeat ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
atMap = inFeat.attributeMap()
tempItem = atMap[ self.myParam ]
if tempItem.toString().trimmed() == item.toString().trimmed():
if first:
QgsGeometry( inFeat.geometry() )
tmpInGeom = QgsGeometry( inFeat.geometry() )
outFeat.setGeometry( tmpInGeom )
first = False
attrs = inFeat.attributeMap()
else:
tmpInGeom = QgsGeometry( inFeat.geometry() )
tmpOutGeom = QgsGeometry( outFeat.geometry() )
try:
tmpOutGeom = QgsGeometry( tmpOutGeom.combine( tmpInGeom ) )
outFeat.setGeometry( tmpOutGeom )
except:
GEOS_EXCEPT = False
add = False
if add:
outFeat.setAttributeMap( attrs )
writer.addFeature( outFeat )
del writer
return GEOS_EXCEPT, FEATURE_EXCEPT, True, None
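# difference() subtracts overlay geometries from each input feature, using a
# spatial index on layer B to restrict the candidate intersections.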
def difference( self ):
GEOS_EXCEPT = True
FEATURE_EXCEPT = True
vproviderA = self.vlayerA.dataProvider()
allAttrsA = vproviderA.attributeIndexes()
vproviderA.select( allAttrsA )
vproviderB = self.vlayerB.dataProvider()
allAttrsB = vproviderB.attributeIndexes()
vproviderB.select( allAttrsB )
fields = vproviderA.fields()
# check for crs compatibility
crsA = vproviderA.crs()
crsB = vproviderB.crs()
if not crsA.isValid() or not crsB.isValid():
crs_match = None
else:
crs_match = crsA == crsB
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, vproviderA.geometryType(), vproviderA.crs() )
if writer.hasError():
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, writer.errorMessage()
inFeatA = QgsFeature()
inFeatB = QgsFeature()
outFeat = QgsFeature()
index = ftools_utils.createIndex( vproviderB )
nElement = 0
# there is selection in input layer
if self.mySelectionA:
nFeat = self.vlayerA.selectedFeatureCount()
selectionA = self.vlayerA.selectedFeatures()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
# we have selection in overlay layer
if self.mySelectionB:
selectionB = self.vlayerB.selectedFeaturesIds()
for inFeatA in selectionA:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
add = True
geom = QgsGeometry( inFeatA.geometry() )
diff_geom = QgsGeometry( geom )
atMap = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
for id in intersects:
# is intersect feature in selection
if id in selectionB:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if diff_geom.intersects( tmpGeom ):
diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
except:
GEOS_EXCEPT = False
add = False
break
if add:
try:
outFeat.setGeometry( diff_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
# we have no selection in overlay layer
else:
for inFeatA in selectionA:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
add = True
geom = QgsGeometry( inFeatA.geometry() )
diff_geom = QgsGeometry( geom )
atMap = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
for id in intersects:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if diff_geom.intersects( tmpGeom ):
diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
except:
GEOS_EXCEPT = False
add = False
break
if add:
try:
outFeat.setGeometry( diff_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
# there is no selection in input layer
else:
nFeat = vproviderA.featureCount()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0 )
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
vproviderA.rewind()
# we have selection in overlay layer
if self.mySelectionB:
selectionB = self.vlayerB.selectedFeaturesIds()
while vproviderA.nextFeature( inFeatA ):
nElement += 1
add = True
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
diff_geom = QgsGeometry( geom )
atMap = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
for id in intersects:
# now check if id in selection
if id in selectionB:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if diff_geom.intersects( tmpGeom ):
diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
except:
GEOS_EXCEPT = False
add = False
break
if add:
try:
outFeat.setGeometry( diff_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
# we have no selection in overlay layer
else:
while vproviderA.nextFeature( inFeatA ):
nElement += 1
add = True
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
diff_geom = QgsGeometry( geom )
atMap = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
for id in intersects:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if diff_geom.intersects( tmpGeom ):
diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
except:
GEOS_EXCEPT = False
add = False
break
if add:
try:
outFeat.setGeometry( diff_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
del writer
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, None
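# intersect() writes the pairwise intersections of layers A and B with the
# combined attribute maps; wkbType 7 (a GeometryCollection) is rebuilt as
# combine(A, B) minus symDifference(A, B) to recover a simple geometry.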
def intersect( self ):
GEOS_EXCEPT = True
FEATURE_EXCEPT = True
vproviderA = self.vlayerA.dataProvider()
allAttrsA = vproviderA.attributeIndexes()
vproviderA.select( allAttrsA )
vproviderB = self.vlayerB.dataProvider()
allAttrsB = vproviderB.attributeIndexes()
vproviderB.select( allAttrsB )
# check for crs compatibility
crsA = vproviderA.crs()
crsB = vproviderB.crs()
if not crsA.isValid() or not crsB.isValid():
crs_match = None
else:
crs_match = crsA == crsB
fields = ftools_utils.combineVectorFields( self.vlayerA, self.vlayerB )
longNames = ftools_utils.checkFieldNameLength( fields )
if not longNames.isEmpty():
message = QString( 'The following field names are longer than 10 characters:\n%1' ).arg( longNames.join( '\n' ) )
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, message
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, vproviderA.geometryType(), vproviderA.crs() )
if writer.hasError():
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, writer.errorMessage()
inFeatA = QgsFeature()
inFeatB = QgsFeature()
outFeat = QgsFeature()
index = ftools_utils.createIndex( vproviderB )
nElement = 0
# there is selection in input layer
if self.mySelectionA:
nFeat = self.vlayerA.selectedFeatureCount()
selectionA = self.vlayerA.selectedFeatures()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
# we have selection in overlay layer
if self.mySelectionB:
selectionB = self.vlayerB.selectedFeaturesIds()
for inFeatA in selectionA:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMapA = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
for id in intersects:
if id in selectionB:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if geom.intersects( tmpGeom ):
atMapB = inFeatB.attributeMap()
int_geom = QgsGeometry( geom.intersection( tmpGeom ) )
if int_geom.wkbType() == 7:
int_com = geom.combine( tmpGeom )
int_sym = geom.symDifference( tmpGeom )
int_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
outFeat.setGeometry( int_geom )
outFeat.setAttributeMap( ftools_utils.combineVectorAttributes( atMapA, atMapB ) )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
break
# we don't have selection in overlay layer
else:
for inFeatA in selectionA:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMapA = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
for id in intersects:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if geom.intersects( tmpGeom ):
atMapB = inFeatB.attributeMap()
int_geom = QgsGeometry( geom.intersection( tmpGeom ) )
if int_geom.wkbType() == 7:
int_com = geom.combine( tmpGeom )
int_sym = geom.symDifference( tmpGeom )
int_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
outFeat.setGeometry( int_geom )
outFeat.setAttributeMap( ftools_utils.combineVectorAttributes( atMapA, atMapB ) )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
break
# there is no selection in input layer
else:
nFeat = vproviderA.featureCount()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
vproviderA.rewind()
# we have selection in overlay layer
if self.mySelectionB:
selectionB = self.vlayerB.selectedFeaturesIds()
while vproviderA.nextFeature( inFeatA ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMapA = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
for id in intersects:
if id in selectionB:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if geom.intersects( tmpGeom ):
atMapB = inFeatB.attributeMap()
int_geom = QgsGeometry( geom.intersection( tmpGeom ) )
if int_geom.wkbType() == 7:
int_com = geom.combine( tmpGeom )
int_sym = geom.symDifference( tmpGeom )
int_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
outFeat.setGeometry( int_geom )
outFeat.setAttributeMap( ftools_utils.combineVectorAttributes( atMapA, atMapB ) )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
break
# we have no selection in overlay layer
else:
while vproviderA.nextFeature( inFeatA ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMapA = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
for id in intersects:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if geom.intersects( tmpGeom ):
atMapB = inFeatB.attributeMap()
int_geom = QgsGeometry( geom.intersection( tmpGeom ) )
if int_geom.wkbType() == 7:
int_com = geom.combine( tmpGeom )
int_sym = geom.symDifference( tmpGeom )
int_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
outFeat.setGeometry( int_geom )
outFeat.setAttributeMap( ftools_utils.combineVectorAttributes( atMapA, atMapB ) )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
break
del writer
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, None
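# union() makes two passes: for each feature of A it writes the A/B
# intersections plus the part of A not covered by B, then for each feature of
# B it writes the part of B not covered by A. Overlay attributes are re-keyed
# past the end of A's field list so both attribute sets survive.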
def union( self ):
GEOS_EXCEPT = True
FEATURE_EXCEPT = True
vproviderA = self.vlayerA.dataProvider()
allAttrsA = vproviderA.attributeIndexes()
vproviderA.select( allAttrsA )
vproviderB = self.vlayerB.dataProvider()
allAttrsB = vproviderB.attributeIndexes()
vproviderB.select( allAttrsB )
# check for crs compatibility
crsA = vproviderA.crs()
crsB = vproviderB.crs()
if not crsA.isValid() or not crsB.isValid():
crs_match = None
else:
crs_match = crsA == crsB
fields = ftools_utils.combineVectorFields( self.vlayerA, self.vlayerB )
longNames = ftools_utils.checkFieldNameLength( fields )
if not longNames.isEmpty():
message = QString( 'The following field names are longer than 10 characters:\n%1' ).arg( longNames.join( '\n' ) )
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, message
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, vproviderA.geometryType(), vproviderA.crs() )
if writer.hasError():
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, writer.errorMessage()
inFeatA = QgsFeature()
inFeatB = QgsFeature()
outFeat = QgsFeature()
indexA = ftools_utils.createIndex( vproviderB )
indexB = ftools_utils.createIndex( vproviderA )
nFeat = vproviderA.featureCount() * vproviderB.featureCount()
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
vproviderA.rewind()
count = 0
while vproviderA.nextFeature( inFeatA ):
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
nElement += 1
found = False
geom = QgsGeometry( inFeatA.geometry() )
diff_geom = QgsGeometry( geom )
atMapA = inFeatA.attributeMap()
intersects = indexA.intersects( geom.boundingBox() )
if len( intersects ) < 1:
try:
outFeat.setGeometry( geom )
outFeat.setAttributeMap( atMapA )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
# this really shouldn't happen, as we
# haven't edited the input geom at all
# continue
else:
for id in intersects:
count += 1
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
atMapB = inFeatB.attributeMap()
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if geom.intersects( tmpGeom ):
found = True
int_geom = geom.intersection( tmpGeom )
if int_geom is None:
GEOS_EXCEPT = False
# There was a problem creating the intersection
int_geom = QgsGeometry()
else:
int_geom = QgsGeometry(int_geom)
if diff_geom.intersects( tmpGeom ):
diff_geom = diff_geom.difference( tmpGeom )
if diff_geom is None:
# It's possible there was an error here?
diff_geom = QgsGeometry()
else:
diff_geom = QgsGeometry(diff_geom)
if int_geom.wkbType() == 0:
# intersection produced different geometry types
temp_list = int_geom.asGeometryCollection()
for i in temp_list:
if i.type() == geom.type():
int_geom = QgsGeometry( i )
try:
outFeat.setGeometry( int_geom )
outFeat.setAttributeMap( ftools_utils.combineVectorAttributes( atMapA, atMapB ) )
# print int_geom.wkbType()
writer.addFeature( outFeat )
except Exception, err:
# print str(err)
FEATURE_EXCEPT = False
# else:
# # this only happens if the bounding box
# # intersects, but the geometry doesn't
# try:
# outFeat.setGeometry( geom )
# outFeat.setAttributeMap( atMapA )
# print geom.wkbType()
# writer.addFeature( outFeat )
# except:
## # also shouldn't ever happen
# FEATURE_EXCEPT = False
# pass
except Exception, err:
# print str(err)
GEOS_EXCEPT = False
found = False
if found:
try:
if diff_geom.wkbType() == 0:
temp_list = diff_geom.asGeometryCollection()
for i in temp_list:
if i.type() == geom.type():
diff_geom = QgsGeometry( i )
outFeat.setGeometry( diff_geom )
outFeat.setAttributeMap( atMapA )
# print diff_geom.wkbType()
writer.addFeature( outFeat )
except Exception, err:
# print str(err)
FEATURE_EXCEPT = False
# continue
length = len( vproviderA.fields().values() )
vproviderB.rewind()
while vproviderB.nextFeature( inFeatA ):
add = False
geom = QgsGeometry( inFeatA.geometry() )
diff_geom = QgsGeometry( geom )
atMap = inFeatA.attributeMap().values()
atMap = dict( zip( range( length, length + len( atMap ) ), atMap ) )
intersects = indexB.intersects( geom.boundingBox() )
if len(intersects) < 1:
try:
outFeat.setGeometry( geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except Exception, err:
# print str(err)
FEATURE_EXCEPT = False
else:
for id in intersects:
vproviderA.featureAtId( int( id ), inFeatB , True, allAttrsA )
atMapB = inFeatB.attributeMap()
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if diff_geom.intersects( tmpGeom ):
add = True
diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
except Exception, err:
# print str(err)
add = False
GEOS_EXCEPT = False
if add:
try:
outFeat.setGeometry( diff_geom )
outFeat.setAttributeMap( atMapB )
writer.addFeature( outFeat )
except Exception, err:
# print str(err)
FEATURE_EXCEPT = False
# continue
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
nElement += 1
del writer
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, None
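# symetrical_difference() uses the same two-pass scheme as union() but keeps
# only the non-overlapping parts of each layer.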
def symetrical_difference( self ):
GEOS_EXCEPT = True
FEATURE_EXCEPT = True
vproviderA = self.vlayerA.dataProvider()
allAttrsA = vproviderA.attributeIndexes()
vproviderA.select( allAttrsA )
vproviderB = self.vlayerB.dataProvider()
allAttrsB = vproviderB.attributeIndexes()
vproviderB.select( allAttrsB )
# check for crs compatibility
crsA = vproviderA.crs()
crsB = vproviderB.crs()
if not crsA.isValid() or not crsB.isValid():
crs_match = None
else:
crs_match = crsA == crsB
fields = ftools_utils.combineVectorFields( self.vlayerA, self.vlayerB )
longNames = ftools_utils.checkFieldNameLength( fields )
if not longNames.isEmpty():
message = QString( 'The following field names are longer than 10 characters:\n%1' ).arg( longNames.join( '\n' ) )
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, message
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, vproviderA.geometryType(), vproviderA.crs() )
if writer.hasError():
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, writer.errorMessage()
inFeatA = QgsFeature()
inFeatB = QgsFeature()
outFeat = QgsFeature()
indexA = ftools_utils.createIndex( vproviderB )
indexB = ftools_utils.createIndex( vproviderA )
nFeat = vproviderA.featureCount() * vproviderB.featureCount()
nElement = 0
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
vproviderA.rewind()
while vproviderA.nextFeature( inFeatA ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
add = True
geom = QgsGeometry( inFeatA.geometry() )
diff_geom = QgsGeometry( geom )
atMapA = inFeatA.attributeMap()
intersects = indexA.intersects( geom.boundingBox() )
for id in intersects:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if diff_geom.intersects( tmpGeom ):
diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
except:
add = False
GEOS_EXCEPT = False
break
if add:
try:
outFeat.setGeometry( diff_geom )
outFeat.setAttributeMap( atMapA )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
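    # Second pass: difference each feature of layer B against layer A. The
    # attribute values are re-keyed starting at len(fields of A) so that they
    # land in the B columns of the combined output attribute map.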
length = len( vproviderA.fields().values() )
vproviderB.rewind()
while vproviderB.nextFeature( inFeatA ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
add = True
geom = QgsGeometry( inFeatA.geometry() )
diff_geom = QgsGeometry( geom )
atMap = inFeatA.attributeMap().values()
atMap = dict( zip( range( length, length + len( atMap ) ), atMap ) )
intersects = indexB.intersects( geom.boundingBox() )
for id in intersects:
vproviderA.featureAtId( int( id ), inFeatB , True, allAttrsA )
tmpGeom = QgsGeometry( inFeatB.geometry() )
try:
if diff_geom.intersects( tmpGeom ):
diff_geom = QgsGeometry( diff_geom.difference( tmpGeom ) )
except:
add = False
GEOS_EXCEPT = False
break
if add:
try:
outFeat.setGeometry( diff_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
FEATURE_EXCEPT = False
continue
del writer
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, None
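  # Clips layer A with layer B: keeps only the parts of each A feature covered
  # by the union of the intersecting B features, honouring feature selections
  # on either layer. The wkbType() == 7 branch rebuilds a clean geometry when
  # the intersection comes back as a GEOMETRYCOLLECTION.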
def clip( self ):
GEOS_EXCEPT = True
FEATURE_EXCEPT = True
vproviderA = self.vlayerA.dataProvider()
allAttrsA = vproviderA.attributeIndexes()
vproviderA.select( allAttrsA )
vproviderB = self.vlayerB.dataProvider()
allAttrsB = vproviderB.attributeIndexes()
vproviderB.select( allAttrsB )
# check for crs compatibility
crsA = vproviderA.crs()
crsB = vproviderB.crs()
if not crsA.isValid() or not crsB.isValid():
crs_match = None
else:
crs_match = crsA == crsB
fields = vproviderA.fields()
writer = QgsVectorFileWriter( self.myName, self.myEncoding,
fields, vproviderA.geometryType(), vproviderA.crs() )
if writer.hasError():
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, writer.errorMessage()
inFeatA = QgsFeature()
inFeatB = QgsFeature()
outFeat = QgsFeature()
index = ftools_utils.createIndex( vproviderB )
vproviderA.rewind()
nElement = 0
# there is selection in input layer
if self.mySelectionA:
nFeat = self.vlayerA.selectedFeatureCount()
selectionA = self.vlayerA.selectedFeatures()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
# we have selection in overlay layer
if self.mySelectionB:
selectionB = self.vlayerB.selectedFeaturesIds()
for inFeatA in selectionA:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
int_geom = QgsGeometry( geom )
atMap = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
found = False
first = True
for id in intersects:
if id in selectionB:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
if tmpGeom.intersects( geom ):
found = True
if first:
outFeat.setGeometry( QgsGeometry( tmpGeom ) )
first = False
else:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( cur_geom.combine( tmpGeom ) )
outFeat.setGeometry( QgsGeometry( new_geom ) )
except:
GEOS_EXCEPT = False
break
if found:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( geom.intersection( cur_geom ) )
if new_geom.wkbType() == 7:
int_com = QgsGeometry( geom.combine( cur_geom ) )
int_sym = QgsGeometry( geom.symDifference( cur_geom ) )
new_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
outFeat.setGeometry( new_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
                FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
# we have no selection in overlay layer
else:
for inFeatA in selectionA:
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMap = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
found = False
first = True
for id in intersects:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
if tmpGeom.intersects( geom ):
found = True
if first:
outFeat.setGeometry( QgsGeometry( tmpGeom ) )
first = False
else:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( cur_geom.combine( tmpGeom ) )
outFeat.setGeometry( QgsGeometry( new_geom ) )
except:
GEOS_EXCEPT = False
break
if found:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( geom.intersection( cur_geom ) )
if new_geom.wkbType() == 7:
int_com = QgsGeometry( geom.combine( cur_geom ) )
int_sym = QgsGeometry( geom.symDifference( cur_geom ) )
new_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
outFeat.setGeometry( new_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
                FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
# there is no selection in input layer
else:
nFeat = vproviderA.featureCount()
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), 0)
self.emit( SIGNAL( "runRange(PyQt_PyObject)" ), ( 0, nFeat ) )
# we have selection in overlay layer
if self.mySelectionB:
selectionB = self.vlayerB.selectedFeaturesIds()
while vproviderA.nextFeature( inFeatA ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMap = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
found = False
first = True
for id in intersects:
if id in selectionB:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
if tmpGeom.intersects( geom ):
found = True
if first:
outFeat.setGeometry( QgsGeometry( tmpGeom ) )
first = False
else:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( cur_geom.combine( tmpGeom ) )
outFeat.setGeometry( QgsGeometry( new_geom ) )
except:
GEOS_EXCEPT = False
break
if found:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( geom.intersection( cur_geom ) )
if new_geom.wkbType() == 7:
int_com = QgsGeometry( geom.combine( cur_geom ) )
int_sym = QgsGeometry( geom.symDifference( cur_geom ) )
new_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
outFeat.setGeometry( new_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
                FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
# we have no selection in overlay layer
else:
while vproviderA.nextFeature( inFeatA ):
nElement += 1
self.emit( SIGNAL( "runStatus(PyQt_PyObject)" ), nElement )
geom = QgsGeometry( inFeatA.geometry() )
atMap = inFeatA.attributeMap()
intersects = index.intersects( geom.boundingBox() )
first = True
found = False
if len( intersects ) > 0:
for id in intersects:
vproviderB.featureAtId( int( id ), inFeatB , True, allAttrsB )
tmpGeom = QgsGeometry( inFeatB.geometry() )
if tmpGeom.intersects( geom ):
found = True
if first:
outFeat.setGeometry( QgsGeometry( tmpGeom ) )
first = False
else:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( cur_geom.combine( tmpGeom ) )
outFeat.setGeometry( QgsGeometry( new_geom ) )
except:
GEOS_EXCEPT = False
break
if found:
try:
cur_geom = QgsGeometry( outFeat.geometry() )
new_geom = QgsGeometry( geom.intersection( cur_geom ) )
if new_geom.wkbType() == 7:
int_com = QgsGeometry( geom.combine( cur_geom ) )
int_sym = QgsGeometry( geom.symDifference( cur_geom ) )
new_geom = QgsGeometry( int_com.difference( int_sym ) )
try:
outFeat.setGeometry( new_geom )
outFeat.setAttributeMap( atMap )
writer.addFeature( outFeat )
except:
                FEATURE_EXCEPT = False
continue
except:
GEOS_EXCEPT = False
continue
del writer
return GEOS_EXCEPT, FEATURE_EXCEPT, crs_match, None
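  # Validates the parameter for the selected function: 1 (buffer) accepts a
  # field name or a plain numeric distance, 2 (dissolve) an optional field
  # name, 4 an attribute field that must exist. Returns a (value, is_field)
  # pair, where is_field tells whether value is a field index.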
def checkParameter( self, layer, param ):
if self.myFunction == 1:
if type( param ) == unicode:
check = layer.dataProvider().fieldNameIndex( param )
if check == -1:
return ( None, False )
else:
return ( check, True )
else:
if type( param ) == float or type( param ) == int:
return ( param, False )
else:
return ( None, False )
elif self.myFunction == 2:
if not param is None:
if type( param ) == unicode:
check = layer.dataProvider().fieldNameIndex( param )
if check == -1:
return ( None, False )
else:
return ( check, True )
else:
return ( None, False )
else:
return ( True, False )
elif self.myFunction == 4:
if type( param ) == unicode:
check = layer.dataProvider().fieldNameIndex( param )
if check == -1:
return ( check, False )
else:
return ( check, True )
else:
return ( None, False )
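  # Returns (x, y) for point geometries; otherwise (area or length, perimeter)
  # measured with QgsDistanceArea, falling back to the same value twice for
  # non-polygon layers.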
def simpleMeasure( self, inGeom ):
if inGeom.wkbType() == QGis.WKBPoint:
pt = QgsPoint()
pt = inGeom.asPoint()
attr1 = pt.x()
attr2 = pt.y()
else:
measure = QgsDistanceArea()
attr1 = measure.measure(inGeom)
if inGeom.type() == QGis.Polygon:
attr2 = self.perimMeasure( inGeom, measure )
else:
attr2 = attr1
return ( attr1, attr2 )
def perimMeasure( self, inGeom, measure ):
value = 0.00
if inGeom.isMultipart():
poly = inGeom.asMultiPolygon()
for k in poly:
for j in k:
value = value + measure.measureLine( j )
else:
poly = inGeom.asPolygon()
for k in poly:
value = value + measure.measureLine( k )
return value
| gpl-2.0 |
OPM/opm-cmake | python/pybind11/tests/test_iostream.py | 17 | 5268 | from pybind11_tests import iostream as m
import sys
from contextlib import contextmanager
try:
# Python 3
from io import StringIO
except ImportError:
# Python 2
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
# Python 3.4
from contextlib import redirect_stdout
except ImportError:
@contextmanager
def redirect_stdout(target):
original = sys.stdout
sys.stdout = target
yield
sys.stdout = original
try:
# Python 3.5
from contextlib import redirect_stderr
except ImportError:
@contextmanager
def redirect_stderr(target):
original = sys.stderr
sys.stderr = target
yield
sys.stderr = original
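# The fallbacks above give older interpreters the same context-manager API as
# contextlib.redirect_stdout/redirect_stderr, which the tests below rely on.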
def test_captured(capsys):
msg = "I've been redirected to Python, I hope!"
m.captured_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ''
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ''
m.captured_err(msg)
stdout, stderr = capsys.readouterr()
assert stdout == ''
assert stderr == msg
def test_captured_large_string(capsys):
# Make this bigger than the buffer used on the C++ side: 1024 chars
msg = "I've been redirected to Python, I hope!"
msg = msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ''
def test_guard_capture(capsys):
msg = "I've been redirected to Python, I hope!"
m.guard_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ''
def test_series_captured(capture):
with capture:
m.captured_output("a")
m.captured_output("b")
assert capture == "ab"
def test_flush(capfd):
msg = "(not flushed)"
msg2 = "(flushed)"
with m.ostream_redirect():
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
assert stdout == ''
m.noisy_function(msg2, flush=True)
stdout, stderr = capfd.readouterr()
assert stdout == msg + msg2
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
assert stdout == msg
def test_not_captured(capfd):
msg = "Something that should not show up in log"
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stderr == ''
assert stream.getvalue() == ''
stream = StringIO()
with redirect_stdout(stream):
m.captured_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stderr == ''
assert stream.getvalue() == msg
def test_err(capfd):
msg = "Something that should not show up in log"
stream = StringIO()
with redirect_stderr(stream):
m.raw_err(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stderr == msg
assert stream.getvalue() == ''
stream = StringIO()
with redirect_stderr(stream):
m.captured_err(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stderr == ''
assert stream.getvalue() == msg
def test_multi_captured(capfd):
stream = StringIO()
with redirect_stdout(stream):
m.captured_output("a")
m.raw_output("b")
m.captured_output("c")
m.raw_output("d")
stdout, stderr = capfd.readouterr()
assert stdout == 'bd'
assert stream.getvalue() == 'ac'
def test_dual(capsys):
m.captured_dual("a", "b")
stdout, stderr = capsys.readouterr()
assert stdout == "a"
assert stderr == "b"
def test_redirect(capfd):
msg = "Should not be in log!"
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stream.getvalue() == ''
stream = StringIO()
with redirect_stdout(stream):
with m.ostream_redirect():
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stream.getvalue() == msg
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stream.getvalue() == ''
def test_redirect_err(capfd):
msg = "StdOut"
msg2 = "StdErr"
stream = StringIO()
with redirect_stderr(stream):
with m.ostream_redirect(stdout=False):
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stderr == ''
assert stream.getvalue() == msg2
def test_redirect_both(capfd):
msg = "StdOut"
msg2 = "StdErr"
stream = StringIO()
stream2 = StringIO()
with redirect_stdout(stream):
with redirect_stderr(stream2):
with m.ostream_redirect():
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == ''
assert stderr == ''
assert stream.getvalue() == msg
assert stream2.getvalue() == msg2
| gpl-3.0 |
mallconnectionorg/openerp | rrhh/currency_rate_update/__openerp__.py | 3 | 3672 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 Camtocamp SA
# @author JB Aubort, Nicolas Bessi, Joel Grand-Guillaume
# European Central Bank and Polish National Bank invented by Grzegorz Grzelak
# Ported to OpenERP 7.0 by Lorenzo Battistini <[email protected]>
# Banxico implemented by Agustin Cruz openpyme.mx
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Currency Rate Update",
"version" : "0.7",
"author" : "Camptocamp",
"website" : "http://camptocamp.com",
"category" : "Financial Management/Configuration",
"description": """Import exchange rates from the Internet.
    The module is able to use 5 different sources:
1. Admin.ch
Updated daily, source in CHF.
2. European Central Bank (ported by Grzegorz Grzelak)
The reference rates are based on the regular daily concertation procedure between
central banks within and outside the European System of Central Banks,
which normally takes place at 2.15 p.m. (14:15) ECB time. Source in EUR.
http://www.ecb.europa.eu/stats/exchange/eurofxref/html/index.en.html
3. Yahoo Finance
Updated daily
4. Polish National Bank (Narodowy Bank Polski) (contribution by Grzegorz Grzelak)
Takes official rates from www.nbp.pl. Adds rate table symbol in log.
    Check when the rates should apply to your bookkeeping. If that is the next
    day, change the update hour in the schedule settings, because OpenERP
    applies rates from the date of the update (date only, no hours).
5. Banxico for USD & MXN (created by Agustín Cruz)
Updated daily
    In the roadmap: Google Finance, which would update daily from Citibank
    N.A. with EUR as the source currency. Information may be delayed, and
    since it is parsed from an HTML page it may break at any time.
The update can be set under the company form.
    You can set, for each service, which currencies you want to update.
The logs of the update are visible under the service note.
    You can activate or deactivate the update.
The module uses internal ir_cron feature from OpenERP, so the job is launched once
the server starts if the 'first execute date' is before the current day.
The module supports multi-company currency in two ways:
    * the currencies are shared: you can set the currency update on only one
    company
    * the currencies are separated: you can set the currency update on every
    company separately
A function field lets you know your currency configuration.
If in multi-company mode, the base currency will be the first company's currency
found in database.
Thanks to main contributors: Grzegorz Grzelak, Alexis de Lattre
""",
"depends" : [
"base",
"account", #Added to ensure account security groups are present
],
"data" : [
"currency_rate_update.xml",
"company_view.xml",
"security/security.xml",
],
"demo" : [],
"active": False,
'installable': True
}
| agpl-3.0 |
karies/root | bindings/pyroot_experimental/cppyy/cppyy/test/test_regression.py | 3 | 4532 | import py, os, sys
from pytest import raises
from .support import setup_make
class TestREGRESSION:
helpout = []
def setup_class(cls):
import cppyy
def stringpager(text, cls=cls):
cls.helpout.append(text)
import pydoc
pydoc.pager = stringpager
def test01_kdcraw(self):
"""Doc strings for KDcrawIface (used to crash)."""
import cppyy, pydoc
# TODO: run a find for these paths
qtpath = "/usr/include/qt5"
kdcraw_h = "/usr/include/KF5/KDCRAW/kdcraw/kdcraw.h"
if not os.path.isdir(qtpath) or not os.path.exists(kdcraw_h):
import warnings
warnings.warn("no KDE/Qt found, skipping test01_kdcraw")
return
# need to resolve qt_version_tag for the incremental compiler; since
# it's not otherwise used, just make something up
cppyy.cppdef("int qt_version_tag = 42;")
cppyy.add_include_path(qtpath)
cppyy.include(kdcraw_h)
from cppyy.gbl import KDcrawIface
self.__class__.helpout = []
pydoc.doc(KDcrawIface.KDcraw)
helptext = ''.join(self.__class__.helpout)
assert 'KDcraw' in helptext
assert 'CPPInstance' in helptext
def test02_dir(self):
"""For the same reasons as test01_kdcraw, this used to crash."""
import cppyy, pydoc
assert not '__abstractmethods__' in dir(cppyy.gbl.gInterpreter)
assert '__class__' in dir(cppyy.gbl.gInterpreter)
self.__class__.helpout = []
pydoc.doc(cppyy.gbl.gInterpreter)
helptext = ''.join(self.__class__.helpout)
assert 'TInterpreter' in helptext
assert 'CPPInstance' in helptext
assert 'AddIncludePath' in helptext
cppyy.cppdef("namespace cppyy_regression_test { void iii() {}; }")
assert not 'iii' in cppyy.gbl.cppyy_regression_test.__dict__
assert not '__abstractmethods__' in dir(cppyy.gbl.cppyy_regression_test)
assert '__class__' in dir(cppyy.gbl.cppyy_regression_test)
assert 'iii' in dir(cppyy.gbl.cppyy_regression_test)
assert not 'iii' in cppyy.gbl.cppyy_regression_test.__dict__
assert cppyy.gbl.cppyy_regression_test.iii
assert 'iii' in cppyy.gbl.cppyy_regression_test.__dict__
self.__class__.helpout = []
pydoc.doc(cppyy.gbl.cppyy_regression_test)
helptext = ''.join(self.__class__.helpout)
# TODO: it's deeply silly that namespaces inherit from CPPInstance (in CPyCppyy)
assert ('CPPInstance' in helptext or 'CPPNamespace' in helptext)
def test03_pyfunc_doc(self):
"""Help on a generated pyfunc used to crash."""
        import cppyy, pydoc, sys
        import distutils.sysconfig
        cppyy.add_include_path(distutils.sysconfig.get_python_inc())
if sys.hexversion < 0x3000000:
cppyy.cppdef("#undef _POSIX_C_SOURCE")
cppyy.cppdef("#undef _XOPEN_SOURCE")
else:
cppyy.cppdef("#undef slots") # potentially pulled in by Qt/xapian.h
cppyy.cppdef("""#include "Python.h"
long py2long(PyObject* obj) { return PyLong_AsLong(obj); }""")
pydoc.doc(cppyy.gbl.py2long)
assert 1 == cppyy.gbl.py2long(1)
def test04_avx(self):
"""Test usability of AVX by default."""
import cppyy, subprocess
has_avx = False
try:
for line in open('/proc/cpuinfo', 'r'):
if 'avx' in line:
has_avx = True
break
except Exception:
try:
cli_arg = subprocess.check_output(['sysctl', 'machdep.cpu.features'])
has_avx = 'avx' in cli_arg.decode("utf-8").strip().lower()
except Exception:
pass
if has_avx:
assert cppyy.cppdef('int check_avx() { return (int) __AVX__; }')
assert cppyy.gbl.check_avx() # attribute error if compilation failed
def test05_default_template_arguments(self):
"""Calling a templated method on a templated class with all defaults used to crash."""
import cppyy
cppyy.cppdef("""
template<typename T>
class AllDefault {
public:
AllDefault(int val) : m_t(val) {}
template<int aap=1, int noot=2>
int do_stuff() { return m_t+aap+noot; }
public:
T m_t;
};""")
a = cppyy.gbl.AllDefault[int](24)
a.m_t = 21;
assert a.do_stuff() == 24
| lgpl-2.1 |
yedidiaklein/local_video_directory | classes/task/googleSpeech/google/protobuf/python/google/protobuf/descriptor_database.py | 19 | 6295 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a container for DescriptorProtos."""
__author__ = '[email protected] (Matt Toia)'
import warnings
class Error(Exception):
pass
class DescriptorDatabaseConflictingDefinitionError(Error):
"""Raised when a proto is added with the same name & different descriptor."""
class DescriptorDatabase(object):
"""A container accepting FileDescriptorProtos and maps DescriptorProtos."""
def __init__(self):
self._file_desc_protos_by_file = {}
self._file_desc_protos_by_symbol = {}
def Add(self, file_desc_proto):
"""Adds the FileDescriptorProto and its types to this database.
Args:
file_desc_proto: The FileDescriptorProto to add.
Raises:
DescriptorDatabaseConflictingDefinitionError: if an attempt is made to
        add a proto with the same name but a different definition than an
        existing proto in the database.
"""
proto_name = file_desc_proto.name
if proto_name not in self._file_desc_protos_by_file:
self._file_desc_protos_by_file[proto_name] = file_desc_proto
elif self._file_desc_protos_by_file[proto_name] != file_desc_proto:
raise DescriptorDatabaseConflictingDefinitionError(
'%s already added, but with different descriptor.' % proto_name)
else:
return
# Add all the top-level descriptors to the index.
package = file_desc_proto.package
for message in file_desc_proto.message_type:
for name in _ExtractSymbols(message, package):
self._AddSymbol(name, file_desc_proto)
for enum in file_desc_proto.enum_type:
self._AddSymbol(('.'.join((package, enum.name))), file_desc_proto)
for extension in file_desc_proto.extension:
self._AddSymbol(('.'.join((package, extension.name))), file_desc_proto)
for service in file_desc_proto.service:
self._AddSymbol(('.'.join((package, service.name))), file_desc_proto)
def FindFileByName(self, name):
"""Finds the file descriptor proto by file name.
    Typically the file name is a relative path ending in a .proto file. The
proto with the given name will have to have been added to this database
using the Add method or else an error will be raised.
Args:
name: The file name to find.
Returns:
The file descriptor proto matching the name.
Raises:
KeyError if no file by the given name was added.
"""
return self._file_desc_protos_by_file[name]
def FindFileContainingSymbol(self, symbol):
"""Finds the file descriptor proto containing the specified symbol.
The symbol should be a fully qualified name including the file descriptor's
package and any containing messages. Some examples:
'some.package.name.Message'
'some.package.name.Message.NestedEnum'
'some.package.name.Message.some_field'
The file descriptor proto containing the specified symbol must be added to
this database using the Add method or else an error will be raised.
Args:
symbol: The fully qualified symbol name.
Returns:
The file descriptor proto containing the symbol.
Raises:
KeyError if no file contains the specified symbol.
"""
try:
return self._file_desc_protos_by_symbol[symbol]
except KeyError:
# Fields, enum values, and nested extensions are not in
# _file_desc_protos_by_symbol. Try to find the top level
# descriptor. Non-existent nested symbol under a valid top level
# descriptor can also be found. The behavior is the same with
# protobuf C++.
top_level, _, _ = symbol.rpartition('.')
return self._file_desc_protos_by_symbol[top_level]
def _AddSymbol(self, name, file_desc_proto):
if name in self._file_desc_protos_by_symbol:
warn_msg = ('Conflict register for file "' + file_desc_proto.name +
'": ' + name +
' is already defined in file "' +
self._file_desc_protos_by_symbol[name].name + '"')
warnings.warn(warn_msg, RuntimeWarning)
self._file_desc_protos_by_symbol[name] = file_desc_proto
def _ExtractSymbols(desc_proto, package):
"""Pulls out all the symbols from a descriptor proto.
Args:
desc_proto: The proto to extract symbols from.
package: The package containing the descriptor type.
Yields:
The fully qualified name found in the descriptor.
"""
message_name = package + '.' + desc_proto.name if package else desc_proto.name
yield message_name
for nested_type in desc_proto.nested_type:
for symbol in _ExtractSymbols(nested_type, message_name):
yield symbol
for enum_type in desc_proto.enum_type:
yield '.'.join((message_name, enum_type.name))
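# Illustrative use of the database (a sketch only; the FileDescriptorProto
# objects are assumed to come from elsewhere, e.g. a FileDescriptorSet):
#   db = DescriptorDatabase()
#   db.Add(file_desc_proto)
#   db.FindFileByName('foo/bar.proto')
#   db.FindFileContainingSymbol('some.package.name.Message')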
| gpl-3.0 |
yjhjstz/gyp | test/actions/gyptest-default.py | 53 | 2338 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple actions when using the default build target.
"""
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_default')
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
# Some gyp files use an action that mentions an output but never
# writes it as a means of making the action run on every build. That
# doesn't mesh well with ninja's semantics. TODO(evan): figure out
# how to work always-run actions into ninja.
if test.format in ['ninja', 'xcode-ninja']:
test.build('actions.gyp', test.ALL, chdir='relocate/src')
else:
# Test that an "always run" action increases a counter on multiple
# invocations, and that a dependent action updates in step.
test.build('actions.gyp', chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '1')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '1')
test.build('actions.gyp', chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
# The "always run" action only counts to 2, but the dependent target
# will count forever if it's allowed to run. This verifies that the
# dependent target only runs when the "always run" action generates
# new output, not just because the "always run" ran.
test.build('actions.gyp', test.ALL, chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
expect = """\
Hello from program.c
Hello from make-prog1.py
Hello from make-prog2.py
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir1'
else:
chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/src/subdir2/file.out', "Hello from make-file.py\n")
expect = "Hello from generate_main.py\n"
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
else:
chdir = 'relocate/src'
test.run_built_executable('null_input', chdir=chdir, stdout=expect)
test.pass_test()
| bsd-3-clause |
umayrh/sketchy-polytopes | python/evolvingdag/setup.py | 1 | 5286 | from __future__ import print_function
from glob import glob
from os.path import basename
from os.path import splitext
import os
import sys
import pkg_resources
import platform
from setuptools import setup, find_packages, Command
from setuptools.command.install_egg_info import install_egg_info as _install_egg_info
from setuptools.dist import Distribution
class EntryPoints(Command):
description = 'get entrypoints for a distribution'
user_options = [
('dist=', None, 'get entrypoints for specified distribution'),
]
def initialize_options(self):
self.dist = self.distribution.get_name()
def finalize_options(self):
"""Abstract method that is required to be overwritten"""
def run(self):
req_entry_points = pkg_resources.get_entry_map(self.dist)
if req_entry_points and 'console_scripts' in req_entry_points:
for entry in list(req_entry_points['console_scripts'].values()):
print(entry, file=sys.stdout)
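# Invoked as "python setup.py entrypoints [--dist=NAME]"; prints one
# console_scripts entry point per line for the given distribution.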
class install_egg_info(_install_egg_info): # noqa
"""Override the setuptools namespace package templates.
Customizes the "nspkg.pth" files so that they're compatible with
"--editable" packages.
See this pip issue for details:
https://github.com/pypa/pip/issues/3
Modifications to the original implementation are marked with CHANGED
"""
_nspkg_tmpl = (
# CHANGED: Add the import of pkgutil needed on the last line.
"import sys, types, os, pkgutil",
"p = os.path.join(sys._getframe(1).f_locals['sitedir'], *%(pth)r)",
"ie = os.path.exists(os.path.join(p, '__init__.py'))",
"m = not ie and "
"sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
"mp = (m or []) and m.__dict__.setdefault('__path__', [])",
"(p not in mp) and mp.append(p)",
# CHANGED: Fix the resulting __path__ on the namespace packages to
# properly traverse "--editable" packages too.
"mp[:] = m and pkgutil.extend_path(mp, %(pkg)r) or mp",
)
"lines for the namespace installer"
_nspkg_tmpl_multi = (
# CHANGED: Use "__import__" to ensure the parent package has been
# loaded before attempting to read it from sys.modules.
# This avoids a possible issue with nested namespace packages where the
# parent could be skipped due to an existing __init__.py file.
'm and __import__(%(parent)r) and setattr(sys.modules[%(parent)r], %(child)r, m)',
)
"additional line(s) when a parent package is indicated"
class GradleDistribution(Distribution, object):
PINNED_TXT = 'pinned.txt'
excluded_platform_packages = {}
def __init__(self, attrs):
attrs['name'] = os.getenv('PYGRADLE_PROJECT_NAME')
attrs['version'] = os.getenv('PYGRADLE_PROJECT_VERSION')
attrs['install_requires'] = list(self.load_pinned_deps())
super(GradleDistribution, self).__init__(attrs)
def get_command_class(self, command):
"""Return a customized command class or the base one."""
if command == 'install_egg_info':
return install_egg_info
elif command == 'entrypoints':
return EntryPoints
return super(GradleDistribution, self).get_command_class(command)
@property
def excluded_packages(self):
platform_name = platform.system().lower()
if platform_name in self.excluded_platform_packages:
return set(pkg.lower() for pkg in self.excluded_platform_packages[platform_name])
return set()
def load_pinned_deps(self):
"""Load a pinned.txt file.
The pinned.txt file contains a list of dependencies that this Python
project depends on. Although the PyGradle build system will ignore this
file and never install dependencies declared via this method, it is
important to declare the dependencies using this method to maintain
backwards compatibility with non-PyGradle build systems.
"""
# calculate this only once
blacklisted = self.excluded_packages
try:
reqs = []
with open(self.PINNED_TXT) as fh:
reqs = fh.readlines()
# Don't include the version information so that we don't mistakenly
# introduce a version conflict issue.
for req in reqs:
if req:
name, version = req.split('==')
if name and name.lower() not in blacklisted:
yield name
except IOError:
            return  # PEP 479: raising StopIteration in a generator is a RuntimeError on Python 3.7+
setup(
distclass=GradleDistribution,
name='evolvingdag',
version='0.1.0',
author='Umayr Hassan',
author_email='[email protected]',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
url='https://github.com/umayrh/sketchy-polytopes/tree/master/python/evolvingdag',
license='GPL-3.0',
description='Create and analyze random, longitudinal directed acyclic graphs',
long_description=open('README.txt').read(),
install_requires=[
"decorator>=4.3.0",
"networkx>=2.2",
"numpy>=1.14.5",
"neo4j-driver>=1.6.0"
]
)
| gpl-3.0 |
indera/titanium_mobile | node_modules/ioslib/node_modules/node-ios-device/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py | 2736 | 6387 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self._attrs = attrs or {}
self._attrs['Name'] = name
def _GetSpecification(self):
"""Creates an element for the tool.
Returns:
A new xml.dom.Element for the tool.
"""
return ['Tool', self._attrs]
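# For example, Tool('VCCLCompilerTool', {'Optimization': '2'}) yields the
# specification ['Tool', {'Name': 'VCCLCompilerTool', 'Optimization': '2'}],
# the nested-list form that gyp.easy_xml serializes to XML.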
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
platforms: Array of string, the supported platforms. If null, ['Win32']
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Returns:
"""
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")
| apache-2.0 |
AdaptivePELE/AdaptivePELE | AdaptivePELE/tests/testAtomset.py | 1 | 29178 | from __future__ import absolute_import, division, print_function, unicode_literals
from io import open
import os
import unittest
import mdtraj
import numpy as np
import AdaptivePELE.atomset.atomset as atomset
from AdaptivePELE.atomset import RMSDCalculator
from AdaptivePELE.atomset import SymmetryContactMapEvaluator as sym
from AdaptivePELE.clustering import clustering
from AdaptivePELE.utilities import utilities
class atomsetTest(unittest.TestCase):
""" For the moment the tests include loading from file and string, resname
and atomname selection. It uses a toy pdb of only 5 lines located in
tests/data
"""
def testPDB_from_file(self):
# preparation
pdb = atomset.PDB()
# function to test
pdb.initialise("tests/data/pdb_test.pdb")
# assertion
pdbContent = "ATOM 1 N ASN A 1 7.920 22.268 9.257 1.00 15.18 N1+\n\
ATOM 2 CA CYS A 1 8.394 20.916 9.575 1.00 16.24 C \n\
ATOM 3 CA ASN A 1 7.870 19.937 8.524 1.00 16.63 C \n\
ATOM 4 O ASN A 1 7.030 20.308 7.700 1.00 16.13 O \n\
ATOM 5 CB CYS A 1 8.108 20.445 11.030 1.00 16.53 C \n"
atom1 = atomset.Atom("ATOM 1 N ASN A 1 7.920 22.268 9.257 1.00 15.18 N1+")
atom2 = atomset.Atom("ATOM 2 CA CYS A 1 8.394 20.916 9.575 1.00 16.24 C ")
atom3 = atomset.Atom("ATOM 3 CA ASN A 1 7.870 19.937 8.524 1.00 16.63 C ")
atom4 = atomset.Atom("ATOM 4 O ASN A 1 7.030 20.308 7.700 1.00 16.13 O ")
atom5 = atomset.Atom("ATOM 5 CB CYS A 1 8.108 20.445 11.030 1.00 16.53 C ")
goldenAtomsDict = {atom1.id: atom1, atom2.id: atom2, atom3.id: atom3, atom4.id: atom4, atom5.id: atom5}
self.assertEqual(pdb.pdb, pdbContent)
self.assertEqual(pdb.atoms, goldenAtomsDict)
def testPDB_from_str(self):
# preparation
with open("tests/data/pdb_test.pdb", "rt") as pdbfile:
pdbstring = pdbfile.read()
pdb = atomset.PDB()
# function to test
pdb.initialise(pdbstring)
# assertion
pdbContent = "ATOM 1 N ASN A 1 7.920 22.268 9.257 1.00 15.18 N1+\n\
ATOM 2 CA CYS A 1 8.394 20.916 9.575 1.00 16.24 C \n\
ATOM 3 CA ASN A 1 7.870 19.937 8.524 1.00 16.63 C \n\
ATOM 4 O ASN A 1 7.030 20.308 7.700 1.00 16.13 O \n\
ATOM 5 CB CYS A 1 8.108 20.445 11.030 1.00 16.53 C \n"
atom1 = atomset.Atom("ATOM 1 N ASN A 1 7.920 22.268 9.257 1.00 15.18 N1+")
atom2 = atomset.Atom("ATOM 2 CA CYS A 1 8.394 20.916 9.575 1.00 16.24 C ")
atom3 = atomset.Atom("ATOM 3 CA ASN A 1 7.870 19.937 8.524 1.00 16.63 C ")
atom4 = atomset.Atom("ATOM 4 O ASN A 1 7.030 20.308 7.700 1.00 16.13 O ")
atom5 = atomset.Atom("ATOM 5 CB CYS A 1 8.108 20.445 11.030 1.00 16.53 C ")
goldenAtomsDict = {atom1.id: atom1, atom2.id: atom2, atom3.id: atom3, atom4.id: atom4, atom5.id: atom5}
self.assertEqual(pdb.pdb, pdbContent)
self.assertEqual(pdb.atoms, goldenAtomsDict)
def testPDB_sel_resname(self):
# preparation
pdb = atomset.PDB()
# function to test
pdb.initialise("tests/data/pdb_test.pdb", resname="CYS")
# assertion
pdbContent = "ATOM 1 N ASN A 1 7.920 22.268 9.257 1.00 15.18 N1+\n\
ATOM 2 CA CYS A 1 8.394 20.916 9.575 1.00 16.24 C \n\
ATOM 3 CA ASN A 1 7.870 19.937 8.524 1.00 16.63 C \n\
ATOM 4 O ASN A 1 7.030 20.308 7.700 1.00 16.13 O \n\
ATOM 5 CB CYS A 1 8.108 20.445 11.030 1.00 16.53 C \n"
atom2 = atomset.Atom("ATOM 2 CA CYS A 1 8.394 20.916 9.575 1.00 16.24 C ")
atom5 = atomset.Atom("ATOM 5 CB CYS A 1 8.108 20.445 11.030 1.00 16.53 C ")
goldenAtomsDict = {atom2.id: atom2, atom5.id: atom5}
self.assertEqual(pdb.pdb, pdbContent)
self.assertEqual(pdb.atoms, goldenAtomsDict)
def testPDB_sel_atomname(self):
# preparation
pdb = atomset.PDB()
# function to test
pdb.initialise("tests/data/pdb_test.pdb", atomname="CA")
# assertion
pdbContent = "ATOM 1 N ASN A 1 7.920 22.268 9.257 1.00 15.18 N1+\n\
ATOM 2 CA CYS A 1 8.394 20.916 9.575 1.00 16.24 C \n\
ATOM 3 CA ASN A 1 7.870 19.937 8.524 1.00 16.63 C \n\
ATOM 4 O ASN A 1 7.030 20.308 7.700 1.00 16.13 O \n\
ATOM 5 CB CYS A 1 8.108 20.445 11.030 1.00 16.53 C \n"
atom2 = atomset.Atom("ATOM 2 CA CYS A 1 8.394 20.916 9.575 1.00 16.24 C ")
atom3 = atomset.Atom("ATOM 3 CA ASN A 1 7.870 19.937 8.524 1.00 16.63 C ")
goldenAtomsDict = {atom2.id: atom2, atom3.id: atom3}
self.assertEqual(pdb.pdb, pdbContent)
self.assertEqual(pdb.atoms, goldenAtomsDict)
def testPDB_sel_type_protein(self):
# preparation
pdb = atomset.PDB()
# function to test
pdb.initialise("tests/data/pdb_test_ligand.pdb",
type="PROTEIN")
# assertion
pdbContent = "MODEL 1\n\
ATOM 1717 H ASN A 119 25.915 9.925 -7.236 1.00 31.61 H \n\
ATOM 1718 CA ASN A 119 27.159 10.509 -6.736 1.00 33.83 C \n\
TER\n\
HETATM 1733 O1 AIN L 1 13.907 16.130 0.624 0.50 28.52 O \n\
TER\n\
HETATM 1753 CA CA B 1 16.636 15.477 0.293 1.00 3.39 Ca2+\n\
TER\n\
ENDMDL\n\
END \n"
atom1 = atomset.Atom("ATOM 1718 CA ASN A 119 27.159 10.509 -6.736 1.00 33.83 C ")
goldenAtomsDict = {atom1.id: atom1}
self.assertEqual(pdb.pdb, pdbContent)
self.assertEqual(pdb.atoms, goldenAtomsDict)
def testPDB_sel_type_hetero(self):
# preparation
pdb = atomset.PDB()
# function to test
pdb.initialise("tests/data/pdb_test_ligand.pdb", type="HETERO")
# assertion
pdbContent = "MODEL 1\n\
ATOM 1717 H ASN A 119 25.915 9.925 -7.236 1.00 31.61 H \n\
ATOM 1718 CA ASN A 119 27.159 10.509 -6.736 1.00 33.83 C \n\
TER\n\
HETATM 1733 O1 AIN L 1 13.907 16.130 0.624 0.50 28.52 O \n\
TER\n\
HETATM 1753 CA CA B 1 16.636 15.477 0.293 1.00 3.39 Ca2+\n\
TER\n\
ENDMDL\n\
END \n"
atom1 = atomset.Atom("HETATM 1733 O1 AIN L 1 13.907 16.130 0.624 0.50 28.52 O ")
atom2 = atomset.Atom("HETATM 1753 CA CA B 1 16.636 15.477 0.293 1.00 3.39 Ca2+")
goldenAtomsDict = {atom1.id: atom1, atom2.id: atom2}
self.assertEqual(pdb.pdb, pdbContent)
self.assertEqual(pdb.atoms, goldenAtomsDict)
def testPDB_sel_type_heavyAtoms(self):
# preparation
pdb = atomset.PDB()
# function to test
pdb.initialise("tests/data/pdb_test_ligand.pdb", heavyAtoms=False)
# assertion
pdbContent = "MODEL 1\n\
ATOM 1717 H ASN A 119 25.915 9.925 -7.236 1.00 31.61 H \n\
ATOM 1718 CA ASN A 119 27.159 10.509 -6.736 1.00 33.83 C \n\
TER\n\
HETATM 1733 O1 AIN L 1 13.907 16.130 0.624 0.50 28.52 O \n\
TER\n\
HETATM 1753 CA CA B 1 16.636 15.477 0.293 1.00 3.39 Ca2+\n\
TER\n\
ENDMDL\n\
END \n"
atom1 = atomset.Atom("ATOM 1717 H ASN A 119 25.915 9.925 -7.236 1.00 31.61 H ")
atom2 = atomset.Atom("ATOM 1718 CA ASN A 119 27.159 10.509 -6.736 1.00 33.83 C ")
atom3 = atomset.Atom("HETATM 1733 O1 AIN L 1 13.907 16.130 0.624 0.50 28.52 O ")
atom4 = atomset.Atom("HETATM 1753 CA CA B 1 16.636 15.477 0.293 1.00 3.39 Ca2+")
goldenAtomsDict = {atom1.id: atom1, atom2.id: atom2, atom3.id: atom3, atom4.id: atom4}
self.assertEqual(pdb.pdb, pdbContent)
self.assertEqual(pdb.atoms, goldenAtomsDict)
def testPDB_sel_resnum(self):
# preparation
pdb = atomset.PDB()
# function to test
pdb.initialise("tests/data/pdb_test2.pdb", resnum=2)
# assertion
pdbContent = "ATOM 1 N ASN A 1 7.920 22.268 9.257 1.00 15.18 N1+\n\
ATOM 2 CA CYS B 1 8.394 20.916 9.575 1.00 16.24 C \n\
ATOM 3 CA ASN B 1 7.870 19.937 8.524 1.00 16.63 C \n\
ATOM 4 O ASN A 2 7.030 20.308 7.700 1.00 16.13 O \n\
ATOM 5 CB CYS A 2 8.108 20.445 11.030 1.00 16.53 C \n"
atom2 = atomset.Atom("ATOM 4 O ASN A 2 7.030 20.308 7.700 1.00 16.13 O ")
atom3 = atomset.Atom("ATOM 5 CB CYS A 2 8.108 20.445 11.030 1.00 16.53 C ")
goldenAtomsDict = {atom2.id: atom2, atom3.id: atom3}
self.assertEqual(pdb.pdb, pdbContent)
self.assertEqual(pdb.atoms, goldenAtomsDict)
def testPDB_sel_chain(self):
# preparation
pdb = atomset.PDB()
# function to test
pdb.initialise("tests/data/pdb_test2.pdb", chain="B")
# assertion
pdbContent = "ATOM 1 N ASN A 1 7.920 22.268 9.257 1.00 15.18 N1+\n\
ATOM 2 CA CYS B 1 8.394 20.916 9.575 1.00 16.24 C \n\
ATOM 3 CA ASN B 1 7.870 19.937 8.524 1.00 16.63 C \n\
ATOM 4 O ASN A 2 7.030 20.308 7.700 1.00 16.13 O \n\
ATOM 5 CB CYS A 2 8.108 20.445 11.030 1.00 16.53 C \n"
atom2 = atomset.Atom("ATOM 2 CA CYS B 1 8.394 20.916 9.575 1.00 16.24 C ")
atom3 = atomset.Atom("ATOM 3 CA ASN B 1 7.870 19.937 8.524 1.00 16.63 C ")
goldenAtomsDict = {atom2.id: atom2, atom3.id: atom3}
self.assertEqual(pdb.pdb, pdbContent)
self.assertEqual(pdb.atoms, goldenAtomsDict)
def testPDB_COM(self):
# preparation
pdb = atomset.PDB()
# function to test
pdb.initialise("tests/data/pdb_test.pdb")
# assertion
total_mass = 66.0382
COM_array = np.array([516.1336264, 1373.048894, 602.7150822])
pdb.extractCOM()
self.assertNotAlmostEqual(pdb.totalMass, 0)
COM = COM_array/pdb.totalMass
np.testing.assert_array_almost_equal(COM, pdb.getCOM())
self.assertAlmostEqual(total_mass, pdb.totalMass)
def testPDB_write(self):
# preparation
pdb = atomset.PDB()
pdb.initialise("tests/data/pdb_test.pdb")
# function to test
pdb.writePDB("tests/data/pdb_test_write.pdb")
# assertion
pdbtestfile = open("tests/data/pdb_test_write.pdb", "r")
pdbtestsstr = pdbtestfile.read()
pdbtestfile.close()
self.assertEqual(pdb.pdb, pdbtestsstr)
def testPDB_RMSD(self):
# preparation
pdb_native = atomset.PDB()
pdb_native.initialise("tests/data/ain_native_fixed.pdb", resname='AIN')
pdb_traj = atomset.PDB()
pdb_traj.initialise("tests/data/ain_trajectory.pdb", resname='AIN')
RMSDCalc = RMSDCalculator.RMSDCalculator()
# function to test
RMSD = RMSDCalc.computeRMSD(pdb_native, pdb_traj)
golden_RMSD = 3.928617
self.assertAlmostEqual(RMSD, golden_RMSD, 5)
def testPDB_RMSD_symmetries(self):
# preparation
pdb_native = atomset.PDB()
pdb_native.initialise("tests/data/ain_native_fixed.pdb", resname='AIN')
pdb_traj = atomset.PDB()
pdb_traj.initialise("tests/data/ain_trajectory.pdb", resname='AIN')
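        # Symmetry groups pair interchangeable atom ids ("serial:atomName:resname");
        # they let the calculator pick the atom assignment that minimizes the
        # RMSD, which is why the expected value below is slightly lower than
        # in the plain RMSD test above.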
symDict = [{"1733:O1:AIN": "1735:O2:AIN"}]
RMSDCalc = RMSDCalculator.RMSDCalculator(symDict)
# function to test
RMSD = RMSDCalc.computeRMSD(pdb_native, pdb_traj)
reverseRMSD = RMSDCalc.computeRMSD(pdb_traj, pdb_native)
golden_RMSD = 3.860743
self.assertAlmostEqual(RMSD, reverseRMSD, 5)
self.assertAlmostEqual(RMSD, golden_RMSD, 5)
def test_combination_symmetries(self):
# preparation
pdb_0 = atomset.PDB()
pdb_0.initialise("tests/data/symmetries/cluster_0.pdb", resname='AEN')
pdb_1 = atomset.PDB()
pdb_1.initialise("tests/data/symmetries/cluster_1.pdb", resname='AEN')
pdb_2 = atomset.PDB()
pdb_2.initialise("tests/data/symmetries/cluster_2.pdb", resname='AEN')
symmetries3PTB = [{"3225:C3:AEN": "3227:C5:AEN", "3224:C2:AEN": "3228:C6:AEN"},
{"3230:N1:AEN": "3231:N2:AEN"}]
RMSDCalc = RMSDCalculator.RMSDCalculator(symmetries3PTB)
# funtion to test
RMSD02 = RMSDCalc.computeRMSD(pdb_0, pdb_2)
RMSD20 = RMSDCalc.computeRMSD(pdb_2, pdb_0)
RMSD01 = RMSDCalc.computeRMSD(pdb_0, pdb_1)
RMSD10 = RMSDCalc.computeRMSD(pdb_1, pdb_0)
RMSD21 = RMSDCalc.computeRMSD(pdb_2, pdb_1)
RMSD12 = RMSDCalc.computeRMSD(pdb_1, pdb_2)
self.assertEqual(RMSD01, RMSD10)
self.assertEqual(RMSD02, RMSD20)
self.assertEqual(RMSD21, RMSD12)
def testPDB_contacts(self):
# preparation
pdb_native = atomset.PDB()
pdb_native.initialise("tests/data/native_ain.pdb")
# function to test
contacts = pdb_native.countContacts("AIN", 8)
golden_contacts = 19
self.assertEqual(contacts, golden_contacts)
def testPDB_contactmap(self):
# preparation
pdb_native = atomset.PDB()
pdb_native.initialise("tests/data/pdb_test_contact.pdb")
symmetryEvaluator = sym.SymmetryContactMapEvaluator([])
# function to test
contact_map, contacts = symmetryEvaluator.createContactMap(pdb_native,
"AIN", 8)
golden_contact_map = np.array([[1, 0, 0, 0], [0, 1, 1, 1]])
golden_contacts = pdb_native.countContacts("AIN", 8)
np.testing.assert_array_equal(contact_map, golden_contact_map)
self.assertEqual(golden_contacts, contacts)
def test_contactMapContacts(self):
# preparation
pdb_1 = atomset.PDB()
pdb_1.initialise("tests/data/pdb_test_contact.pdb", resname='AIN')
symmetryEvaluator = sym.SymmetryContactMapEvaluator([])
# function to test
_, contacts = symmetryEvaluator.createContactMap(pdb_1, "AIN", 16)
golden_contacts = pdb_1.countContacts("AIN", 8)
self.assertEqual(golden_contacts, contacts)
def test_symmetryContactMapJaccard(self):
pdb_1 = atomset.PDB()
pdb_1.initialise("tests/data/symmetries/cluster_1.pdb", resname='AEN')
pdb_1_sym = atomset.PDB()
pdb_1_sym.initialise("tests/data/symmetries/cluster_1_sym.pdb",
resname='AEN')
symmetries3PTB = [{"3230:N1:AEN": "3231:N2:AEN"}]
symmetryEvaluator = sym.SymmetryContactMapEvaluator(symmetries3PTB)
symmetryEvaluatorEmpty = sym.SymmetryContactMapEvaluator()
contactMap1, contacts1 = symmetryEvaluator.buildContactMap(pdb_1, 'AEN', 16)
cluster = clustering.Cluster(pdb_1, contactMap=contactMap1)
contactMap1Sym, contactsSym = symmetryEvaluator.createContactMap(pdb_1_sym, 'AEN', 16)
contactMapNoSym, _ = symmetryEvaluator.createContactMap(pdb_1_sym, 'AEN', 16)
goldenJaccard = 0.0
Jaccard = symmetryEvaluator.evaluateJaccard(contactMap1Sym, cluster.contactMap)
JaccardNosym = symmetryEvaluatorEmpty.evaluateJaccard(contactMapNoSym, cluster.contactMap)
self.assertEqual(contacts1, contactsSym)
self.assertAlmostEqual(goldenJaccard, Jaccard)
self.assertNotAlmostEqual(Jaccard, JaccardNosym)
def test_symmetryContactMapCorrelation(self):
pdb_1 = atomset.PDB()
pdb_1.initialise("tests/data/symmetries/cluster_1.pdb", resname='AEN')
pdb_1_sym = atomset.PDB()
pdb_1_sym.initialise("tests/data/symmetries/cluster_1_sym.pdb",
resname='AEN')
symmetries3PTB = [{"3230:N1:AEN": "3231:N2:AEN"}]
symmetryEvaluator = sym.SymmetryContactMapEvaluator(symmetries3PTB)
symmetryEvaluatorEmpty = sym.SymmetryContactMapEvaluator()
contactMap1, contacts1 = symmetryEvaluator.buildContactMap(pdb_1, 'AEN', 16)
cluster = clustering.Cluster(pdb_1, contactMap=contactMap1)
contactMap1Sym, contactsSym = symmetryEvaluator.createContactMap(pdb_1_sym, 'AEN', 16)
contactMapNoSym, _ = symmetryEvaluator.createContactMap(pdb_1_sym, 'AEN', 16)
goldenCorrelation = 0.0
correlationSym = symmetryEvaluator.evaluateCorrelation(contactMap1Sym, cluster.contactMap)
correlationNosym = symmetryEvaluatorEmpty.evaluateCorrelation(contactMapNoSym, cluster.contactMap)
self.assertEqual(contacts1, contactsSym)
self.assertAlmostEqual(goldenCorrelation, correlationSym)
self.assertNotAlmostEqual(correlationSym, correlationNosym)
def test_symmetryContactMapDifference(self):
pdb_1 = atomset.PDB()
pdb_1.initialise("tests/data/symmetries/cluster_1.pdb", resname='AEN')
pdb_1_sym = atomset.PDB()
pdb_1_sym.initialise("tests/data/symmetries/cluster_1_sym.pdb",
resname='AEN')
symmetries3PTB = [{"3230:N1:AEN": "3231:N2:AEN"}]
symmetryEvaluator = sym.SymmetryContactMapEvaluator(symmetries3PTB)
symmetryEvaluatorEmpty = sym.SymmetryContactMapEvaluator()
contactMap1, contacts1 = symmetryEvaluator.buildContactMap(pdb_1, 'AEN', 16)
cluster = clustering.Cluster(pdb_1, contactMap=contactMap1)
contactMap1Sym, contactsSym = symmetryEvaluator.createContactMap(pdb_1_sym, 'AEN', 16)
contactMapNoSym, _ = symmetryEvaluator.createContactMap(pdb_1_sym, 'AEN', 16)
goldenDifference = 0.0
DifferenceSym = symmetryEvaluator.evaluateDifferenceDistance(contactMap1Sym, cluster.contactMap)
DifferenceNosym = symmetryEvaluatorEmpty.evaluateDifferenceDistance(contactMapNoSym, cluster.contactMap)
self.assertEqual(contacts1, contactsSym)
self.assertAlmostEqual(goldenDifference, DifferenceSym)
self.assertNotAlmostEqual(DifferenceSym, DifferenceNosym)
def test_PDB_interface(self):
pdb = atomset.PDB()
pdb.initialise("tests/data/symmetries/cluster_1.pdb", resname='AEN')
self.assertEqual(len(pdb), 9)
atomList = [atom for atom in pdb]
atoms = [pdb.getAtom(a) for a in pdb.atomList]
self.assertEqual(atomList, atoms)
atomId = pdb.atomList[0]
atom = pdb[atomId]
self.assertEqual(atom, pdb.getAtom(atomId))
pdb[atomId] = None
self.assertEqual(None, pdb.getAtom(atomId))
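    # The tests below repeat the PDB selections and measures on an XTC
    # trajectory loaded with mdtraj; the 10x factor applied to the coordinates
    # converts mdtraj's nanometres to the angstroms used in PDB files.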
def test_write_XTC_to_pdb(self):
golden = "tests/data/ain_native_fixed.pdb"
output = "xtc_to_pdb.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(xtc_obj.xyz[0], resname="AIN", topology=topology)
top = utilities.getTopologyFile(golden)
xtc.writePDB(output)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, resname="AIN")
output_pdb = atomset.PDB()
output_pdb.initialise(output, resname="AIN")
os.remove(output)
self.assertEqual(golden_pdb.atoms, output_pdb.atoms)
def testPDB_sel_resname_XTC(self):
golden = "tests/data/ain_native_fixed.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(10*xtc_obj.xyz[0], resname="AIN", topology=topology)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, resname="AIN")
self.assertEqual(xtc, golden_pdb)
def testPDB_sel_atomname_XTC(self):
golden = "tests/data/ain_native_fixed.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(10*xtc_obj.xyz[0], atomname="CA", topology=topology)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, atomname="CA")
self.assertEqual(xtc, golden_pdb)
def testPDB_sel_type_protein_XTC(self):
golden = "tests/data/ain_native_fixed.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(10*xtc_obj.xyz[0], type="PROTEIN", topology=topology)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, type="PROTEIN")
self.assertEqual(xtc, golden_pdb)
def testPDB_sel_type_hetero_XTC(self):
golden = "tests/data/ain_native_fixed.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(10*xtc_obj.xyz[0], type="HETERO", topology=topology)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, type="HETERO")
self.assertEqual(xtc, golden_pdb)
def testPDB_sel_type_heavyAtoms_XTC(self):
golden = "tests/data/ain_native_fixed.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(10*xtc_obj.xyz[0], heavyAtoms=False, topology=topology)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, heavyAtoms=False)
self.assertEqual(xtc, golden_pdb)
def testPDB_sel_resnum_XTC(self):
golden = "tests/data/ain_native_fixed.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(10*xtc_obj.xyz[0], resnum=2, topology=topology)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, resnum=2)
self.assertEqual(xtc, golden_pdb)
def testPDB_COM_XTC(self):
# preparation
golden = "tests/data/ain_native_fixed.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(10*xtc_obj.xyz[0], resname="AIN", topology=topology)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, resname="AIN")
# assertion
self.assertAlmostEqual(xtc.totalMass, golden_pdb.totalMass, 3)
np.testing.assert_array_almost_equal(xtc.getCOM(), golden_pdb.getCOM(), decimal=3)
def testPDB_RMSD_XTC(self):
# preparation
golden = "tests/data/ain_native_fixed.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(10*xtc_obj.xyz[0], resname="AIN", topology=topology)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, resname="AIN")
# assertion
RMSDCalc = RMSDCalculator.RMSDCalculator()
# function to test
RMSD = RMSDCalc.computeRMSD(golden_pdb, xtc)
golden_RMSD = 0.0000
self.assertAlmostEqual(RMSD, golden_RMSD, 2)
def testPDB_RMSD_symmetries_XTC(self):
# preparation
golden = "tests/data/ain_native_fixed.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(10*xtc_obj.xyz[0], resname="AIN", topology=topology)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, resname="AIN")
symDict = [{"1733:O1:AIN": "1735:O2:AIN"}]
RMSDCalc = RMSDCalculator.RMSDCalculator(symDict)
# function to test
RMSD = RMSDCalc.computeRMSD(xtc, golden_pdb)
reverseRMSD = RMSDCalc.computeRMSD(golden_pdb, xtc)
golden_RMSD = 0.00000
self.assertAlmostEqual(RMSD, reverseRMSD, 2)
self.assertAlmostEqual(RMSD, golden_RMSD, 2)
def testPDB_contacts_XTC(self):
# preparation
golden = "tests/data/ain_native_fixed.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(10*xtc_obj.xyz[0], resname="AIN", topology=topology)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, resname="AIN")
# function to test
contacts = golden_pdb.countContacts("AIN", 8)
contacts_xtc = xtc.countContacts("AIN", 8)
self.assertEqual(contacts, contacts_xtc)
def testPDB_contactmap_XTC(self):
# preparation
golden = "tests/data/ain_native_fixed.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(10*xtc_obj.xyz[0], resname="AIN", topology=topology)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, resname="AIN")
symmetryEvaluator = sym.SymmetryContactMapEvaluator([])
# function to test
contact_map, contacts = symmetryEvaluator.createContactMap(golden_pdb,
"AIN", 8)
symmetryEvaluator_xtc = sym.SymmetryContactMapEvaluator([])
contact_map_xtc, contacts_xtc = symmetryEvaluator_xtc.createContactMap(xtc, "AIN", 8)
np.testing.assert_array_equal(contact_map, contact_map_xtc)
self.assertEqual(contacts_xtc, contacts)
def test_symmetryContactMapJaccard_XTC(self):
xtc_obj = mdtraj.load("tests/data/symmetries/cluster_1.xtc", top="tests/data/symmetries/cluster_1.pdb")
topology = utilities.getTopologyFile("tests/data/symmetries/cluster_1.pdb")
pdb_1 = atomset.PDB()
pdb_1.initialise(10*xtc_obj.xyz[0], resname='AEN', topology=topology)
topology = utilities.getTopologyFile("tests/data/symmetries/cluster_1_sym.pdb")
xtc_obj = mdtraj.load("tests/data/symmetries/cluster_1_sym.xtc", top="tests/data/symmetries/cluster_1_sym.pdb")
pdb_1_sym = atomset.PDB()
pdb_1_sym.initialise(10*xtc_obj.xyz[0], resname='AEN', topology=topology)
symmetries3PTB = [{"3230:N1:AEN": "3231:N2:AEN"}]
symmetryEvaluator = sym.SymmetryContactMapEvaluator(symmetries3PTB)
symmetryEvaluatorEmpty = sym.SymmetryContactMapEvaluator()
contactMap1, contacts1 = symmetryEvaluator.buildContactMap(pdb_1, 'AEN', 16)
cluster = clustering.Cluster(pdb_1, contactMap=contactMap1)
contactMap1Sym, contactsSym = symmetryEvaluator.createContactMap(pdb_1_sym, 'AEN', 16)
        contactMapNoSym, _ = symmetryEvaluatorEmpty.createContactMap(pdb_1_sym, 'AEN', 16)
goldenJaccard = 0.0
Jaccard = symmetryEvaluator.evaluateJaccard(contactMap1Sym, cluster.contactMap)
JaccardNosym = symmetryEvaluatorEmpty.evaluateJaccard(contactMapNoSym, cluster.contactMap)
self.assertEqual(contacts1, contactsSym)
self.assertAlmostEqual(goldenJaccard, Jaccard)
self.assertNotAlmostEqual(Jaccard, JaccardNosym)
def test_PDB_interface_XTC(self):
golden = "tests/data/ain_native_fixed.pdb"
topology = utilities.getTopologyFile(golden)
xtc_obj = mdtraj.load("tests/data/ain_native_fixed.xtc", top=golden)
xtc = atomset.PDB()
xtc.initialise(10*xtc_obj.xyz[0], resname="AIN", topology=topology)
golden_pdb = atomset.PDB()
golden_pdb.initialise(golden, resname="AIN")
self.assertEqual(len(golden_pdb), len(xtc))
atomList = [atom for atom in golden_pdb]
atomList_xtc = [atom for atom in xtc]
self.assertEqual(atomList, atomList_xtc)
atomId = xtc.atomList[0]
atom = xtc[atomId]
self.assertEqual(atom, xtc.getAtom(atomId))
xtc[atomId] = None
self.assertEqual(None, xtc.getAtom(atomId))
| mit |
infobloxopen/infoblox-netmri | infoblox_netmri/api/remote/models/device_disk_utilization_remote.py | 1 | 2708 | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class DeviceDiskUtilizationRemote(RemoteModel):
"""
    This table lists the device disk utilization entries.
    | ``HRStorageID:`` The internal NetMRI identifier of the host resources (HR) storage entry in the device disk utilization.
| ``attribute type:`` number
| ``DeviceID:`` The internal NetMRI identifier for the device from which device disk utilization information was collected.
| ``attribute type:`` number
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``StartTime:`` The date and time the record was initially created in NetMRI.
| ``attribute type:`` datetime
| ``EndTime:`` The date and time the record was last modified in NetMRI.
| ``attribute type:`` datetime
    | ``HRStorageIndex:`` The current index of the host resources storage device in the device disk utilization.
| ``attribute type:`` string
    | ``HRStorageDescr:`` The host resources storage description of the device disk utilization.
| ``attribute type:`` string
    | ``HRStorageAllocationUnits:`` The allocation units of the host resources storage in the device disk utilization.
| ``attribute type:`` string
| ``HRStorageSize:`` The storage size in the device disk utilization.
| ``attribute type:`` string
| ``HRStorageUsed:`` The used storage size in the device disk utilization.
| ``attribute type:`` string
"""
properties = ("HRStorageID",
"DeviceID",
"DataSourceID",
"StartTime",
"EndTime",
"HRStorageIndex",
"HRStorageDescr",
"HRStorageAllocationUnits",
"HRStorageSize",
"HRStorageUsed",
)
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"HRStorageID": self.HRStorageID})
@property
@check_api_availability
def device(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
return self.broker.device(**{"HRStorageID": self.HRStorageID})
@property
@check_api_availability
def infradevice(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
return self.broker.infradevice(**{"HRStorageID": self.HRStorageID})
| apache-2.0 |
elba7r/frameworking | frappe/tests/test_filemanager.py | 70 | 2927 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import os
import unittest
from frappe.utils.file_manager import save_file, get_file, get_files_path
test_content1 = 'Hello'
test_content2 = 'Hello World'
def make_test_doc():
d = frappe.new_doc('ToDo')
d.description = 'Test'
d.save()
return d.doctype, d.name
class TestSimpleFile(unittest.TestCase):
def setUp(self):
self.attached_to_doctype, self.attached_to_docname = make_test_doc()
self.test_content = test_content1
self.saved_file = save_file('hello.txt', self.test_content, self.attached_to_doctype, self.attached_to_docname)
self.saved_filename = get_files_path(self.saved_file.file_name)
def test_save(self):
filename, content = get_file(self.saved_file.name)
self.assertEqual(content, self.test_content)
def tearDown(self):
# File gets deleted on rollback, so blank
pass
class TestSameFileName(unittest.TestCase):
def setUp(self):
self.attached_to_doctype, self.attached_to_docname = make_test_doc()
self.test_content1 = test_content1
self.test_content2 = test_content2
self.saved_file1 = save_file('hello.txt', self.test_content1, self.attached_to_doctype, self.attached_to_docname)
self.saved_file2 = save_file('hello.txt', self.test_content2, self.attached_to_doctype, self.attached_to_docname)
self.saved_filename1 = get_files_path(self.saved_file1.file_name)
self.saved_filename2 = get_files_path(self.saved_file2.file_name)
def test_saved_content(self):
filename1, content1 = get_file(self.saved_file1.name)
self.assertEqual(content1, self.test_content1)
filename2, content2 = get_file(self.saved_file2.name)
self.assertEqual(content2, self.test_content2)
def tearDown(self):
# File gets deleted on rollback, so blank
pass
class TestSameContent(unittest.TestCase):
def setUp(self):
self.attached_to_doctype1, self.attached_to_docname1 = make_test_doc()
self.attached_to_doctype2, self.attached_to_docname2 = make_test_doc()
self.test_content1 = test_content1
self.test_content2 = test_content1
self.orig_filename = 'hello.txt'
self.dup_filename = 'hello2.txt'
self.saved_file1 = save_file(self.orig_filename, self.test_content1, self.attached_to_doctype1, self.attached_to_docname1)
self.saved_file2 = save_file(self.dup_filename, self.test_content2, self.attached_to_doctype2, self.attached_to_docname2)
self.saved_filename1 = get_files_path(self.saved_file1.file_name)
self.saved_filename2 = get_files_path(self.saved_file2.file_name)
def test_saved_content(self):
filename1, content1 = get_file(self.saved_file1.name)
filename2, content2 = get_file(self.saved_file2.name)
self.assertEqual(filename1, filename2)
self.assertFalse(os.path.exists(get_files_path(self.dup_filename)))
def tearDown(self):
# File gets deleted on rollback, so blank
pass
| mit |
waylonflinn/bquery | bquery/benchmarks/bench_groupby.py | 2 | 2465 | from __future__ import print_function
# bench related imports
import numpy as np
import shutil
import bquery
import pandas as pd
import itertools as itt
import cytoolz
import cytoolz.dicttoolz
from toolz import valmap, compose
from cytoolz.curried import pluck
import blaze as blz
# other imports
import contextlib
import os
import time
try:
# Python 2
from itertools import izip
except ImportError:
# Python 3
izip = zip
t_elapsed = 0.0
@contextlib.contextmanager
def ctime(message=None):
"Counts the time spent in some context"
global t_elapsed
t_elapsed = 0.0
print('\n')
t = time.time()
yield
if message:
print(message + ": ", end='')
t_elapsed = time.time() - t
print(round(t_elapsed, 4), "sec")
ga = itt.cycle(['ES', 'NL'])
gb = itt.cycle(['b1', 'b2', 'b3', 'b4', 'b5'])
gx = itt.cycle([1, 2])
gy = itt.cycle([-1, -2])
rootdir = 'bench-data.bcolz'
if os.path.exists(rootdir):
shutil.rmtree(rootdir)
n_rows = 1000000
print('Rows: ', n_rows)
# -- data
z = np.fromiter(((a, b, x, y) for a, b, x, y in izip(ga, gb, gx, gy)),
dtype='S2,S2,i8,i8', count=n_rows)
ct = bquery.ctable(z, rootdir=rootdir, )
print(ct)
# -- pandas --
df = pd.DataFrame(z)
with ctime(message='pandas'):
result = df.groupby(['f0'])['f2'].sum()
print(result)
t_pandas = t_elapsed
# -- cytoolz --
with ctime(message='cytoolz over bcolz'):
# In Memory Split-Apply-Combine
# http://toolz.readthedocs.org/en/latest/streaming-analytics.html?highlight=reduce#split-apply-combine-with-groupby-and-reduceby
r = cytoolz.groupby(lambda row: row.f0, ct)
result = valmap(compose(sum, pluck(2)), r)
print('x{0} slower than pandas'.format(round(t_elapsed / t_pandas, 2)))
print(result)
# -- blaze + bcolz --
blaze_data = blz.Data(ct.rootdir)
expr = blz.by(blaze_data.f0, sum_f2=blaze_data.f2.sum())
with ctime(message='blaze over bcolz'):
result = blz.compute(expr)
print('x{0} slower than pandas'.format(round(t_elapsed / t_pandas, 2)))
print(result)
# -- bquery --
with ctime(message='bquery over bcolz'):
result = ct.groupby(['f0'], ['f2'])
print('x{0} slower than pandas'.format(round(t_elapsed / t_pandas, 2)))
print(result)
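# Cache the factorization of column f0 on disk, so that subsequent
# groupby calls on f0 can skip the factorization step.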
ct.cache_factor(['f0'], refresh=True)
with ctime(message='bquery over bcolz (factorization cached)'):
result = ct.groupby(['f0'], ['f2'])
print('x{0} slower than pandas'.format(round(t_elapsed / t_pandas, 2)))
print(result)
shutil.rmtree(rootdir)
| bsd-3-clause |
christophlsa/odoo | addons/sale_stock/report/sale_report.py | 231 | 2100 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class sale_report(osv.osv):
_inherit = "sale.report"
_columns = {
'shipped': fields.boolean('Shipped', readonly=True),
'shipped_qty_1': fields.integer('Shipped', readonly=True),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse',readonly=True),
'state': fields.selection([
('draft', 'Quotation'),
('sent', 'Quotation Sent'),
('waiting_date', 'Waiting Schedule'),
('manual', 'Manual In Progress'),
('progress', 'In Progress'),
('shipping_except', 'Shipping Exception'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
], 'Order Status', readonly=True),
}
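    # Extend the inherited sale.report SQL query with the warehouse
    # and shipping columns declared above.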
def _select(self):
return super(sale_report, self)._select() + ", s.warehouse_id as warehouse_id, s.shipped, s.shipped::integer as shipped_qty_1"
def _group_by(self):
return super(sale_report, self)._group_by() + ", s.warehouse_id, s.shipped"
| agpl-3.0 |
crosswalk-project/chromium-crosswalk-efl | tools/telemetry/telemetry/value/summary.py | 58 | 6381 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
from telemetry.value import failure
from telemetry.value import merge_values
from telemetry.value import skip
class Summary(object):
"""Computes summary values from the per-page-run values produced by a test.
  Some telemetry benchmarks repeat a number of times in order to get a reliable
  measurement. The test does not have to handle merging of these runs: the
  summarizer does it for you.
  For instance, if two pages run, 3 times and 1 time respectively:
ScalarValue(page1, 'foo', units='ms', 1)
ScalarValue(page1, 'foo', units='ms', 1)
ScalarValue(page1, 'foo', units='ms', 1)
ScalarValue(page2, 'foo', units='ms', 2)
Then summarizer will produce two sets of values. First,
computed_per_page_values:
[
    ListOfScalarValues(page1, 'foo', units='ms', [1,1,1]),
    ListOfScalarValues(page2, 'foo', units='ms', [2])
]
In addition, it will produce a summary value:
[
    ListOfScalarValues(page=None, 'foo', units='ms', [1,1,1,2])
]
"""
def __init__(self, all_page_specific_values):
had_failures = any(isinstance(v, failure.FailureValue) for v in
all_page_specific_values)
self.had_failures = had_failures
self._computed_per_page_values = []
self._computed_summary_values = []
self._interleaved_computed_per_page_values_and_summaries = []
self._ComputePerPageValues(all_page_specific_values)
@property
def computed_per_page_values(self):
return self._computed_per_page_values
@property
def computed_summary_values(self):
return self._computed_summary_values
@property
def interleaved_computed_per_page_values_and_summaries(self):
"""Returns the computed per page values and summary values interleaved.
All the results for a given name are printed together. First per page
values, then summary values.
"""
return self._interleaved_computed_per_page_values_and_summaries
def _ComputePerPageValues(self, all_page_specific_values):
all_successful_page_values = [
v for v in all_page_specific_values if not (isinstance(
v, failure.FailureValue) or isinstance(v, skip.SkipValue))]
# We will later need to determine how many values were originally created
# for each value name, to apply a workaround meant to clean up the printf
# output.
num_successful_pages_for_value_name = defaultdict(int)
for v in all_successful_page_values:
num_successful_pages_for_value_name[v.name] += 1
    # By here, due to page repeat options, all_successful_page_values may
    # contain values of the same name not only from multiple pages, but also
    # multiple times from the same page. So even if, for instance, only one
    # page ran, it may have run twice, producing two 'x' values.
#
# So, get rid of the repeated pages by merging.
merged_page_values = merge_values.MergeLikeValuesFromSamePage(
all_successful_page_values)
    # Now we have a bunch of values, but only one value per value_name per page.
# Suppose page1 and page2 ran, producing values x and y. We want to print
# x for page1
# x for page2
# x for page1, page2 combined
#
# y for page1
# y for page2
# y for page1, page2 combined
#
# We already have the x values in the values array. But, we will need
# them indexable by the value name.
#
# The following dict maps value_name -> list of pages that have values of
# that name.
per_page_values_by_value_name = defaultdict(list)
for value in merged_page_values:
per_page_values_by_value_name[value.name].append(value)
# We already have the x values in the values array. But, we also need
# the values merged across the pages. And, we will need them indexed by
# value name so that we can find them when printing out value names in
# alphabetical order.
merged_pages_value_by_value_name = {}
if not self.had_failures:
for value in merge_values.MergeLikeValuesFromDifferentPages(
all_successful_page_values):
assert value.name not in merged_pages_value_by_value_name
merged_pages_value_by_value_name[value.name] = value
    # sorted_value_names will govern the order we start printing values.
value_names = set([v.name for v in merged_page_values])
sorted_value_names = sorted(value_names)
# Time to walk through the values by name, printing first the page-specific
# values and then the merged_site value.
for value_name in sorted_value_names:
per_page_values = per_page_values_by_value_name.get(value_name, [])
# Sort the values by their url
sorted_per_page_values = list(per_page_values)
      sorted_per_page_values.sort(
          key=lambda value: value.page.display_name)
# Output the page-specific results.
num_successful_pages_for_this_value_name = (
num_successful_pages_for_value_name[value_name])
for per_page_value in sorted_per_page_values:
self._ComputePerPageValue(per_page_value,
num_successful_pages_for_this_value_name)
# Output the combined values.
merged_pages_value = merged_pages_value_by_value_name.get(value_name,
None)
if merged_pages_value:
self._computed_summary_values.append(merged_pages_value)
self._interleaved_computed_per_page_values_and_summaries.append(
merged_pages_value)
def _ComputePerPageValue(
self, value, num_successful_pages_for_this_value_name):
# If there were any page errors, we typically will print nothing.
#
# Note: this branch is structured less-densely to improve legibility.
if num_successful_pages_for_this_value_name > 1:
should_print = True
elif (self.had_failures and
num_successful_pages_for_this_value_name == 1):
should_print = True
else:
should_print = False
if not should_print:
return
# Actually save the result.
self._computed_per_page_values.append(value)
self._interleaved_computed_per_page_values_and_summaries.append(value)
| bsd-3-clause |
johren/RackHD | test/tests/api/redfish_1_0/schema_tests.py | 15 | 4301 | from config.redfish1_0_config import *
from modules.logger import Log
from on_http_redfish_1_0 import RedfishvApi as redfish
from on_http_redfish_1_0 import rest
from datetime import datetime
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_true
from proboscis.asserts import fail
from proboscis import SkipTest
from proboscis import test
from json import loads,dumps
LOG = Log(__name__)
@test(groups=['redfish.schema.tests'], depends_on_groups=['obm.tests'])
class SchemaTests(object):
def __init__(self):
self.__client = config.api_client
self.__schemaList = None
self.__membersList = None
self.__locationUri = []
def __get_data(self):
return loads(self.__client.last_response.data)
@test(groups=['redfish.list_schemas'])
def test_list_schemas(self):
""" Testing GET /Schemas """
redfish().list_schemas()
schemas = self.__get_data()
LOG.debug(schemas,json=True)
assert_not_equal(0, len(schemas), message='Schema list was empty!')
self.__schemaList = schemas
@test(groups=['redfish.get_schema'], depends_on_groups=['redfish.list_schemas'])
def test_get_schema(self):
""" Testing GET /Schemas/{identifier} """
self.__membersList = self.__schemaList.get('Members')
assert_not_equal(None, self.__membersList)
for member in self.__membersList:
dataId = member.get('@odata.id')
assert_not_equal(None,dataId)
dataId = dataId.split('/redfish/v1/Schemas/')[1]
redfish().get_schema(dataId)
schema_ref = self.__get_data()
LOG.debug(schema_ref,json=True)
id = schema_ref.get('Id')
assert_equal(dataId, id, message='unexpected id {0}, expected {1}'.format(id,dataId))
assert_equal(type(schema_ref.get('Location')), list, message='expected list not found')
location = schema_ref.get('Location')[0]
assert_equal(type(location.get('Uri')), unicode, message='expected uri string not found')
self.__locationUri.append(location.get('Uri'))
@test(groups=['redfish.get_schema_invalid'], depends_on_groups=['redfish.list_schemas'])
def test_get_schema_invalid(self):
""" Testing GET /Schemas/{identifier} 404s properly """
self.__membersList = self.__schemaList.get('Members')
assert_not_equal(None, self.__membersList)
for member in self.__membersList:
dataId = member.get('@odata.id')
assert_not_equal(None,dataId)
dataId = dataId.split('/redfish/v1/Schemas/')[1]
try:
redfish().get_schema(dataId + '-invalid')
fail(message='did not raise exception')
except rest.ApiException as e:
assert_equal(404, e.status, message='unexpected response {0}, expected 404'.format(e.status))
break
@test(groups=['redfish.get_schema_content'], depends_on_groups=['redfish.get_schema'])
def test_get_schema_content(self):
""" Testing GET /SchemaStore/en/{identifier} """
assert_not_equal([], self.__locationUri)
for member in self.__locationUri:
assert_not_equal(None,member)
dataId = member.split('/redfish/v1/SchemaStore/en/')[1]
redfish().get_schema_content(dataId)
schema_file_contents = self.__get_data()
@test(groups=['redfish.get_schema_content_invalid'], depends_on_groups=['redfish.get_schema'])
def test_get_schema_content_invalid(self):
""" Testing GET /Schemas/en/{identifier} 404s properly """
assert_not_equal([], self.__locationUri)
for member in self.__locationUri:
assert_not_equal(None,member)
dataId = member.split('/redfish/v1/SchemaStore/en/')[1]
try:
redfish().get_schema_content(dataId + '-invalid')
fail(message='did not raise exception')
except rest.ApiException as e:
assert_equal(404, e.status, message='unexpected response {0}, expected 404'.format(e.status))
break
| apache-2.0 |
dwightgunning/django | django/templatetags/tz.py | 277 | 5572 | from datetime import datetime, tzinfo
from django.template import Library, Node, TemplateSyntaxError
from django.utils import six, timezone
try:
import pytz
except ImportError:
pytz = None
register = Library()
# HACK: datetime is an old-style class, create a new-style equivalent
# so we can define additional attributes.
class datetimeobject(datetime, object):
pass
# Template filters
@register.filter
def localtime(value):
"""
Converts a datetime to local time in the active time zone.
This only makes sense within a {% localtime off %} block.
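    Sample usage (illustrative)::
        {% localtime off %}{{ value_in_utc|localtime }}{% endlocaltime %}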
"""
return do_timezone(value, timezone.get_current_timezone())
@register.filter
def utc(value):
"""
Converts a datetime to UTC.
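    Sample usage (illustrative)::
        {{ value|utc }}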
"""
return do_timezone(value, timezone.utc)
@register.filter('timezone')
def do_timezone(value, arg):
"""
Converts a datetime to local time in a given time zone.
The argument must be an instance of a tzinfo subclass or a time zone name.
If it is a time zone name, pytz is required.
Naive datetimes are assumed to be in local time in the default time zone.
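    Sample usage (illustrative; ``value`` stands for any datetime in the
    template context)::
        {{ value|timezone:"Europe/Paris" }}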
"""
if not isinstance(value, datetime):
return ''
# Obtain a timezone-aware datetime
try:
if timezone.is_naive(value):
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
# Filters must never raise exceptions, and pytz' exceptions inherit
# Exception directly, not a specific subclass. So catch everything.
except Exception:
return ''
# Obtain a tzinfo instance
if isinstance(arg, tzinfo):
tz = arg
elif isinstance(arg, six.string_types) and pytz is not None:
try:
tz = pytz.timezone(arg)
except pytz.UnknownTimeZoneError:
return ''
else:
return ''
result = timezone.localtime(value, tz)
# HACK: the convert_to_local_time flag will prevent
# automatic conversion of the value to local time.
result = datetimeobject(result.year, result.month, result.day,
result.hour, result.minute, result.second,
result.microsecond, result.tzinfo)
result.convert_to_local_time = False
return result
# Template tags
class LocalTimeNode(Node):
"""
Template node class used by ``localtime_tag``.
"""
def __init__(self, nodelist, use_tz):
self.nodelist = nodelist
self.use_tz = use_tz
def render(self, context):
old_setting = context.use_tz
context.use_tz = self.use_tz
output = self.nodelist.render(context)
context.use_tz = old_setting
return output
class TimezoneNode(Node):
"""
Template node class used by ``timezone_tag``.
"""
def __init__(self, nodelist, tz):
self.nodelist = nodelist
self.tz = tz
def render(self, context):
with timezone.override(self.tz.resolve(context)):
output = self.nodelist.render(context)
return output
class GetCurrentTimezoneNode(Node):
"""
Template node class used by ``get_current_timezone_tag``.
"""
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = timezone.get_current_timezone_name()
return ''
@register.tag('localtime')
def localtime_tag(parser, token):
"""
Forces or prevents conversion of datetime objects to local time,
regardless of the value of ``settings.USE_TZ``.
Sample usage::
{% localtime off %}{{ value_in_utc }}{% endlocaltime %}
"""
bits = token.split_contents()
if len(bits) == 1:
use_tz = True
elif len(bits) > 2 or bits[1] not in ('on', 'off'):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" %
bits[0])
else:
use_tz = bits[1] == 'on'
nodelist = parser.parse(('endlocaltime',))
parser.delete_first_token()
return LocalTimeNode(nodelist, use_tz)
@register.tag('timezone')
def timezone_tag(parser, token):
"""
Enables a given time zone just for this block.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, the default time zone is used within the block.
Sample usage::
{% timezone "Europe/Paris" %}
It is {{ now }} in Paris.
{% endtimezone %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (timezone)" %
bits[0])
tz = parser.compile_filter(bits[1])
nodelist = parser.parse(('endtimezone',))
parser.delete_first_token()
return TimezoneNode(nodelist, tz)
@register.tag("get_current_timezone")
def get_current_timezone_tag(parser, token):
"""
Stores the name of the current time zone in the context.
Usage::
{% get_current_timezone as TIME_ZONE %}
This will fetch the currently active time zone and put its name
into the ``TIME_ZONE`` context variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_timezone' requires "
"'as variable' (got %r)" % args)
return GetCurrentTimezoneNode(args[2])
| bsd-3-clause |
luceatnobis/youtube-dl | youtube_dl/extractor/fxnetworks.py | 47 | 2564 | # coding: utf-8
from __future__ import unicode_literals
from .adobepass import AdobePassIE
from ..utils import (
update_url_query,
extract_attributes,
parse_age_limit,
smuggle_url,
)
class FXNetworksIE(AdobePassIE):
_VALID_URL = r'https?://(?:www\.)?(?:fxnetworks|simpsonsworld)\.com/video/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.fxnetworks.com/video/719841347694',
'md5': '1447d4722e42ebca19e5232ab93abb22',
'info_dict': {
'id': '719841347694',
'ext': 'mp4',
'title': 'Vanpage',
'description': 'F*ck settling down. You\'re the Worst returns for an all new season August 31st on FXX.',
'age_limit': 14,
'uploader': 'NEWA-FNG-FX',
'upload_date': '20160706',
'timestamp': 1467844741,
},
'add_ie': ['ThePlatform'],
}, {
'url': 'http://www.simpsonsworld.com/video/716094019682',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if 'The content you are trying to access is not available in your region.' in webpage:
self.raise_geo_restricted()
video_data = extract_attributes(self._search_regex(
r'(<a.+?rel="http://link\.theplatform\.com/s/.+?</a>)', webpage, 'video data'))
player_type = self._search_regex(r'playerType\s*=\s*[\'"]([^\'"]+)', webpage, 'player type', default=None)
release_url = video_data['rel']
title = video_data['data-title']
rating = video_data.get('data-rating')
query = {
'mbr': 'true',
}
if player_type == 'movies':
query.update({
'manifest': 'm3u',
})
else:
query.update({
'switch': 'http',
})
if video_data.get('data-req-auth') == '1':
resource = self._get_mvpd_resource(
video_data['data-channel'], title,
video_data.get('data-guid'), rating)
query['auth'] = self._extract_mvpd_auth(url, video_id, 'fx', resource)
return {
'_type': 'url_transparent',
'id': video_id,
'title': title,
'url': smuggle_url(update_url_query(release_url, query), {'force_smil_url': True}),
'thumbnail': video_data.get('data-large-thumb'),
'age_limit': parse_age_limit(rating),
'ie_key': 'ThePlatform',
}
| unlicense |
hoettges/QKan | qkan/tools/dialogs/__init__.py | 1 | 3551 | import logging
import os
from typing import TYPE_CHECKING, Optional
from qgis.PyQt.QtWidgets import QDialog, QFileDialog, QLineEdit, QPushButton, QWidget
from qkan.database.dbfunc import DBConnection
logger = logging.getLogger("QKan.tools.dialogs")
if TYPE_CHECKING:
from qkan.plugin import QKanPlugin
class QKanDialog(QDialog):
def __init__(self, plugin: "QKanPlugin", parent: Optional[QWidget] = None):
# noinspection PyArgumentList
super().__init__(parent)
self.setupUi(self)
self.plugin = plugin
def bind_select_path(
self,
title: str,
file_filter: str,
line_edit: QLineEdit,
push_button: QPushButton,
is_open: bool,
default_dir: Optional[str] = None,
) -> None:
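        """Wire push_button so that clicking it opens a file dialog
        (open or save, depending on is_open) and writes the chosen
        path into line_edit."""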
if not default_dir:
default_dir = self.plugin.default_dir
push_button.clicked.connect(
lambda: self.select_path(
title, file_filter, line_edit, is_open, default_dir
)
)
def select_path(
self,
title: str,
file_filter: str,
line_edit: QLineEdit,
is_open: bool,
default_dir: str,
) -> None:
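        """Open a file dialog and store the selected path in line_edit."""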
if is_open:
# noinspection PyArgumentList,PyCallByClass
filename, __ = QFileDialog.getOpenFileName(
self, title, default_dir, file_filter
)
else:
# noinspection PyArgumentList,PyCallByClass
filename, __ = QFileDialog.getSaveFileName(
self,
title,
default_dir,
file_filter,
)
if os.path.dirname(filename) != "":
line_edit.setText(filename)
class QKanDBDialog(QKanDialog):
pb_selectQKanDB: QPushButton
tf_qkanDB: QLineEdit
open_mode = True
def __init__(self, plugin: "QKanPlugin", parent: Optional[QWidget] = None):
super().__init__(plugin, parent)
self.pb_selectQKanDB.clicked.connect(self.select_qkan_db)
self.db_qkan: Optional[DBConnection] = None
def select_qkan_db(self) -> None:
"""Anzubindende QKan-Datenbank festlegen"""
if self.open_mode:
# noinspection PyArgumentList,PyCallByClass
filename, __ = QFileDialog.getOpenFileName(
self, "QKan-Datenbank auswählen", self.plugin.default_dir, "*.sqlite"
)
else:
# noinspection PyArgumentList,PyCallByClass
filename, __ = QFileDialog.getSaveFileName(
self,
"Zu erstellende QKan-Datenbank auswählen",
self.plugin.default_dir,
"*.sqlite",
)
if os.path.dirname(filename) != "":
self.tf_qkanDB.setText(filename)
class QKanProjectDialog(QKanDialog):
pb_selectProjectFile: QPushButton
tf_projectFile: QLineEdit
def __init__(self, plugin: "QKanPlugin", parent: Optional[QWidget] = None):
super().__init__(plugin, parent)
self.pb_selectProjectFile.clicked.connect(self.select_project_file)
def select_project_file(self) -> None:
"""Zu erstellende Projektdatei festlegen"""
# noinspection PyArgumentList,PyCallByClass
filename, __ = QFileDialog.getSaveFileName(
self,
"Dateinamen der zu erstellenden Projektdatei eingeben",
self.plugin.default_dir,
"*.qgs",
)
if os.path.dirname(filename) != "":
self.tf_projectFile.setText(filename)
| gpl-3.0 |
Niektory/fifengine | engine/python/fife/extensions/pychan/widgets/widget.py | 1 | 34152 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
import weakref
from fife import fife
from fife.extensions.pychan import events
from fife.extensions.pychan.attrs import (Attr, UnicodeAttr, PointAttr,
ColorAttr, BoolAttr, IntAttr)
from fife.extensions.pychan.exceptions import StopTreeWalking
from fife.extensions.pychan.properties import ColorProperty
from common import get_manager
from layout import isLayouted
class Widget(object):
"""
This is the common widget base class, which provides most of the wrapping
functionality.
Attributes
==========
Widgets are manipulated (mostly) through attributes - and these can all be set by XML attributes.
Derived widgets will have other attributes. Please see their B{New Attributes} sections. The types of the
attributes are pretty straightforward, but note that Position and Color attribute types will also accept
C{fife.Point} and C{fife.Color} values.
	- name: String: The identification of the widget, most useful if it is unique within a given widget hierarchy.
This is used to find widgets by L{mapEvents},L{distributeInitialData},L{distributeData} and L{collectData}.
- position: Position: The position relative to the parent widget - or on screen, if this is the root widget.
- size: Position: The real size of the widget (including border and margins). Usually you do not need to set this.
A notable exception is the L{ScrollArea}.
- min_size: Position: The minimal size this widget is allowed to have. This is enforced through the accessor methods
of the actual size attribute.
- max_size: Position: The maximal size this widget is allowed to have. This is enforced through the accessor methods
of the actual size attribute.
- base_color: Color
- background_color: Color
- foreground_color: Color
- selection_color: Color
- font: String: This should identify a font that was loaded via L{loadFonts} before.
- helptext: Unicode: Text which can be used for e.g. tooltips.
- comment: Unicode: Additional text stored by the widget. Not used by PyChan directly. Can be used by the client for additional info about the widget.
- border_size: Integer: The size of the border in pixels.
- position_technique: This can be either "automatic" or "explicit" - only L{Window} has this set to "automatic" which
results in new windows being centered on screen (for now).
If it is set to "explicit" the position attribute will not be touched.
- vexpand: Integer: >= 0. Proportion to expand this widget vertically.
- hexpand: Integer: >= 0. Proportion to expand this widget horizontally.
Convenience Attributes
======================
	These attributes are convenience/shorthand versions of the above mentioned attributes, and assignment will reflect
	the associated attributes' values. E.g. the following is equivalent::
# Set X position, leave Y alone
widget.x = 10
# Same here
posi = widget.position
widget.position = (10, posi[1])
Here they are.
- x: Integer: The horizontal part of the position attribute.
- y: Integer: The vertical part of the position attribute.
- width: Integer: The horizontal part of the size attribute.
- height: Integer: The vertical part of the size attribute.
"""
ATTRIBUTES = [ Attr('name'),
PointAttr('position'),
PointAttr('min_size'),
PointAttr('size'),
PointAttr('max_size'),
ColorAttr('base_color'),
ColorAttr('background_color'),
ColorAttr('foreground_color'),
ColorAttr('selection_color'),
Attr('style'),
Attr('font'),
IntAttr('border_size'),
Attr('position_technique'),
IntAttr('vexpand'),
IntAttr('hexpand'),
UnicodeAttr('helptext'),
BoolAttr('is_focusable'),
UnicodeAttr('comment')
]
DEFAULT_NAME = '__unnamed__'
DEFAULT_HEXPAND = 0
DEFAULT_VEXPAND = 0
DEFAULT_MAX_SIZE = 500000, 500000
DEFAULT_SIZE = -1, -1
DEFAULT_MIN_SIZE = 0, 0
DEFAULT_HELPTEXT = u""
DEFAULT_POSITION = 0, 0
DEFAULT_FONT = "default"
DEFAULT_BORDER_SIZE = 0
DEFAULT_POSITION_TECHNIQUE = "explicit"
DEFAULT_COMMENT = u""
HIDE_SHOW_ERROR = """\
	You can only show/hide the top widget of a hierarchy.
Use 'addChild' or 'removeChild' to add/remove labels for example.
"""
def __init__(self,
parent = None,
name = None,
size = None,
min_size = None,
max_size = None,
helptext = None,
position = None,
style = None,
hexpand = None,
vexpand = None,
font = None,
base_color = None,
background_color = None,
foreground_color = None,
selection_color = None,
border_size = None,
position_technique = None,
is_focusable = None,
comment = None):
# Make sure the real_widget has been created
assert( hasattr(self,'real_widget') )
self.event_mapper = events.EventMapper(self)
# Flag to indicate if the Widget is added to the Manager
self._added = False
# Flag to indicate if the Widget is added to
# the top Widget list of the Manager
self._top_added = False
self._extra_border = (0,0)
# Data distribution & retrieval settings
self.accepts_data = False
self.accepts_initial_data = False
#set all defaults
if get_manager().compat_layout:
self.hexpand, self.vexpand = 0,0
else:
self.hexpand = self.DEFAULT_HEXPAND
self.vexpand = self.DEFAULT_VEXPAND
self.name = self.DEFAULT_NAME
self.has_name = False
self.position = self.DEFAULT_POSITION
self.position_technique = self.DEFAULT_POSITION_TECHNIQUE
self.font = self.DEFAULT_FONT
self.min_size = self.DEFAULT_MIN_SIZE
self.max_size = self.DEFAULT_MAX_SIZE
self.size = self.DEFAULT_SIZE
self.border_size = self.DEFAULT_BORDER_SIZE
self.helptext = self.DEFAULT_HELPTEXT
self.comment = self.DEFAULT_COMMENT
self._usedPrefixes = []
# Parent attribute makes sure we only have one parent,
# that tests self.__parent - so make sure we have the attr here.
self.__parent = None
self.parent = parent
# Inherit and apply style
if style is None and parent:
style = parent.style
self.style = style or "default"
# override everything style has set
if vexpand is not None: self.vexpand = vexpand
if hexpand is not None: self.hexpand = hexpand
if name is not None:
self.name = name
self.has_name = True
if position is not None: self.position = position
if position_technique is not None: self.position_technique = position_technique
if font is not None: self.font = font
# only set this if it's provided
if is_focusable is not None: self.is_focusable = is_focusable
if min_size is not None: self.min_size = min_size
if max_size is not None: self.max_size = max_size
if size is not None: self.size = size
if border_size is not None: self.border_size = border_size
if helptext is not None: self.helptext = helptext
if comment is not None: self.comment = comment
# these are set in the default style
if base_color is not None: self.base_color = base_color
if background_color is not None: self.background_color = background_color
if foreground_color is not None: self.foreground_color = foreground_color
if selection_color is not None: self.selection_color = selection_color
# add this widget to the manager
get_manager().addWidget(self)
def clone(self, prefix):
"""
Clones this widget.
Concrete widgets should implement this one, if not, an exception should
be raised.
Prefix is used to create the name of the cloned widget.
"""
raise RuntimeError("No implementation of clone method for %s" % self.__class__)
def execute(self, bind, focus=None):
"""
Execute a dialog synchronously.
As argument a dictionary mapping widget names to return values
is expected. Events from these widgets will cause this function
to return with the associated return value.
This function will not return until such an event occurs.
The widget will be shown before execution and hidden afterwards.
You can only execute root widgets.
@param focus: name of child widget which should have focus. Defaults to main widget.
Note: This feature is not tested well, and the API will probably
change. Otherwise have fun::
# Okay this a very condensed example :-)
return pychan.loadXML("contents/gui/dialog.xml").execute({ 'okButton' : True, 'closeButton' : False })
"""
if not get_manager().can_execute:
raise RuntimeError("Synchronous execution is not set up!")
if self.__parent:
raise RuntimeError("You can only 'execute' root widgets, not %s!" % str(self))
for name,returnValue in bind.items():
def _quitThisDialog(returnValue = returnValue ):
get_manager().breakFromMainLoop( returnValue )
self.hide()
self.findChild(name=name).capture( _quitThisDialog , group_name = "__execute__" )
self.show()
if focus and self.findChild(name=focus):
self.findChild(name=focus).is_focusable = True
self.findChild(name=focus).requestFocus()
else:
self.is_focusable = True
self.requestFocus()
return get_manager().mainLoop()
def requestFocus(self):
"""
Requests focus.
The widget must be focusable in order for this to work. See
the is_focusable property.
"""
if self.isVisible():
self.real_widget.requestFocus()
def isModalFocusable(self):
"""
Checks if a widget is modal focusable.
True if no other widget has modal focus, false otherwise.
"""
return self.real_widget.isModalFocusable()
def isModalFocused(self):
"""
		Checks if the widget or its parent has modal focus.
"""
return self.real_widget.isModalFocused()
def requestModalFocus(self):
"""
Requests modal focus.
		When a widget has modal focus, only that widget and its children may receive input.
		The widget must be modal focusable in order for this to work. Therefore,
		no other widget should have modal focus.
"""
if self.isVisible():
if self.isModalFocusable():
self.real_widget.requestModalFocus()
def releaseModalFocus(self):
"""
Releases modal focus.
"""
if self.isModalFocused():
self.real_widget.releaseModalFocus()
def isModalMouseInputFocusable(self):
"""
Checks if a widget is modal mouse input focusable.
True if no other widget has modal mouse input focus, false otherwise.
"""
return self.real_widget.isModalMouseInputFocusable()
def isModalMouseInputFocused(self):
"""
		Checks if the widget or its parent has modal mouse input focus.
"""
return self.real_widget.isModalMouseInputFocused()
def requestModalMouseInputFocus(self):
"""
Requests modal mouse input focus.
When a widget has modal input focus that widget will be the only widget
receiving input even if the input occurs outside of the widget and
no matter what the input is.
The widget must be modal mouse input focusable in order for this to work.
		Therefore, no other widget should have modal input focus.
"""
if self.isVisible():
if self.isModalMouseInputFocusable():
self.real_widget.requestModalMouseInputFocus()
def releaseModalMouseInputFocus(self):
"""
Releases modal mouse input focus.
"""
if self.isModalMouseInputFocused():
self.real_widget.releaseModalMouseInputFocus()
def match(self,**kwargs):
"""
Matches the widget against a list of key-value pairs.
		It returns True only if every key is an attribute of the widget and its value matches.
"""
for k,v in kwargs.items():
if v != getattr(self,k,None):
return False
return True
def capture(self, callback, event_name="action", group_name="default"):
"""
Add a callback to be executed when the widget event occurs on this widget.
The callback must be either a callable or None.
The old event handler (if any) will be overridden by the callback.
If None is given, the event will be disabled. You can query L{isCaptured}
		whether this widget's events are currently captured.
It might be useful to check out L{tools.callbackWithArguments}.
@param callback: Event callback - may accept keyword arguments event and widget.
@param event_name: The event to capture - may be one of L{events.EVENTS} and defaults to "action"
@param group_name: Event group.
Event groups are used to have different B{channels} which don't interfere with each other.
For derived widgets that need to capture events it's advised to use the group_name 'widget'.
The 'default' group is used by default, and should be reserved for the application programmers.
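		Example (illustrative; ``onHover`` stands for any callback)::
			# The default event is "action":
			closeButton.capture( guiElement.hide )
			# Other events can be captured explicitly:
			closeButton.capture( onHover, event_name = "mouseEntered" )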
"""
self.event_mapper.capture( event_name, callback, group_name )
def isCaptured(self):
"""
		Check whether this widget's events are captured
(a callback is installed) or not.
"""
return bool(self.event_mapper.getCapturedEvents())
def show(self):
"""
Show the widget and all contained widgets.
"""
# add this widget to the manager
if not self._added:
get_manager().addWidget(self)
if self.parent is None and not self._top_added:
get_manager().addTopWidget(self)
# add childs of this widget to the manager
def _show(shown_widget):
get_manager().addWidget(shown_widget)
self.deepApply(_show)
if self.isVisible() and self.isSetVisible():
self.beforeShow()
self.adaptLayout()
if self.parent is None:
get_manager().placeWidget(self, self.position_technique)
return
self.beforeShow()
# Show real widget to distribute a widgetShown event.
self.real_widget.setVisible(True)
self.adaptLayout()
if self.parent is None:
get_manager().placeWidget(self, self.position_technique)
def hide(self, free=False):
"""
Hide the widget and all contained widgets.
"""
# remove this widget from the manager
if self._added:
get_manager().removeWidget(self)
if self.parent is None and self._top_added:
get_manager().removeTopWidget(self)
# remove childs of this widget from the manager
def _hide(hidden_widget):
get_manager().removeWidget(hidden_widget)
self.deepApply(_hide)
if not self.isVisible() and not self.isSetVisible():
self.adaptLayout()
self.afterHide()
return
# Hide real widget to distribute a widgetHidden event.
self.real_widget.setVisible(False)
if free:
if self.parent:
self.parent.removeChild(self)
self.removeAllChildren()
self.adaptLayout()
self.afterHide()
def isVisible(self):
"""
Check whether the widget is currently shown,
either directly or as part of a container widget.
"""
return self.real_widget.isVisible()
def isSetVisible(self):
"""
Check the real widget visible flag.
It checks not if the widget is currently shown!
This is needed e.g. if the parent is already hidden
but we want to hide the child too.
"""
return self.real_widget.isSetVisible()
def adaptLayout(self,recurse=True):
"""
Execute the Layout engine. Automatically called by L{show}.
In case you want to relayout a visible widget.
This function will automatically perform the layout adaption
from the top-most layouted widget.
To make this clear consider this arrangement::
VBox 1
- Container
- VBox 2
- HBox
- Label
If you call adaptLayout on the Label the layout from the VBox 2
will get recalculated, while the VBox 1 stays untouched.
@param recurse: Pass False here to force the layout to start from
this widget.
"""
widget = self
while widget.parent and recurse:
if not isLayouted(widget.parent):
break
widget = widget.parent
widget._recursiveResizeToContent()
widget._recursiveExpandContent()
def beforeShow(self):
"""
This method is called just before the widget is shown.
You can override this in derived widgets to add finalization
behaviour.
NOTE:
- if your widget is a container, you have to call
			  _resetTiling(), as you will lose this call by using
your override method
"""
def afterHide(self):
"""
This method is called just before the widget is hidden.
You can override this in derived widgets to add finalization
behaviour.
"""
def findChildren(self,**kwargs):
"""
Find all contained child widgets by attribute values.
Usage::
closeButtons = root_widget.findChildren(name='close')
buttons = root_widget.findChildren(__class__=pychan.widgets.Button)
"""
children = []
def _childCollector(widget):
if widget.match(**kwargs):
children.append(widget)
self.deepApply(_childCollector)
return children
def getNamedChildren(self, include_unnamed = False):
"""
Create a dictionary of child widgets with the keys being
their name. This will contain only Widgets which have
a name different from "__unnamed__" (which is the default).
@param include_unnamed: Defaults to false. If this is true unnamed widgets are added, too.
		The values are lists of widgets, so non-unique names
		are handled correctly as well.
Usage::
children = widget.getNamedChildren()
for widget in children.get("info",[])
print widget.name , " == info"
"""
children = {}
if include_unnamed:
def _childCollector(widget):
children.setdefault(widget._name,[]).append(widget)
else:
def _childCollector(widget):
if widget.has_name:
children.setdefault(widget._name,[]).append(widget)
self.deepApply(_childCollector)
return children
def findChild(self,**kwargs):
""" Find the first contained child widgets by attribute values.
Usage::
closeButton = root_widget.findChild(name='close')
"""
if kwargs.keys() == ["name"]:
return self.findChildByName(kwargs["name"])
children = self.findChildren(**kwargs)
if children:
return children[0]
return None
def findChildByName(self,name):
"""
Find first contained child widget by its name.
Note that this is the fast version of findChild(name="...")
and that you don't have to call this explicitly, it is used
if possible.
"""
result = []
def _childCollector(widget):
if widget._name == name:
result.append(widget)
raise StopTreeWalking
try:
self.deepApply(_childCollector)
except StopTreeWalking:
return result[0]
return None
def addChild(self,widget):
"""
This function adds a widget as child widget and is only implemented
in container widgets.
You'll need to call L{adaptLayout} if the container is already shown,
to adapt the layout to the new widget. This doesn't happen
automatically.
"""
raise RuntimeError("Trying to add a widget to %s, which doesn't allow this." % repr(self))
def insertChild(self, widget, position):
"""
		This function inserts a widget at a given index in the child list.
See L{addChild} and L{insertChildBefore}
"""
raise RuntimeError("Trying to insert a widget to %s, which doesn't allow this." % repr(self))
def insertChildBefore(self, widget, before):
"""
Inserts a child widget before a given widget. If the widget isn't found,
the widget is appended to the children list.
See L{addChild} and L{insertChild}
"""
raise RuntimeError("Trying to insert a widget to %s, which doesn't allow this." % repr(self))
def addChildren(self,*widgets):
"""
Add multiple widgets as children.
Only implemented for container widgets. See also L{addChild}
Usage::
container.addChildren( widget1, widget2, ... )
# or you can use this on a list
container.addChildren( [widget1,widget2,...] )
"""
if len(widgets) == 1 and not isinstance(widgets[0],Widget):
widgets = widgets[0]
for widget in widgets:
self.addChild(widget)
def removeChild(self,widget):
"""
This function removes a direct child widget and is only implemented
in container widgets.
You'll need to call L{adaptLayout} if the container is already shown,
to adapt the layout to the removed widget. This doesn't happen
automatically.
"""
raise RuntimeError("Trying to remove a widget from %s, which is not a container widget." % repr(self))
def removeChildren(self,*widgets):
"""
Remove a list of direct child widgets.
All widgets have to be direct child widgets.
To 'clear' a container take a look at L{removeAllChildren}.
See also L{removeChild}.
Usage::
container.removeChildren( widget1, widget2, ... )
# or you can use this on a list
container.removeChildren( [widget1,widget2,...] )
"""
if len(widgets) == 1 and not isinstance(widgets[0],Widget):
widgets = widgets[0]
for widget in widgets:
self.removeChild(widget)
def removeAllChildren(self):
"""
This function will remove all direct child widgets.
This will work even for non-container widgets.
"""
children = self.findChildren(parent=self)
for widget in children:
self.removeChild(widget)
def mapEvents(self,eventMap,ignoreMissing = False):
"""
Convenience function to map widget events to functions
in a batch.
Subsequent calls of mapEvents will merge events with different
widget names and override the previously set callback.
You can also pass C{None} instead of a callback, which will
disable the event completely.
@param eventMap: A dictionary with widget/event names as keys and callbacks as values.
		@param ignoreMissing: Normally this method raises a RuntimeError when a widget
		cannot be found - this behaviour can be overridden by passing True here.
The keys in the dictionary are parsed as C{"widgetName/eventName"} with the slash
separating the two. If no slash is found the eventName is assumed to be "action".
Additionally you can supply a group name or channel C{"widgetName/eventName/groupName"}.
Event handlers from one group are not overridden by handlers from another group.
The default group name is C{"default"}.
Example::
guiElement.mapEvents({
"button" : guiElement.hide,
"button/mouseEntered" : toggleButtonColorGreen,
"button/mouseExited" : toggleButtonColorBlue,
})
"""
children = self.getNamedChildren(include_unnamed=True)
for descr,func in eventMap.items():
name, event_name, group_name = events.splitEventDescriptor(descr)
#print name, event_name, group_name
widgets = children.get(name,[])
if widgets:
for widget in widgets:
widget.capture( func, event_name = event_name, group_name = group_name )
elif not ignoreMissing:
raise RuntimeError("No widget with the name: %s" % name)
def setInitialData(self,data):
"""
		Set the initial data on a widget; what this means depends on the Widget.
In case the widget does not accept initial data, a L{RuntimeError} is thrown.
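		Example (illustrative, mirroring L{distributeInitialData})::
			myListBox.setInitialData( ["1","2","3"] )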
"""
if not self.accepts_initial_data:
raise RuntimeError("Trying to set data on a widget that does not accept initial data. Widget: %s Data: %s " % (repr(self),repr(data)))
self._realSetInitialData(data)
def setData(self,data):
"""
		Set the user-mutable data on a widget; what this means depends on the Widget.
In case the widget does not accept data, a L{RuntimeError} is thrown.
This is inverse to L{getData}.
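		Example (illustrative, mirroring L{distributeData})::
			myTextField.setData( 'Hello World!' )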
"""
if not self.accepts_data:
raise RuntimeError("Trying to set data on a widget that does not accept data.")
self._realSetData(data)
def getData(self):
"""
		Get the user-mutable data of a widget; what this means depends on the Widget.
In case the widget does not have user mutable data, a L{RuntimeError} is thrown.
This is inverse to L{setData}.
"""
if not self.accepts_data:
raise RuntimeError("Trying to retrieve data from a widget that does not accept data.")
return self._realGetData()
def distributeInitialData(self,initialDataMap):
"""
Distribute B{initial} (not mutable by the user) data from a dictionary over the widgets in the hierarchy
using the keys as names and the values as the data (which is set via L{setInitialData}).
If more than one widget matches - the data is set on ALL matching widgets.
By default a missing widget is just ignored.
Use it like this::
guiElement.distributeInitialData({
'myTextField' : 'Hello World!',
'myListBox' : ["1","2","3"]
})
"""
children = self.getNamedChildren(include_unnamed=True)
for name,data in initialDataMap.items():
widgetList = children.get(name,[])
for widget in widgetList:
widget.setInitialData(data)
def distributeData(self,dataMap):
"""
Distribute data from a dictionary over the widgets in the hierarchy
using the keys as names and the values as the data (which is set via L{setData}).
This will only accept unique matches.
Use it like this::
guiElement.distributeData({
'myTextField' : 'Hello World!',
'myListBox' : ["1","2","3"]
})
"""
children = self.getNamedChildren(include_unnamed=True)
for name,data in dataMap.items():
widgetList = children.get(name,[])
if len(widgetList) != 1:
if get_manager().debug:
self.listNamedWidgets()
raise RuntimeError("DistributeData can only handle widgets with unique names.")
widgetList[0].setData(data)
def collectDataAsDict(self,widgetNames):
"""
Collect data from a widget hierarchy by names into a dictionary.
This can only handle UNIQUE widget names (in the hierarchy)
and will raise a RuntimeError if the number of matching widgets
is not equal to one.
Usage::
data = guiElement.collectDataAsDict(['myTextField','myListBox'])
print "You entered:",data['myTextField']," and selected ",data['myListBox']
"""
children = self.getNamedChildren(include_unnamed=True)
dataMap = {}
for name in widgetNames:
widgetList = children.get(name,[])
if len(widgetList) != 1:
if get_manager().debug:
self.listNamedWidgets()
raise RuntimeError("CollectData can only handle widgets with unique names.")
dataMap[name] = widgetList[0].getData()
return dataMap
def collectData(self,*widgetNames):
"""
Collect data from a widget hierarchy by names.
This can only handle UNIQUE widget names (in the hierarchy)
and will raise a RuntimeError if the number of matching widgets
is not equal to one.
This function takes an arbitrary number of widget names and
returns a list of the collected data in the same order.
In case only one argument is given, it will return just the
data, without putting it into a list.
Usage::
# Multiple element extraction:
text, selected = guiElement.collectData('myTextField','myListBox')
print "You entered:",text," and selected item nr",selected
# Single elements are handled gracefully, too:
test = guiElement.collectData('testElement')
"""
children = self.getNamedChildren(include_unnamed=True)
dataList = []
for name in widgetNames:
widgetList = children.get(name,[])
if len(widgetList) != 1:
if get_manager().debug:
self.listNamedWidgets()
raise RuntimeError("CollectData can only handle widgets with unique names.")
dataList.append( widgetList[0].getData() )
if len(dataList) == 1:
return dataList[0]
return dataList
def listNamedWidgets(self):
"""
This function will print a list of all currently named child-widgets
to the standard output. This is useful for debugging purposes.
"""
def _printNamedWidget(widget):
if widget.name != Widget.DEFAULT_NAME:
print widget.name.ljust(20),repr(widget).ljust(50),repr(widget.__parent)
print "Named child widgets of ",repr(self)
print "name".ljust(20),"widget".ljust(50),"parent"
self.deepApply(_printNamedWidget)
def stylize(self,style,**kwargs):
"""
Recursively apply a style to all widgets.
"""
def _restyle(widget):
get_manager().stylize(widget,style,**kwargs)
self.deepApply(_restyle)
def resizeToContent(self,recurse = True):
"""
Try to shrink the widget, so that it fits closely around its content.
Do not call directly.
"""
def expandContent(self,recurse = True):
"""
Try to expand any spacer in the widget within the current size.
Do not call directly.
"""
def _recursiveResizeToContent(self):
"""
Recursively call L{resizeToContent}. Uses L{deepApply}.
Do not call directly.
"""
def _callResizeToContent(widget):
#print "RTC:",widget
widget.resizeToContent()
self.deepApply(_callResizeToContent, shown_only = True)
def _recursiveExpandContent(self):
"""
Recursively call L{expandContent}. Uses L{deepApply}.
Do not call directly.
"""
def _callExpandContent(widget):
#print "ETC:",widget
widget.expandContent()
self.deepApply(_callExpandContent, leaves_first=False, shown_only = True)
def deepApply(self,visitorFunc, leaves_first = True, shown_only = False):
"""
Recursively apply a callable to all contained widgets and then the widget itself.
"""
visitorFunc(self)
def getAbsolutePos(self):
"""
Get absolute position on screen
"""
absX = self.x
absY = self.y
parent = self.parent
while parent is not None:
absX += parent.x
absY += parent.y
parent = parent.parent
return (absX, absY)
def sizeChanged(self):
pass
def __str__(self):
return "%s(name='%s')" % (self.__class__.__name__,self.name)
def __repr__(self):
return "<%s(name='%s') at %x>" % (self.__class__.__name__,self.name,id(self))
def _setSize(self,size):
if isinstance(size,fife.Point):
self.width, self.height = size.x, size.y
else:
self.width, self.height = size
def _getSize(self):
return self.width, self.height
def _setPosition(self,size):
if isinstance(size,fife.Point):
self.x, self.y = size.x, size.y
else:
self.x, self.y = size
def _getPosition(self):
return self.x, self.y
def _setX(self,x):self.real_widget.setX(x)
def _getX(self): return self.real_widget.getX()
def _setY(self,y): self.real_widget.setY(y)
def _getY(self): return self.real_widget.getY()
def _setWidth(self,w):
old_width = self.width
w = max(self.min_size[0],w)
w = min(self.max_size[0],w)
self.real_widget.setWidth(w)
if w != old_width:
self.sizeChanged()
def _getWidth(self): return self.real_widget.getWidth()
def _setHeight(self,h):
old_height = self.height
h = max(self.min_size[1],h)
h = min(self.max_size[1],h)
self.real_widget.setHeight(h)
if h != old_height:
self.sizeChanged()
def _getHeight(self): return self.real_widget.getHeight()
def _getMinWidth(self): return self.min_size[0]
def _getMaxWidth(self): return self.max_size[0]
def _getMinHeight(self): return self.min_size[1]
def _getMaxHeight(self): return self.max_size[1]
def _setMinWidth(self,w):
self.min_size = w, self.min_size[1]
def _setMaxWidth(self,w):
self.max_size = w, self.max_size[1]
def _setMinHeight(self,h):
self.min_size = self.min_size[0],h
def _setMaxHeight(self,h):
self.max_size = self.max_size[0],h
def _setFont(self, font):
self._font = font
self.real_font = get_manager().getFont(font)
self.real_widget.setFont(self.real_font)
def _getFont(self):
return self._font
def _getBorderSize(self): return self.real_widget.getFrameSize()
def _setBorderSize(self,size): self.real_widget.setFrameSize(size)
base_color = ColorProperty("BaseColor")
background_color = ColorProperty("BackgroundColor")
foreground_color = ColorProperty("ForegroundColor")
selection_color = ColorProperty("SelectionColor")
def _getStyle(self): return self._style
def _setStyle(self,style):
self._style = style
get_manager().stylize(self,style)
style = property(_getStyle,_setStyle)
def _getParent(self):
if self.__parent is not None:
return self.__parent()
return None
def _setParent(self,parent):
if parent and not issubclass(type(parent), Widget):
raise RuntimeError("Parent must be subclass of the Widget type.")
if self.__parent is not None and self.__parent() is not parent:
if self.__parent() is not None and parent is not None:
print "Widget containment fumble:", self, self.__parent, parent
self.__parent().removeChild(self)
if parent is not None:
self.__parent = weakref.ref(parent)
else:
self.__parent = None
parent = property(_getParent,_setParent)
def _setName(self,name):
self._name = name
if name != Widget.DEFAULT_NAME:
self.has_name = True
def _getName(self):
# __str__ relies on self.name
return getattr(self,'_name','__no_name_yet__')
name = property(_getName,_setName)
def _setFocusable(self, b): self.real_widget.setFocusable(b)
def _isFocusable(self):
return self.real_widget.isFocusable()
def _createNameWithPrefix(self, prefix):
if not isinstance(prefix, str):
raise RuntimeError("Widget names should be prefixed with a string")
if prefix in self._usedPrefixes:
raise RuntimeError("Widget %s already cloned with prefix %s" % (self.name, prefix))
if len(prefix) == 0:
raise RuntimeError("New widget name cannot be created with an empty prefix")
self._usedPrefixes.append(prefix)
return prefix + self.name
x = property(_getX,_setX)
y = property(_getY,_setY)
width = property(_getWidth,_setWidth)
height = property(_getHeight,_setHeight)
min_width = property(_getMinWidth,_setMinWidth)
min_height = property(_getMinHeight,_setMinHeight)
max_width = property(_getMaxWidth,_setMaxWidth)
max_height = property(_getMaxHeight,_setMaxHeight)
size = property(_getSize,_setSize)
position = property(_getPosition,_setPosition)
font = property(_getFont,_setFont)
border_size = property(_getBorderSize,_setBorderSize)
is_focusable = property(_isFocusable,_setFocusable)
| lgpl-2.1 |
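A compact usage sketch for the data/event API above, assuming pychan's loadXML entry point; the layout file, widget names, and the onOk/onHover callbacks are hypothetical stand-ins:

gui = pychan.loadXML('gui/dialog.xml')   # hypothetical layout file
gui.mapEvents({
    'okButton': onOk,                    # event name defaults to "action"
    'okButton/mouseEntered': onHover,    # explicit event name
})
gui.distributeInitialData({'titleLabel': 'Enter your name'})
gui.distributeData({'nameField': ''})
name = gui.collectData('nameField')      # a single name returns the bare value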
jart/tensorflow | tensorflow/contrib/constrained_optimization/python/constrained_optimizer.py | 13 | 8208 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines base class for `ConstrainedOptimizer`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.training import optimizer as train_optimizer
@six.add_metaclass(abc.ABCMeta)
class ConstrainedOptimizer(object):
"""Base class representing a constrained optimizer.
A ConstrainedOptimizer wraps a tf.train.Optimizer (or more than one), and
applies it to a ConstrainedMinimizationProblem. Unlike a tf.train.Optimizer,
which takes a tensor to minimize as a parameter to its minimize() method, a
constrained optimizer instead takes a ConstrainedMinimizationProblem.
"""
def __init__(self, optimizer):
"""Constructs a new `ConstrainedOptimizer`.
Args:
optimizer: tf.train.Optimizer, used to optimize the
ConstrainedMinimizationProblem.
Returns:
A new `ConstrainedOptimizer`.
"""
self._optimizer = optimizer
@property
def optimizer(self):
"""Returns the `tf.train.Optimizer` used for optimization."""
return self._optimizer
def minimize_unconstrained(self,
minimization_problem,
global_step=None,
var_list=None,
gate_gradients=train_optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
"""Returns an `Op` for minimizing the unconstrained problem.
Unlike `minimize_constrained`, this function ignores the `constraints` (and
`proxy_constraints`) portion of the minimization problem entirely, and only
minimizes `objective`.
Args:
minimization_problem: ConstrainedMinimizationProblem, the problem to
optimize.
global_step: as in `tf.train.Optimizer`'s `minimize` method.
var_list: as in `tf.train.Optimizer`'s `minimize` method.
gate_gradients: as in `tf.train.Optimizer`'s `minimize` method.
aggregation_method: as in `tf.train.Optimizer`'s `minimize` method.
colocate_gradients_with_ops: as in `tf.train.Optimizer`'s `minimize`
method.
name: as in `tf.train.Optimizer`'s `minimize` method.
grad_loss: as in `tf.train.Optimizer`'s `minimize` method.
Returns:
TensorFlow Op.
"""
return self.optimizer.minimize(
minimization_problem.objective,
global_step=global_step,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
name=name,
grad_loss=grad_loss)
@abc.abstractmethod
def minimize_constrained(self,
minimization_problem,
global_step=None,
var_list=None,
gate_gradients=train_optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
"""Returns an `Op` for minimizing the constrained problem.
Unlike `minimize_unconstrained`, this function attempts to find a solution
that minimizes the `objective` portion of the minimization problem while
satisfying the `constraints` portion.
Args:
minimization_problem: ConstrainedMinimizationProblem, the problem to
optimize.
global_step: as in `tf.train.Optimizer`'s `minimize` method.
var_list: as in `tf.train.Optimizer`'s `minimize` method.
gate_gradients: as in `tf.train.Optimizer`'s `minimize` method.
aggregation_method: as in `tf.train.Optimizer`'s `minimize` method.
colocate_gradients_with_ops: as in `tf.train.Optimizer`'s `minimize`
method.
name: as in `tf.train.Optimizer`'s `minimize` method.
grad_loss: as in `tf.train.Optimizer`'s `minimize` method.
Returns:
TensorFlow Op.
"""
pass
def minimize(self,
minimization_problem,
unconstrained_steps=None,
global_step=None,
var_list=None,
gate_gradients=train_optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
"""Returns an `Op` for minimizing the constrained problem.
This method combines the functionality of `minimize_unconstrained` and
`minimize_constrained`. If global_step < unconstrained_steps, it will
perform an unconstrained update, and if global_step >= unconstrained_steps,
it will perform a constrained update.
The reason for this functionality is that it may be best to initialize the
constrained optimizer with an approximate optimum of the unconstrained
problem.
Args:
minimization_problem: ConstrainedMinimizationProblem, the problem to
optimize.
unconstrained_steps: int, number of steps for which we should perform
unconstrained updates, before transitioning to constrained updates.
global_step: as in `tf.train.Optimizer`'s `minimize` method.
var_list: as in `tf.train.Optimizer`'s `minimize` method.
gate_gradients: as in `tf.train.Optimizer`'s `minimize` method.
aggregation_method: as in `tf.train.Optimizer`'s `minimize` method.
colocate_gradients_with_ops: as in `tf.train.Optimizer`'s `minimize`
method.
name: as in `tf.train.Optimizer`'s `minimize` method.
grad_loss: as in `tf.train.Optimizer`'s `minimize` method.
Returns:
TensorFlow Op.
Raises:
ValueError: If unconstrained_steps is provided, but global_step is not.
"""
def unconstrained_fn():
"""Returns an `Op` for minimizing the unconstrained problem."""
return self.minimize_unconstrained(
minimization_problem=minimization_problem,
global_step=global_step,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
name=name,
grad_loss=grad_loss)
def constrained_fn():
"""Returns an `Op` for minimizing the constrained problem."""
return self.minimize_constrained(
minimization_problem=minimization_problem,
global_step=global_step,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
name=name,
grad_loss=grad_loss)
if unconstrained_steps is not None:
if global_step is None:
raise ValueError(
"global_step cannot be None if unconstrained_steps is provided")
unconstrained_steps_tensor = ops.convert_to_tensor(unconstrained_steps)
dtype = unconstrained_steps_tensor.dtype
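      # Both branch ops are built up front; cond() selects between them when
      # the graph runs, so a single graph transitions from unconstrained to
      # constrained updates as global_step crosses the threshold.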
return control_flow_ops.cond(
standard_ops.cast(global_step, dtype) < unconstrained_steps_tensor,
true_fn=unconstrained_fn,
false_fn=constrained_fn)
else:
return constrained_fn()
| apache-2.0 |
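As a rough illustration of the contract the abstract method above imposes (not one of this package's real algorithms), a subclass only needs to supply minimize_constrained. The sketch below folds constraint violations into a fixed penalty term; the use of minimization_problem.constraints and the unit penalty weight are simplifying assumptions.

class PenaltyConstrainedOptimizer(ConstrainedOptimizer):
  """Toy subclass: penalizes the objective by the summed constraint violations."""

  def minimize_constrained(self,
                           minimization_problem,
                           global_step=None,
                           var_list=None,
                           gate_gradients=train_optimizer.Optimizer.GATE_OP,
                           aggregation_method=None,
                           colocate_gradients_with_ops=False,
                           name=None,
                           grad_loss=None):
    # Sum of the positive parts of the constraint functions.
    violation = standard_ops.reduce_sum(
        standard_ops.maximum(minimization_problem.constraints, 0.0))
    return self.optimizer.minimize(
        minimization_problem.objective + violation,
        global_step=global_step,
        var_list=var_list,
        gate_gradients=gate_gradients,
        aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops,
        name=name,
        grad_loss=grad_loss)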
jj-umn/tools-iuc | tools/query_tabular/filters.py | 12 | 6738 | #!/usr/bin/env python
from __future__ import print_function
import re
import sys
class LineFilter(object):
def __init__(self, source, filter_dict):
self.source = source
self.filter_dict = filter_dict
self.func = lambda i, l: l.rstrip('\r\n') if l else None
self.src_lines = []
self.src_line_cnt = 0
if not filter_dict:
return
if filter_dict['filter'] == 'regex':
rgx = re.compile(filter_dict['pattern'])
if filter_dict['action'] == 'exclude_match':
self.func = lambda i, l: l if not rgx.match(l) else None
elif filter_dict['action'] == 'include_match':
self.func = lambda i, l: l if rgx.match(l) else None
elif filter_dict['action'] == 'exclude_find':
self.func = lambda i, l: l if not rgx.search(l) else None
elif filter_dict['action'] == 'include_find':
self.func = lambda i, l: l if rgx.search(l) else None
elif filter_dict['filter'] == 'select_columns':
cols = [int(c) - 1 for c in filter_dict['columns']]
self.func = lambda i, l: self.select_columns(l, cols)
elif filter_dict['filter'] == 'replace':
p = filter_dict['pattern']
r = filter_dict['replace']
c = int(filter_dict['column']) - 1
if 'add' not in filter_dict\
or filter_dict['add'] not in ['prepend',
'append',
'before',
'after']:
self.func = lambda i, l: '\t'.join(
[x if j != c else re.sub(p, r, x)
for j, x in enumerate(l.split('\t'))])
else:
a = 0 if filter_dict['add'] == 'prepend'\
else min(0, c - 1) if filter_dict['add'] == 'before'\
else c + 1 if filter_dict['add'] == 'after'\
else None
self.func = lambda i, l: self.replace_add(l, p, r, c, a)
elif filter_dict['filter'] == 'prepend_line_num':
self.func = lambda i, l: '%d\t%s' % (i, l)
elif filter_dict['filter'] == 'append_line_num':
self.func = lambda i, l: '%s\t%d' % (l.rstrip('\r\n'), i)
elif filter_dict['filter'] == 'prepend_text':
s = filter_dict['column_text']
self.func = lambda i, l: '%s\t%s' % (s, l)
elif filter_dict['filter'] == 'append_text':
s = filter_dict['column_text']
self.func = lambda i, l: '%s\t%s' % (l.rstrip('\r\n'), s)
elif filter_dict['filter'] == 'skip':
cnt = filter_dict['count']
self.func = lambda i, l: l if i > cnt else None
elif filter_dict['filter'] == 'normalize':
cols = [int(c) - 1 for c in filter_dict['columns']]
sep = filter_dict['separator']
self.func = lambda i, l: self.normalize(l, cols, sep)
def __iter__(self):
return self
def __next__(self):
if not self.src_lines:
self.get_lines()
if self.src_lines:
return self.src_lines.pop(0)
raise StopIteration
next = __next__
def select_columns(self, line, cols):
fields = line.split('\t')
return '\t'.join([fields[x] for x in cols])
def replace_add(self, line, pat, rep, col, pos):
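        # pos is the insertion index for the generated column; None means
        # append it after the existing fields (handled below).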
fields = line.rstrip('\r\n').split('\t')
i = pos if pos is not None else len(fields)
val = ''
if col < len(fields) and re.search(pat, fields[col]):
val = re.sub(pat, rep, fields[col]).replace('\t', ' ')
return '\t'.join(fields[:i] + [val] + fields[i:])
def normalize(self, line, split_cols, sep):
lines = []
fields = line.rstrip('\r\n').split('\t')
split_fields = dict()
cnt = 0
for c in split_cols:
if c < len(fields):
split_fields[c] = fields[c].split(sep)
cnt = max(cnt, len(split_fields[c]))
if cnt == 0:
lines.append('\t'.join(fields))
else:
for n in range(0, cnt):
flds = [x if c not in split_cols else split_fields[c][n]
if n < len(split_fields[c])
else '' for (c, x) in enumerate(fields)]
lines.append('\t'.join(flds))
return lines
def get_lines(self):
for i, next_line in enumerate(self.source):
self.src_line_cnt += 1
line = self.func(self.src_line_cnt, next_line)
if line:
if isinstance(line, list):
self.src_lines.extend(line)
else:
self.src_lines.append(line)
return
class TabularReader:
"""
Tabular file iterator. Yields each row as a list of field values.
"""
def __init__(self, input_file, skip=0, comment_char=None, col_idx=None,
filters=None):
self.skip = skip
self.comment_char = comment_char
self.col_idx = col_idx
self.filters = filters
self.tsv_file = \
input_file if hasattr(input_file, 'readline') else open(input_file)
if skip and skip > 0:
for i in range(skip):
if not self.tsv_file.readline():
break
source = LineFilter(self.tsv_file, None)
if comment_char:
source = LineFilter(source,
{"filter": "regex", "pattern": comment_char,
"action": "exclude_match"})
if filters:
for f in filters:
source = LineFilter(source, f)
self.source = source
def __iter__(self):
return self
def __next__(self):
''' Iteration '''
for i, line in enumerate(self.source):
fields = line.rstrip('\r\n').split('\t')
if self.col_idx:
fields = [fields[i] for i in self.col_idx]
return fields
raise StopIteration
next = __next__
def filter_file(input_file, output, skip=0, comment_char='#', filters=None):
data_lines = 0
try:
tr = TabularReader(input_file, skip=skip, comment_char=comment_char,
filters=filters)
for linenum, fields in enumerate(tr):
data_lines += 1
try:
output.write('%s\n' % '\t'.join(fields))
except Exception as e:
print('Failed at line: %d err: %s' % (linenum, e),
file=sys.stderr)
except Exception as e:
exit('Error: %s' % (e))
| mit |
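A short driver sketch for the classes above; the input path is hypothetical, and the filter dicts use the keys handled in LineFilter.__init__:

import sys

filters = [
    {'filter': 'skip', 'count': 1},                   # drop one header line
    {'filter': 'select_columns', 'columns': [1, 3]},  # keep columns 1 and 3 (1-based)
    {'filter': 'prepend_line_num'},                   # number the surviving rows
]
filter_file('input.tsv', sys.stdout, skip=0, comment_char='#', filters=filters)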
blisseth/ThinkStats2 | code/cumulative.py | 67 | 6252 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import numpy as np
import nsfg
import first
import thinkstats2
import thinkplot
def PercentileRank(scores, your_score):
"""Computes the percentile rank relative to a sample of scores."""
count = 0
for score in scores:
if score <= your_score:
count += 1
percentile_rank = 100.0 * count / len(scores)
return percentile_rank
scores = [55, 66, 77, 88, 99]
your_score = 88
print('score, percentile rank')
for score in scores:
print(score, PercentileRank(scores, score))
print()
def Percentile(scores, percentile_rank):
"""Computes the value that corresponds to a given percentile rank. """
scores.sort()
for score in scores:
if PercentileRank(scores, score) >= percentile_rank:
return score
def Percentile2(scores, percentile_rank):
"""Computes the value that corresponds to a given percentile rank.
Slightly more efficient.
"""
scores.sort()
index = percentile_rank * (len(scores)-1) // 100
return scores[index]
print('prank, score, score')
for percentile_rank in [0, 20, 25, 40, 50, 60, 75, 80, 100]:
print(percentile_rank,
Percentile(scores, percentile_rank),
Percentile2(scores, percentile_rank))
def EvalCdf(sample, x):
"""Computes CDF(x) in a sample.
sample: sequence
x: value
returns: cumulative probability
"""
count = 0.0
for value in sample:
if value <= x:
count += 1.0
prob = count / len(sample)
return prob
sample = [1, 2, 2, 3, 5]
print('x', 'CDF(x)')
for x in range(0, 7):
print(x, EvalCdf(sample, x))
def PositionToPercentile(position, field_size):
"""Converts from position in the field to percentile.
position: int
field_size: int
"""
beat = field_size - position + 1
percentile = 100.0 * beat / field_size
return percentile
def PercentileToPosition(percentile, field_size):
"""Converts from percentile to hypothetical position in the field.
percentile: 0-100
field_size: int
"""
beat = percentile * field_size / 100.0
position = field_size - beat + 1
return position
# my time 42:44
print('Percentile rank in field', PositionToPercentile(97, 1633))
print('Percentile rank in age group', PositionToPercentile(26, 256))
percentile = PositionToPercentile(26, 256)
print('Equivalent position in M50-59', PercentileToPosition(percentile, 171))
# 17th place = 46:05
print('Equivalent position in F20-29', PercentileToPosition(percentile, 448))
# 48:28
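# For instance, a hypothetical 10th place in a field of 200 maps to the
# 95.5th percentile, which corresponds to position 3.25 in a field of 50:
# PositionToPercentile(10, 200) == 95.5
# PercentileToPosition(95.5, 50) == 3.25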
def MakeExample():
"""Makes a simple example CDF."""
t = [2, 1, 3, 2, 5]
cdf = thinkstats2.Cdf(t)
thinkplot.Clf()
thinkplot.Cdf(cdf)
thinkplot.Save(root='cumulative_example_cdf',
xlabel='x',
ylabel='CDF',
axis=[0, 6, 0, 1],
legend=False)
def MakeFigures(live, firsts, others):
"""Creates several figures for the book.
live: DataFrame
firsts: DataFrame
others: DataFrame
"""
first_wgt = firsts.totalwgt_lb
first_wgt_dropna = first_wgt.dropna()
print('Firsts', len(first_wgt), len(first_wgt_dropna))
#assert len(first_wgt_dropna) == 4381
other_wgt = others.totalwgt_lb
other_wgt_dropna = other_wgt.dropna()
print('Others', len(other_wgt), len(other_wgt_dropna))
#assert len(other_wgt_dropna) == 4706
first_pmf = thinkstats2.Pmf(first_wgt_dropna, label='first')
other_pmf = thinkstats2.Pmf(other_wgt_dropna, label='other')
width = 0.4 / 16
# plot PMFs of birth weights for first babies and others
thinkplot.PrePlot(2)
thinkplot.Hist(first_pmf, align='right', width=width)
thinkplot.Hist(other_pmf, align='left', width=width)
thinkplot.Save(root='cumulative_birthwgt_pmf',
title='Birth weight',
xlabel='weight (pounds)',
ylabel='PMF')
# plot CDFs of birth weights for first babies and others
first_cdf = thinkstats2.Cdf(firsts.totalwgt_lb, label='first')
other_cdf = thinkstats2.Cdf(others.totalwgt_lb, label='other')
thinkplot.PrePlot(2)
thinkplot.Cdfs([first_cdf, other_cdf])
thinkplot.Save(root='cumulative_birthwgt_cdf',
title='Birth weight',
xlabel='weight (pounds)',
ylabel='CDF',
axis=[0, 12.5, 0, 1]
)
def MakeCdf(live):
"""Plot the CDF of pregnancy lengths for live births.
live: DataFrame for live births
"""
cdf = thinkstats2.Cdf(live.prglngth, label='prglngth')
thinkplot.Cdf(cdf)
thinkplot.Save('cumulative_prglngth_cdf',
title='Pregnancy length',
xlabel='weeks',
ylabel='CDF')
def RandomFigure(live):
weights = live.totalwgt_lb
cdf = thinkstats2.Cdf(weights, label='totalwgt_lb')
sample = np.random.choice(weights, 100, replace=True)
ranks = [cdf.PercentileRank(x) for x in sample]
rank_cdf = thinkstats2.Cdf(ranks, label='percentile ranks')
thinkplot.Cdf(rank_cdf)
thinkplot.Save(root='cumulative_random',
xlabel='percentile rank',
ylabel='CDF')
def TestSample(live):
"""Plots the distribution of weights against a random sample.
live: DataFrame for live births
"""
weights = live.totalwgt_lb
cdf = thinkstats2.Cdf(weights, label='totalwgt_lb')
sample = cdf.Sample(1000)
sample_cdf = thinkstats2.Cdf(sample, label='sample')
thinkplot.PrePlot(2)
thinkplot.Cdfs([cdf, sample_cdf])
thinkplot.Save(root='cumulative_sample',
xlabel='weight (pounds)',
ylabel='CDF')
def main(name, data_dir=''):
thinkstats2.RandomSeed(17)
MakeExample()
live, firsts, others = first.MakeFrames()
RandomFigure(live)
TestSample(live)
MakeCdf(live)
MakeFigures(live, firsts, others)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
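A small self-contained sanity check tying the helpers above together; it uses only functions defined in this file:

sample = [1, 2, 2, 3, 5]
# PercentileRank is EvalCdf scaled to 0-100.
for x in sample:
    assert PercentileRank(sample, x) == 100.0 * EvalCdf(sample, x)
# Percentile inverts PercentileRank back onto a sample value
# (pass a copy, since Percentile sorts its argument in place).
assert Percentile(list(sample), PercentileRank(sample, 3)) == 3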
israeleriston/scientific-week | backend/venv/lib/python3.5/site-packages/sqlalchemy/engine/strategies.py | 32 | 9462 | # engine/strategies.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Strategies for creating new instances of Engine types.
These are semi-private implementation classes which provide the
underlying behavior for the "strategy" keyword argument available on
:func:`~sqlalchemy.engine.create_engine`. Currently available options are
``plain``, ``threadlocal``, and ``mock``.
New strategies can be added via new ``EngineStrategy`` classes.
"""
from operator import attrgetter
from sqlalchemy.engine import base, threadlocal, url
from sqlalchemy import util, event
from sqlalchemy import pool as poollib
from sqlalchemy.sql import schema
strategies = {}
class EngineStrategy(object):
"""An adaptor that processes input arguments and produces an Engine.
Provides a ``create`` method that receives input arguments and
produces an instance of base.Engine or a subclass.
"""
def __init__(self):
strategies[self.name] = self
def create(self, *args, **kwargs):
"""Given arguments, returns a new Engine instance."""
raise NotImplementedError()
class DefaultEngineStrategy(EngineStrategy):
"""Base class for built-in strategies."""
def create(self, name_or_url, **kwargs):
# create url.URL object
u = url.make_url(name_or_url)
plugins = u._instantiate_plugins(kwargs)
u.query.pop('plugin', None)
entrypoint = u._get_entrypoint()
dialect_cls = entrypoint.get_dialect_cls(u)
if kwargs.pop('_coerce_config', False):
def pop_kwarg(key, default=None):
value = kwargs.pop(key, default)
if key in dialect_cls.engine_config_types:
value = dialect_cls.engine_config_types[key](value)
return value
else:
pop_kwarg = kwargs.pop
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = pop_kwarg(k)
dbapi = kwargs.pop('module', None)
if dbapi is None:
dbapi_args = {}
for k in util.get_func_kwargs(dialect_cls.dbapi):
if k in kwargs:
dbapi_args[k] = pop_kwarg(k)
dbapi = dialect_cls.dbapi(**dbapi_args)
dialect_args['dbapi'] = dbapi
for plugin in plugins:
plugin.handle_dialect_kwargs(dialect_cls, dialect_args)
# create dialect
dialect = dialect_cls(**dialect_args)
# assemble connection arguments
(cargs, cparams) = dialect.create_connect_args(u)
cparams.update(pop_kwarg('connect_args', {}))
cargs = list(cargs) # allow mutability
# look for existing pool or create
pool = pop_kwarg('pool', None)
if pool is None:
def connect(connection_record=None):
if dialect._has_events:
for fn in dialect.dispatch.do_connect:
connection = fn(
dialect, connection_record, cargs, cparams)
if connection is not None:
return connection
return dialect.connect(*cargs, **cparams)
creator = pop_kwarg('creator', connect)
poolclass = pop_kwarg('poolclass', None)
if poolclass is None:
poolclass = dialect_cls.get_pool_class(u)
pool_args = {
'dialect': dialect
}
# consume pool arguments from kwargs, translating a few of
# the arguments
translate = {'logging_name': 'pool_logging_name',
'echo': 'echo_pool',
'timeout': 'pool_timeout',
'recycle': 'pool_recycle',
'events': 'pool_events',
'use_threadlocal': 'pool_threadlocal',
'reset_on_return': 'pool_reset_on_return'}
for k in util.get_cls_kwargs(poolclass):
tk = translate.get(k, k)
if tk in kwargs:
pool_args[k] = pop_kwarg(tk)
for plugin in plugins:
plugin.handle_pool_kwargs(poolclass, pool_args)
pool = poolclass(creator, **pool_args)
else:
if isinstance(pool, poollib._DBProxy):
pool = pool.get_pool(*cargs, **cparams)
else:
pool = pool
pool._dialect = dialect
# create engine.
engineclass = self.engine_cls
engine_args = {}
for k in util.get_cls_kwargs(engineclass):
if k in kwargs:
engine_args[k] = pop_kwarg(k)
_initialize = kwargs.pop('_initialize', True)
# all kwargs should be consumed
if kwargs:
raise TypeError(
"Invalid argument(s) %s sent to create_engine(), "
"using configuration %s/%s/%s. Please check that the "
"keyword arguments are appropriate for this combination "
"of components." % (','.join("'%s'" % k for k in kwargs),
dialect.__class__.__name__,
pool.__class__.__name__,
engineclass.__name__))
engine = engineclass(pool, dialect, u, **engine_args)
if _initialize:
do_on_connect = dialect.on_connect()
if do_on_connect:
def on_connect(dbapi_connection, connection_record):
conn = getattr(
dbapi_connection, '_sqla_unwrap', dbapi_connection)
if conn is None:
return
do_on_connect(conn)
event.listen(pool, 'first_connect', on_connect)
event.listen(pool, 'connect', on_connect)
def first_connect(dbapi_connection, connection_record):
c = base.Connection(engine, connection=dbapi_connection,
_has_events=False)
c._execution_options = util.immutabledict()
dialect.initialize(c)
event.listen(pool, 'first_connect', first_connect, once=True)
dialect_cls.engine_created(engine)
if entrypoint is not dialect_cls:
entrypoint.engine_created(engine)
for plugin in plugins:
plugin.engine_created(engine)
return engine
class PlainEngineStrategy(DefaultEngineStrategy):
"""Strategy for configuring a regular Engine."""
name = 'plain'
engine_cls = base.Engine
PlainEngineStrategy()
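# Instantiating a strategy registers it, via EngineStrategy.__init__, in the
# module-level strategies dict that create_engine looks up by name.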
class ThreadLocalEngineStrategy(DefaultEngineStrategy):
"""Strategy for configuring an Engine with threadlocal behavior."""
name = 'threadlocal'
engine_cls = threadlocal.TLEngine
ThreadLocalEngineStrategy()
class MockEngineStrategy(EngineStrategy):
"""Strategy for configuring an Engine-like object with mocked execution.
Produces a single mock Connectable object which dispatches
statement execution to a passed-in function.
"""
name = 'mock'
def create(self, name_or_url, executor, **kwargs):
# create url.URL object
u = url.make_url(name_or_url)
dialect_cls = u.get_dialect()
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = kwargs.pop(k)
# create dialect
dialect = dialect_cls(**dialect_args)
return MockEngineStrategy.MockConnection(dialect, executor)
class MockConnection(base.Connectable):
def __init__(self, dialect, execute):
self._dialect = dialect
self.execute = execute
engine = property(lambda s: s)
dialect = property(attrgetter('_dialect'))
name = property(lambda s: s._dialect.name)
schema_for_object = schema._schema_getter(None)
def contextual_connect(self, **kwargs):
return self
def execution_options(self, **kw):
return self
def compiler(self, statement, parameters, **kwargs):
return self._dialect.compiler(
statement, parameters, engine=self, **kwargs)
def create(self, entity, **kwargs):
kwargs['checkfirst'] = False
from sqlalchemy.engine import ddl
ddl.SchemaGenerator(
self.dialect, self, **kwargs).traverse_single(entity)
def drop(self, entity, **kwargs):
kwargs['checkfirst'] = False
from sqlalchemy.engine import ddl
ddl.SchemaDropper(
self.dialect, self, **kwargs).traverse_single(entity)
def _run_visitor(self, visitorcallable, element,
connection=None,
**kwargs):
kwargs['checkfirst'] = False
visitorcallable(self.dialect, self,
**kwargs).traverse_single(element)
def execute(self, object, *multiparams, **params):
raise NotImplementedError()
MockEngineStrategy()
| mit |
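A brief sketch of how these strategies surface through the public API; the 'mock' strategy routes every execution to a user callable, which is a common way to dump generated DDL. The connection URL is illustrative:

from sqlalchemy import create_engine

def dump(sql, *multiparams, **params):
    # Compile against the mock engine's dialect instead of executing.
    print(sql.compile(dialect=engine.dialect))

engine = create_engine('postgresql://', strategy='mock', executor=dump)
# e.g. metadata.create_all(engine) now prints the DDL instead of executing it.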
moniker-dns/debian-beaver | beaver/tests/test_transport_config.py | 4 | 2499 | # -*- coding: utf-8 -*-
import fakeredis
import logging
import mock
import tempfile
import unittest
import beaver
from beaver.config import BeaverConfig
from beaver.transports import create_transport
from beaver.transports.base_transport import BaseTransport
class DummyTransport(BaseTransport):
pass
with mock.patch('pika.adapters.BlockingConnection', autospec=True) as mock_pika:
class TransportConfigTests(unittest.TestCase):
def setUp(self):
self.logger = logging.getLogger(__name__)
def _get_config(self, **kwargs):
empty_conf = tempfile.NamedTemporaryFile(delete=True)
return BeaverConfig(mock.Mock(config=empty_conf.name, **kwargs))
@mock.patch('pika.adapters.BlockingConnection', mock_pika)
def test_builtin_rabbitmq(self):
beaver_config = self._get_config(transport='rabbitmq')
transport = create_transport(beaver_config, logger=self.logger)
self.assertIsInstance(transport, beaver.transports.rabbitmq_transport.RabbitmqTransport)
@mock.patch('redis.StrictRedis', fakeredis.FakeStrictRedis)
def test_builtin_redis(self):
beaver_config = self._get_config(transport='redis')
transport = create_transport(beaver_config, logger=self.logger)
self.assertIsInstance(transport, beaver.transports.redis_transport.RedisTransport)
def test_builtin_stdout(self):
beaver_config = self._get_config(transport='stdout')
transport = create_transport(beaver_config, logger=self.logger)
self.assertIsInstance(transport, beaver.transports.stdout_transport.StdoutTransport)
def test_builtin_udp(self):
beaver_config = self._get_config(transport='udp')
transport = create_transport(beaver_config, logger=self.logger)
self.assertIsInstance(transport, beaver.transports.udp_transport.UdpTransport)
def test_builtin_zmq(self):
beaver_config = self._get_config(transport='zmq')
transport = create_transport(beaver_config, logger=self.logger)
self.assertIsInstance(transport, beaver.transports.zmq_transport.ZmqTransport)
def test_custom_transport(self):
beaver_config = self._get_config(transport='beaver.tests.test_transport_config.DummyTransport')
transport = create_transport(beaver_config, logger=self.logger)
self.assertIsInstance(transport, DummyTransport)
| mit |
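The last test above shows that a dotted path in the transport setting is resolved and instantiated, so a user-defined transport only needs to subclass BaseTransport. A minimal sketch follows; the module path is hypothetical and the callback signature is an assumption about BaseTransport's hook, not taken from this file:

# in mypackage/transports.py (hypothetical module)
from beaver.transports.base_transport import BaseTransport

class PrintTransport(BaseTransport):
    def callback(self, filename, lines, **kwargs):  # assumed hook signature
        for line in lines:
            print('%s: %s' % (filename, line))

# the beaver config would then point at it:
#   transport: mypackage.transports.PrintTransport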