# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the classes and utility functions for distance and
cartesian coordinates.
"""
import warnings
import numpy as np
from astropy import units as u
from astropy.utils.exceptions import AstropyWarning
from .angles import Angle
__all__ = ['Distance']
__doctest_requires__ = {'*': ['scipy']}
class Distance(u.SpecificTypeQuantity):
"""
A one-dimensional distance.
This can be initialized by providing one of the following:
* Distance ``value`` (array or float) and a ``unit``
* |Quantity| object with dimensionality of length
* Redshift and (optionally) a `~astropy.cosmology.Cosmology`
* Distance modulus
* Parallax
Parameters
----------
value : scalar or `~astropy.units.Quantity` ['length']
The value of this distance.
unit : `~astropy.units.UnitBase` ['length']
The unit for this distance.
z : float
A redshift for this distance. It will be converted to a distance
by computing the luminosity distance for this redshift given the
cosmology specified by ``cosmology``. Must be given as a keyword
argument.
cosmology : `~astropy.cosmology.Cosmology` or None
A cosmology that will be used to compute the distance from ``z``.
If `None`, the current cosmology will be used (see
`astropy.cosmology` for details).
distmod : float or `~astropy.units.Quantity`
The distance modulus for this distance. Note that if ``unit`` is not
provided, a guess will be made at the unit between AU, pc, kpc, and Mpc.
parallax : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
The parallax in angular units.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
order : {'C', 'F', 'A'}, optional
See `~astropy.units.Quantity`.
subok : bool, optional
See `~astropy.units.Quantity`.
ndmin : int, optional
See `~astropy.units.Quantity`.
allow_negative : bool, optional
Whether to allow negative distances (which are possible in some
cosmologies). Default: `False`.
Raises
------
`~astropy.units.UnitsError`
If the ``unit`` is not a length unit.
ValueError
If value specified is less than 0 and ``allow_negative=False``.
If ``cosmology`` is provided when ``z`` is *not* given.
If either none or more than one of ``value``, ``z``, ``distmod``,
or ``parallax`` were given.
Examples
--------
>>> from astropy import units as u
>>> from astropy.cosmology import WMAP5
>>> Distance(10, u.Mpc)
<Distance 10. Mpc>
>>> Distance(40*u.pc, unit=u.kpc)
<Distance 0.04 kpc>
>>> Distance(z=0.23) # doctest: +FLOAT_CMP
<Distance 1184.01657566 Mpc>
>>> Distance(z=0.23, cosmology=WMAP5) # doctest: +FLOAT_CMP
<Distance 1147.78831918 Mpc>
>>> Distance(distmod=24.47*u.mag) # doctest: +FLOAT_CMP
<Distance 783.42964277 kpc>
>>> Distance(parallax=21.34*u.mas) # doctest: +FLOAT_CMP
<Distance 46.86035614 pc>
"""
_equivalent_unit = u.m
_include_easy_conversion_members = True
def __new__(cls, value=None, unit=None, z=None, cosmology=None,
distmod=None, parallax=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0, allow_negative=False):
n_not_none = sum(x is not None for x in [value, z, distmod, parallax])
if n_not_none == 0:
raise ValueError('none of `value`, `z`, `distmod`, or `parallax` '
'were given to Distance constructor')
elif n_not_none > 1:
raise ValueError('more than one of `value`, `z`, `distmod`, or '
'`parallax` were given to Distance constructor')
if value is None:
# If something else but `value` was provided then a new array will
# be created anyways and there is no need to copy that.
copy = False
if z is not None:
if cosmology is None:
from astropy.cosmology import default_cosmology
cosmology = default_cosmology.get()
value = cosmology.luminosity_distance(z)
elif cosmology is not None:
raise ValueError('a `cosmology` was given but `z` was not '
'provided in Distance constructor')
elif distmod is not None:
value = cls._distmod_to_pc(distmod)
if unit is None:
# if the unit is not specified, guess based on the mean of
# the log of the distance
meanlogval = np.log10(value.value).mean()
if meanlogval > 6:
unit = u.Mpc
elif meanlogval > 3:
unit = u.kpc
elif meanlogval < -3: # ~200 AU
unit = u.AU
else:
unit = u.pc
elif parallax is not None:
if unit is None:
unit = u.pc
value = parallax.to_value(unit, equivalencies=u.parallax())
if np.any(parallax < 0):
if allow_negative:
warnings.warn(
"negative parallaxes are converted to NaN "
"distances even when `allow_negative=True`, "
"because negative parallaxes cannot be transformed "
"into distances. See the discussion in this paper: "
"https://arxiv.org/abs/1507.02105", AstropyWarning)
else:
raise ValueError(
"some parallaxes are negative, which are not "
"interpretable as distances. See the discussion in "
"this paper: https://arxiv.org/abs/1507.02105 . You "
"can convert negative parallaxes to NaN distances by "
"providing the `allow_negative=True` argument.")
# now we have arguments like for a Quantity, so let it do the work
distance = super().__new__(
cls, value, unit, dtype=dtype, copy=copy, order=order,
subok=subok, ndmin=ndmin)
# This invalid catch block can be removed when the minimum numpy
# version is >= 1.19 (NUMPY_LT_1_19)
with np.errstate(invalid='ignore'):
any_negative = np.any(distance.value < 0)
if not allow_negative and any_negative:
raise ValueError("distance must be >= 0. Use the argument "
"`allow_negative=True` to allow negative values.")
return distance
@property
def z(self):
"""Short for ``self.compute_z()``"""
return self.compute_z()
def compute_z(self, cosmology=None, **atzkw):
"""
The redshift for this distance assuming its physical distance is
a luminosity distance.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` or None
The cosmology to assume for this calculation, or `None` to use the
current cosmology (see `astropy.cosmology` for details).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
z : `~astropy.units.Quantity`
The redshift of this distance given the provided ``cosmology``.
Warnings
--------
This method can be slow for large arrays.
The redshift is determined using :func:`astropy.cosmology.z_at_value`,
which handles vector inputs (e.g. an array of distances) by
element-wise calling of :func:`scipy.optimize.minimize_scalar`.
For faster results consider using an interpolation table;
:func:`astropy.cosmology.z_at_value` provides details.
See Also
--------
:func:`astropy.cosmology.z_at_value`
Find the redshift corresponding to a
:meth:`astropy.cosmology.FLRW.luminosity_distance`.
"""
from astropy.cosmology import z_at_value
if cosmology is None:
from astropy.cosmology import default_cosmology
cosmology = default_cosmology.get()
atzkw.setdefault("ztol", 1.e-10)
return z_at_value(cosmology.luminosity_distance, self, **atzkw)
@property
def distmod(self):
"""The distance modulus as a `~astropy.units.Quantity`"""
val = 5. * np.log10(self.to_value(u.pc)) - 5.
return u.Quantity(val, u.mag, copy=False)
@classmethod
def _distmod_to_pc(cls, dm):
dm = u.Quantity(dm, u.mag)
return cls(10 ** ((dm.value + 5) / 5.), u.pc, copy=False)
@property
def parallax(self):
"""The parallax angle as an `~astropy.coordinates.Angle` object"""
return Angle(self.to(u.milliarcsecond, u.parallax()))
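# ----------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the astropy source):
# quick consistency checks tying together the constructor paths and the
# ``distmod``/``parallax`` properties defined above. It relies only on the
# relations m - M = 5 log10(d / 10 pc) and parallax["] = 1 / d[pc].
if __name__ == '__main__':
    _d = Distance(10., u.pc)
    # 5 * log10(10) - 5 == 0 mag
    assert np.isclose(_d.distmod.value, 0.0)
    # Round-trip through the distance-modulus constructor path.
    assert np.isclose(Distance(distmod=_d.distmod).to_value(u.pc), 10.0)
    # A 10 pc source has a parallax of 0.1 arcsec = 100 mas.
    assert np.isclose(_d.parallax.to_value(u.mas), 100.0)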
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# This module is used to run benchmark queries. It runs the set of queries specified in
# the given workload(s) under <workload name>/queries. This script will first try to warm
# the buffer cache before running the query. There is also a parameter to control how
# many iterations to run each query.
import csv
import logging
import math
import os
import re
import sys
import subprocess
import threading
from collections import defaultdict, deque
from functools import partial
from math import ceil
from optparse import OptionParser
from os.path import isfile, isdir
from tests.common.query_executor import *
from tests.common.test_dimensions import *
from tests.common.test_result_verifier import *
from tests.common.workload_executor import *
from tests.util.calculation_util import calculate_median
from tests.util.test_file_parser import *
from time import sleep
from random import choice
# Globals
WORKLOAD_DIR = os.environ['IMPALA_WORKLOAD_DIR']
IMPALA_HOME = os.environ['IMPALA_HOME']
# Setup Logging
logging.basicConfig(level=logging.INFO, format='[%(name)s]: %(message)s')
LOG = logging.getLogger('workload_runner')
class Query(object):
"""Represents the notion of a query in the Impala test infrastructure"""
def __init__(self, *args, **kwargs):
self.query_str = kwargs.get('query_str')
self.name = kwargs.get('name')
self.scale_factor = kwargs.get('scale_factor')
self.test_vector = kwargs.get('test_vector')
self.results = kwargs.get('results')
self.workload = kwargs.get('workload')
self.db = kwargs.get('db', str())
self.table_format_str = kwargs.get('table_format_str', str())
# Only attempt to build the query if a query_str has been passed to the c'tor.
# If it's None, assume the user wants to set a qualified query_str
if self.query_str: self.__build_query()
def __build_query(self):
self.db = QueryTestSectionReader.get_db_name(self.test_vector, self.scale_factor)
self.query_str = QueryTestSectionReader.build_query(self.query_str.strip())
self.table_format_str = '%s/%s/%s' % (self.test_vector.file_format,
self.test_vector.compression_codec,
self.test_vector.compression_type)
class WorkloadRunner(object):
"""Runs query files and captures results from the specified workload(s)
The usage is:
1) Initialize WorkloadRunner with desired execution parameters.
2) Call workload_runner.run_workload() passing in a workload name(s) and scale
factor(s).
Internally, for each workload, this module looks up and parses that workload's
query files and reads the workload's test vector to determine what combination(s)
of file format / compression to run with. The queries are then executed
and the results are displayed as well as saved to a CSV file.
"""
def __init__(self, **kwargs):
self.verbose = kwargs.get('verbose', False)
if self.verbose:
LOG.setLevel(level=logging.DEBUG)
self.client_type = kwargs.get('client_type', 'beeswax')
self.skip_impala = kwargs.get('skip_impala', False)
self.compare_with_hive = kwargs.get('compare_with_hive', False)
self.hive_cmd = kwargs.get('hive_cmd', 'hive -e ')
self.target_impalads = deque(kwargs.get('impalad', 'localhost:21000').split(","))
self.iterations = kwargs.get('iterations', 2)
self.num_clients = kwargs.get('num_clients', 1)
self.exec_options = kwargs.get('exec_options', str())
self.remote = not self.target_impalads[0].startswith('localhost')
self.profiler = kwargs.get('profiler', False)
self.use_kerberos = kwargs.get('use_kerberos', False)
self.run_using_hive = kwargs.get('compare_with_hive', False) or self.skip_impala
self.verify_results = kwargs.get('verify_results', False)
self.plugin_runner = kwargs.get('plugin_runner', None)
self.execution_scope = kwargs.get('execution_scope')
self.shuffle = kwargs.get('shuffle_queries')
# TODO: Need to find a way to get this working without runquery
#self.gprof_cmd = 'google-pprof --text ' + self.runquery_path + ' %s | head -n 60'
self.__summary = str()
self.__result_map = defaultdict(list)
def get_next_impalad(self):
"""Maintains a rotating list of impalads"""
self.target_impalads.rotate(-1)
return self.target_impalads[-1]
# Parse for the tables used in this query
@staticmethod
def __parse_tables(query):
"""
Parse the tables used in this query.
"""
table_predecessor = ['from', 'join']
tokens = query.split(' ')
tables = []
next_is_table = 0
for t in tokens:
t = t.lower()
if next_is_table == 1:
tables.append(t)
next_is_table = 0
if t in table_predecessor:
next_is_table = 1
return tables
def __get_executor_name(self):
executor_name = self.client_type
# We want to indicate this is IMPALA beeswax.
# We currently don't support hive beeswax.
return 'impala_beeswax' if executor_name == 'beeswax' else executor_name
def create_executor(self, executor_name, query, iterations):
# Add additional query exec options here
query_options = {
'hive': lambda: (execute_using_hive,
HiveQueryExecOptions(iterations,
hive_cmd=self.hive_cmd,
)),
'impala_beeswax': lambda: (execute_using_impala_beeswax,
ImpalaBeeswaxExecOptions(iterations,
plugin_runner=self.plugin_runner,
exec_options=self.exec_options,
use_kerberos=self.use_kerberos,
impalad=self.get_next_impalad(),
query=query
)),
'jdbc': lambda: (execute_using_jdbc,
JdbcQueryExecOptions(iterations,
impalad=self.get_next_impalad())),
} [executor_name]()
return query_options
def run_query(self, executor_name, query, exit_on_error):
"""
Run a query command and return the result.
Creates a query executor object and runs the query. The results are processed
and coalesced into a single QueryExecResult object before being returned.
"""
query_exec_func, exec_options = self.create_executor(executor_name, query,
self.iterations)
query_executor = QueryExecutor(query_exec_func, executor_name, exec_options, query,
self.num_clients, exit_on_error)
query_executor.run()
results = query_executor.get_results()
# If all the threads failed, do not call __get_median_exec_result
# and return an empty execution result. If exit_on_error is True and a query failed,
# return a blank result. This is ok since we don't persist the error message in the
# backend db; Moreover, the exact error is always logged to the console.
if not results or\
exit_on_error and any(map(lambda x: not x.success, results)):
return QueryExecResult()
return self.__get_median_exec_result(results)
def __get_median_exec_result(self, results):
"""
Returns an ExecutionResult object whose avg/stddev is the median of all results.
This is used when running with multiple clients to select a good representative value
for the overall execution time.
"""
# Choose a result to update with the median avg/stddev values. It doesn't matter which
# one, so just pick the first one.
final_result = results[0]
# Pick a runtime profile from the middle of the result set, for queries that have run
# for multiple iterations.
final_result.runtime_profile = results[int(ceil(len(results) / 2))].runtime_profile
if len(results) == 1:
return final_result
final_result.avg_time = calculate_median([result.avg_time for result in results])
if self.iterations > 1:
final_result.std_dev = calculate_median([result.std_dev for result in results])
return final_result
@staticmethod
def __enumerate_query_files(base_directory):
"""
Recursively scan the given directory for all test query files.
"""
query_files = list()
for item in os.listdir(base_directory):
full_path = os.path.join(base_directory, item)
if isfile(full_path) and item.endswith('.test'):
query_files.append(full_path)
elif isdir(full_path):
query_files += WorkloadRunner.__enumerate_query_files(full_path)
return query_files
@staticmethod
def __extract_queries_from_test_files(workload, query_names):
"""
Enumerate all the query files for a workload and extract the query strings.
If the user has specified a subset of queries to execute, only extract those query
strings.
"""
query_regex = None
if query_names:
# Build a single regex from all query name regex strings.
query_regex = r'(?:' + '$)|('.join([name for name in query_names.split(',')]) + '$)'
workload_base_dir = os.path.join(WORKLOAD_DIR, workload)
if not isdir(workload_base_dir):
raise ValueError("Workload '%s' not found at path '%s'" %
                 (workload, workload_base_dir))
query_dir = os.path.join(workload_base_dir, 'queries')
if not isdir(query_dir):
raise ValueError("Workload query directory not found at path '%s'" % query_dir)
query_map = defaultdict(list)
for query_file_name in WorkloadRunner.__enumerate_query_files(query_dir):
LOG.debug('Parsing Query Test File: ' + query_file_name)
sections = parse_query_test_file(query_file_name)
test_name = re.sub('/', '.', query_file_name.split('.')[0])[1:]
# If query_names is not none, only extract user specified queries to
# the query map.
if query_names:
sections = [s for s in sections if re.match(query_regex, s['QUERY_NAME'], re.I)]
for section in sections:
query_map[test_name].append((section['QUERY_NAME'],
(section['QUERY'], section['RESULTS'])))
return query_map
def execute_queries(self, queries, stop_on_query_error):
"""
Execute the queries for combinations of file format, compression, etc.
The values needed to build the query are stored in the first 4 columns of each row.
"""
executor_name = self.__get_executor_name()
# each list of queries has the same test vector. pick the first one.
print "\nRunning Vector: File Format: %s, Compression: %s/%s" % \
(queries[0].test_vector.file_format, queries[0].test_vector.compression_codec,
queries[0].test_vector.compression_type)
for query in queries:
self.__summary += "\nQuery (%s): %s\n" % (query.table_format_str, query.name)
exec_result = QueryExecResult()
if not self.skip_impala:
self.__summary += " Impala Results: "
LOG.info('Query Name: \n%s\n' % query.name)
LOG.debug('Sql: \n%s\n' % query.query_str)
exec_result = self.run_query(executor_name, query, stop_on_query_error)
if exec_result:
self.__summary += "%s\n" % exec_result
if not exec_result.success and stop_on_query_error:
break
hive_exec_result = QueryExecResult()
if self.compare_with_hive or self.skip_impala:
self.__summary += " Hive Results: "
hive_exec_result = self.run_query('hive', query, False)
if hive_exec_result:
self.__summary += "%s\n" % hive_exec_result
LOG.info("---------------------------------------------------------------------")
self.__result_map[query].append((exec_result, hive_exec_result))
def execute_workload(self, queries, exit_on_error):
"""Execute a set of queries in a workload.
A workload is a unique combination of the dataset and the test vector.
"""
executor_name = self.__get_executor_name()
query_pipelines =[]
# Since parallelism and iterations are at the level of a workload, each
# QueryExecutor runs a single thread once.
num_query_iter = num_query_clients = 1
for i in xrange(self.num_clients):
query_pipeline = dict()
# Create a mapping from the query name to its executor.
for query in queries:
# The number of iterations for an individual query should be 1
query_exec_func, exec_options = self.create_executor(executor_name, query,
num_query_iter)
query_executor = QueryExecutor(query_exec_func, executor_name, exec_options,
query, num_query_clients, exit_on_error)
query_pipeline[query] = query_executor
query_pipelines.append(query_pipeline)
# Create a workload executor and run the workload.
workload_executor = WorkloadExecutor(query_pipelines=query_pipelines,
shuffle=self.shuffle, iterations=self.iterations)
workload_executor.run()
query_results = workload_executor.get_results()
self.__summary = "\nWorkload [%s]:\n" % (queries[0].db.upper())
# Save the results
for query, results in query_results.iteritems():
if not results:
exec_result = QueryExecResult()
else:
exec_result = self.__get_median_exec_result(results)
self.__result_map[query].append((exec_result, QueryExecResult()))
self.__summary += " Impala Results: %s\n" % exec_result
def construct_queries(self, query_map, workload, scale_factor, query_names,
test_vector):
"""Constructs a list of query objects based on the test vector and workload"""
queries = []
for test_name in query_map.keys():
for query_name, query_and_expected_result in query_map[test_name]:
query_str, results = query_and_expected_result
if not query_name:
query_name = query_str
query = Query(name=query_name,
query_str=query_str,
results=results,
workload=workload,
scale_factor=scale_factor,
test_vector=test_vector)
queries.append(query)
return queries
def get_summary_str(self):
return self.__summary
def get_results(self):
return self.__result_map
def run_workload(self, workload, scale_factor=str(), table_formats=None,
query_names=None, exploration_strategy='core',
stop_on_query_error=True):
"""
Run queries associated with each workload specified on the commandline.
For each workload specified, look up the associated query files and extract them.
Execute the queries in a workload as an execution unit if the scope is 'workload'.
If the scope of execution is a query, run each query individually. Finally,
aggregate the results.
"""
LOG.info('Running workload %s at Scale Factor %s' % (workload,
scale_factor if scale_factor else "None"))
query_map = WorkloadRunner.__extract_queries_from_test_files(workload, query_names)
if not query_map:
LOG.error('No queries selected to run.')
return
test_vectors = None
if table_formats:
table_formats = table_formats.split(',')
dataset = get_dataset_from_workload(workload)
test_vectors =\
[TableFormatInfo.create_from_string(dataset, tf) for tf in table_formats]
else:
test_vectors = [vector.value for vector in\
load_table_info_dimension(workload, exploration_strategy)]
args = [query_map, workload, scale_factor, query_names]
construct_queries_partial = partial(self.construct_queries, *args)
query_lists = map(construct_queries_partial, test_vectors)
exec_func = self.execute_queries
# Scope is case insensitive.
if self.execution_scope.lower() == 'workload':
exec_func = self.execute_workload
for query_list in query_lists:
exec_func(query_list, stop_on_query_error)
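# Hedged usage sketch (hypothetical driver code, not part of this module):
# the WorkloadRunner docstring above describes the intended two-step usage.
# Assuming a 'tpch' workload directory exists under $IMPALA_WORKLOAD_DIR and a
# 'text/none' table format is available, a minimal invocation might look like:
#
#   runner = WorkloadRunner(impalad='localhost:21000', iterations=3,
#                           num_clients=1, execution_scope='query')
#   runner.run_workload('tpch', scale_factor='1', table_formats='text/none')
#   print runner.get_summary_str()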
import random
class Sorting:
#=======================================================================
# Author: Isai Damier
# Title: Bubblesort
# Project: geekviewpoint
# Package: algorithms
#
# Statement:
# Given a disordered list of integers (or any other items),
# rearrange the integers in natural order.
#
# Sample Input: [8,5,3,1,9,6,0,7,4,2,5]
# Sample Output: [0,1,2,3,4,5,5,6,7,8,9]
#
# Time Complexity of Solution:
# Best O(n^2); Average O(n^2); Worst O(n^2).
#
# Approach:
# Bubblesort is an elementary sorting algorithm. The idea is to
# imagine bubbling the smallest elements of a (vertical) array to the
# top; then bubble the next smallest; then so on until the entire
# array is sorted. Bubble sort is worse than both insertion sort and
# selection sort. It moves elements as many times as insertion sort
# (bad) and it takes as long as selection sort (bad). On the positive
# side, bubble sort is easy to understand. Also there are highly
# improved variants of bubble sort.
#
# 0] For each element at index i from 0 to n, loop:
# 1] For each element at index k, from n to i exclusive, loop:
# 2] If the element at k is less than that at k-1, swap them.
#=======================================================================
def bubble_sort(self, nums):
for i in xrange(len(nums)):
for j in xrange(len(nums)-1,i,-1):
if nums[j] < nums[j-1]:
nums[j], nums[j-1] = nums[j-1], nums[j]
return nums
#=======================================================================
# Author: Isai Damier
# Title: Insertionsort
# Project: geekviewpoint
# Package: algorithms
#
# Statement:
# Given a disordered list of integers (or any other items),
# rearrange the integers in natural order.
#
# Sample Input: [8,5,3,1,9,6,0,7,4,2,5]
# Sample Output: [0,1,2,3,4,5,5,6,7,8,9]
#
# Time Complexity of Solution:
# Best O(n); Average O(n^2); Worst O(n^2).
#
# Approach:
# Insertion sort is good for collections that are very small
# or nearly sorted. Otherwise it's not a good sorting algorithm:
# it moves data around too much. Each time an insertion is made,
# all elements in a greater position are shifted.
#=======================================================================
def insertion_sort(self, nums):
for i in xrange(1,len(nums)):
k = i
tmp = nums[k]
while k > 0 and tmp < nums[k-1]:
nums[k] = nums[k-1]
k -= 1
nums[k] = tmp
return nums
#=======================================================================
# Author: Isai Damier
# Title: Selection Sort
# Project: geekviewpoint
# Package: algorithm.sorting
#
# Statement:
# Given a disordered list of integers (or any other items),
# rearrange the integers in natural order.
#
# Sample Input: [18,5,3,1,19,6,0,7,4,2,5]
# Sample Output: [0,1,2,3,4,5,5,6,7,18,19]
#
# Time Complexity of Solution:
# Best O(n^2); Average O(n^2); Worst O(n^2).
#
# Approach:
# Selection sort is a step up from insertion sort from a memory
# viewpoint. It only swaps elements that need to be swapped. In terms
# of time complexity, however, insertion sort is better.
#=======================================================================
def selection_sort(self,nums):
for i in xrange(len(nums)):
min_id = i
for j in xrange(i+1, len(nums)):
if nums[j] < nums[min_id]:
min_id = j
nums[i], nums[min_id] = nums[min_id],nums[i]
return nums
#=======================================================================
# Author: Isai Damier
# Title: Countingsort
# Project: GeekViewpoint
# Package: algorithms
#
# Statement:
# Given a disordered list of repeated integers, rearrange the
# integers in natural order.
#
# Sample Input: [4,3,2,1,4,3,2,4,3,4]
#
# Sample Output: [1,2,2,3,3,3,4,4,4,4]
#
# Time Complexity of Solution:
# Best Case O(n+k); Average Case O(n+k); Worst Case O(n+k),
# where n is the size of the input array and k means the
# values range from 0 to k.
#
# Approach:
# Counting sort, like radix sort and bucket sort,
# is an integer based algorithm (i.e. the values of the input
# array are assumed to be integers). Hence counting sort is
# among the fastest sorting algorithms around, in theory. The
# particular distinction for counting sort is that it creates
# a bucket for each value and keep a counter in each bucket.
# Then each time a value is encountered in the input collection,
# the appropriate counter is incremented. Because counting sort
# creates a bucket for each value, an imposing restriction is
# that the maximum value in the input array be known beforehand.
#
# There is a great number of counting sort code on the Internet,
# including on university websites, that erroneously claim to be
# bucket sort. Bucket sort uses a hash function to distribute
# values; counting sort, on the other hand, creates a counter for
# each value -- hence the name.
#
# Implementation notes:
#
# 1] Since the values range from 0 to k, create k+1 buckets.
# 2] To fill the buckets, iterate through the input list and
# each time a value appears, increment the counter in its
# bucket.
# 3] Now fill the input list with the compressed data in the
# buckets. Each bucket's key represents a value in the
# array. So for each bucket, from smallest key to largest,
# add the index of the bucket to the input array and
# decrease the counter in said bucket by one; until the
# counter is zero.
#=======================================================================
def counting_sort(self,nums):
if not nums:
return nums
Min = min(nums)
Max = max(nums)
count = [0]*(Max-Min+1)
for n in nums:
count[n-Min] += 1
cnt = 0
for i in xrange(len(count)):
for j in xrange(count[i]):
nums[cnt] = i+Min
cnt += 1
return nums
#=======================================================================
# Author: Isai Damier
# Title: Mergesort
# Project: geekviewpoint
# Package: algorithm.sorting
#
# Statement:
# Given a disordered list of integers (or any other items),
# rearrange the integers in natural order.
#
# Sample Input: [8,5,3,1,9,6,0,7,4,2,5]
#
# Sample Output: [0,1,2,3,4,5,5,6,7,8,9]
#
# Time Complexity of Solution:
# Best = Average = Worst = O(nlog(n)).
#
# Approach:
# Merge sort is a divide and conquer algorithm. In the divide and
# conquer paradigm, a problem is broken into pieces where each piece
# still retains all the properties of the larger problem -- except
# its size. To solve the original problem, each piece is solved
# individually; then the pieces are merged back together.
#
# For illustration, imagine needing to sort an array of 200 elements
# using selection sort. Since selection sort takes O(n^2), it would
# take about 40,000 time units to sort the array. Now imagine
# splitting the array into ten equal pieces and sorting each piece
# individually still using selection sort. Now it would take 400
# time units to sort each piece; for a grand total of 10 * 400 = 4,000.
# Once each piece is sorted, merging them back together would take
# about 200 time units; for a grand total of 200 + 4,000 = 4,200.
# Clearly 4,200 is an impressive improvement over 40,000. Now
# imagine greater. Imagine splitting the original array into
# groups of two and then sorting them. In the end, it would take about
# 1,000 time units to sort the array. That's how merge sort works.
#
# NOTE to the Python experts:
# While it might seem more "Pythonic" to take such approach as
#
# mid = len(aList) / 2
# left = mergesort(aList[:mid])
# right = mergesort(aList[mid:])
#
# That approach takes too much memory for creating sublists.
#=======================================================================
def merge_sort(self,nums):
def merge(l,r):
if l == r:
return
m = (l+r)/2
merge(l,m)
merge(m+1,r)
tmp = nums[m+1:r+1]
i1 = m; i2 = len(tmp)-1
for i in xrange(r,l-1,-1):
if i1 < l or i2 < 0:
break
if tmp[i2] >= nums[i1]:
nums[i] = tmp[i2]
i2 -= 1
else:
nums[i] = nums[i1]
i1 -= 1
# copy any remaining elements of the right half into the front of the segment
for i in xrange(i2, -1, -1):
    nums[l + i] = tmp[i]
if len(nums) == 0:
return nums
merge(0,len(nums)-1)
return nums
#=======================================================================
# Author: Isai Damier
# Title: QuickSort
# Project: geekviewpoint
# Package: algorithm.sorting
#
# Statement: Given a disordered list of integers (or any other items),
# rearrange the integers in natural order.
#
# Sample Input: [8,5,3,1,9,6,0,7,4,2]
#
# Sample Output: [0,1,2,3,4,5,6,7,8,9]
#
# Time Complexity of Solution:
# Best = Average = O(nlog(n)); Worst = O(n^2).
#
# Approach:
# Quicksort is admirably known as the algorithm that sorts an array
# while preparing to sort it. For contrast, recall that merge sort
# first partitions an array into smaller pieces, then sorts each piece,
# then merge the pieces back. Quicksort actually sorts the array
# during the partition phase.
#
# Quicksort works by selecting an element called a pivot and splitting
# the array around that pivot such that all the elements in, say, the
# left sub-array are less than pivot and all the elements in the right
# sub-array are greater than pivot. The splitting continues until the
# array can no longer be broken into pieces. That's it. Quicksort is
# done.
#
# All this fussing about quicksort sorting while preparing to sort
# may give the impression that it is better than mergesort, but its
# not. In practice their time complexity is about the same -- with
# one funny exception. Because quicksort picks its pivot randomly,
# there is a practically impossible possibility that the algorithm
# may take O(n^2) to compute.
#
# The aforementioned notwithstanding, quicksort is better than
# mergesort if you consider memory usage. Quicksort is an in-place
# algorithm, requiring no additional storage to work.
#=======================================================================
def quick_sort(self,nums):
def partition(l,r):
pivot = l + random.randrange(r-l+1)
nums[pivot], nums[r] = nums[r], nums[pivot]
for i in xrange(l,r):
if nums[i] < nums[r]:
nums[i], nums[l] = nums[l],nums[i]
l += 1
nums[l], nums[r] = nums[r], nums[l]
return l
def dfs(l,r):
if l < r:
pivot = partition(l,r)
dfs(l,pivot-1)
dfs(pivot+1,r)
dfs(0,len(nums)-1)
return nums
if __name__ == '__main__':
nums = [8,5,3,1,9,6,0,7,4,2,5]
print Sorting().bubble_sort(nums[:])
print Sorting().insertion_sort(nums[:])
print Sorting().selection_sort(nums[:])
print Sorting().counting_sort(nums[:])
print Sorting().merge_sort(nums[:])
print Sorting().quick_sort(nums[:])
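if __name__ == '__main__':
    # Hedged sanity check (illustration, not part of the original listing):
    # every routine above should agree with Python's built-in sorted() on the
    # same sample input used in the demo block above.
    expected = sorted(nums)
    s = Sorting()
    for sort_fn in (s.bubble_sort, s.insertion_sort, s.selection_sort,
                    s.counting_sort, s.merge_sort, s.quick_sort):
        assert sort_fn(nums[:]) == expected, sort_fn.__name__
    print 'all sorts agree with sorted()'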
from __future__ import absolute_import
from datetime import time, date, timedelta
from django.test import TestCase
from django.utils.timezone import now, localtime
from farnsworth import pre_fill
from workshift.models import *
from workshift.forms import *
from workshift.cron import CollectBlownCronJob, UpdateWeeklyStandings
from workshift import utils, signals
class TestUtils(TestCase):
"""
Tests most of the various functions within workshift.utils.
"""
def setUp(self):
self.u = User.objects.create_user(
username="u", first_name="N", last_name="M",
)
today = localtime(now()).date()
self.semester = Semester.objects.create(
year=today.year,
season=Semester.SUMMER,
start_date=today,
end_date=today + timedelta(weeks=18),
)
self.profile = WorkshiftProfile.objects.get(user=self.u)
self.p1 = WorkshiftPool.objects.get(
is_primary=True,
semester=self.semester,
)
self.p1.sign_out_cutoff = 24
self.p1.verify_cutoff = 2
self.p1.save()
self.p2 = WorkshiftPool.objects.create(
title="Alternate Workshift",
semester=self.semester,
)
def test_cron_blown(self):
CollectBlownCronJob().do()
def test_cron_standings(self):
UpdateWeeklyStandings().do()
def test_get_year_season(self):
year, season = utils.get_year_season()
self.assertLess(abs(year - localtime(now()).date().year), 2)
self.assertIn(
season,
[Semester.SPRING, Semester.SUMMER, Semester.FALL],
)
def test_starting_month(self):
# Starting in Summer, Fall, and Spring
self.assertEqual(
(2015, Semester.SPRING),
utils.get_year_season(day=date(2014, 12, 20)),
)
self.assertEqual(
(2015, Semester.SPRING),
utils.get_year_season(day=date(2015, 3, 20)),
)
self.assertEqual(
(2014, Semester.SUMMER),
utils.get_year_season(day=date(2014, 4, 1)),
)
self.assertEqual(
(2014, Semester.SUMMER),
utils.get_year_season(day=date(2014, 7, 20)),
)
self.assertEqual(
(2014, Semester.FALL),
utils.get_year_season(day=date(2014, 8, 1)),
)
self.assertEqual(
(2014, Semester.FALL),
utils.get_year_season(day=date(2014, 10, 20)),
)
def test_start_end(self):
self.assertEqual(
(date(2014, 1, 20), date(2014, 5, 17)),
utils.get_semester_start_end(2014, Semester.SPRING),
)
self.assertEqual(
(date(2014, 5, 25), date(2014, 8, 16)),
utils.get_semester_start_end(2014, Semester.SUMMER),
)
self.assertEqual(
(date(2014, 8, 24), date(2014, 12, 20)),
utils.get_semester_start_end(2014, Semester.FALL),
)
def test_make_pool_hours_all(self):
PoolHours.objects.all().delete()
utils.make_workshift_pool_hours()
self.assertEqual(2, PoolHours.objects.count())
self.assertEqual(2, self.profile.pool_hours.count())
def test_make_pool_hours_profile(self):
PoolHours.objects.all().delete()
utils.make_workshift_pool_hours(
semester=self.semester,
profiles=[],
)
self.assertEqual(0, PoolHours.objects.count())
self.assertEqual(0, self.profile.pool_hours.count())
utils.make_workshift_pool_hours(
semester=self.semester,
profiles=[self.profile],
)
self.assertEqual(2, PoolHours.objects.count())
self.assertEqual(2, self.profile.pool_hours.count())
def test_make_pool_hours_pools(self):
PoolHours.objects.all().delete()
utils.make_workshift_pool_hours(
semester=self.semester,
pools=[self.p1],
)
self.assertEqual(1, PoolHours.objects.count())
self.assertEqual(1, self.profile.pool_hours.count())
utils.make_workshift_pool_hours(
semester=self.semester,
pools=[self.p2],
)
self.assertEqual(2, PoolHours.objects.count())
self.assertEqual(2, self.profile.pool_hours.count())
def test_make_pool_hours_primary(self):
PoolHours.objects.all().delete()
utils.make_workshift_pool_hours(
semester=self.semester,
primary_hours=6,
)
self.assertEqual(
6,
PoolHours.objects.get(pool=self.p1).hours,
)
self.assertEqual(
self.p2.hours,
PoolHours.objects.get(pool=self.p2).hours,
)
def test_can_manage(self):
pass
def test_is_available(self):
pass
def test_make_instances(self):
wtype = WorkshiftType.objects.create(
title="Test Make Instances",
)
# Disconnect the handler and run make_instances ourselves
models.signals.post_save.disconnect(
signals.create_workshift_instances, sender=RegularWorkshift
)
shift = RegularWorkshift.objects.create(
workshift_type=wtype,
pool=self.p1,
day=4,
hours=7,
)
shift.current_assignees = [self.profile]
today = localtime(now()).date()
WorkshiftInstance.objects.create(
weekly_workshift=shift,
date=today - timedelta(today.weekday()),
)
instances = utils.make_instances(
semester=self.semester,
shifts=[shift],
)
models.signals.post_save.connect(
signals.create_workshift_instances, sender=RegularWorkshift
)
for instance in instances:
self.assertEqual(wtype.title, instance.title)
self.assertEqual(shift, instance.weekly_workshift)
self.assertEqual(shift.hours, instance.hours)
self.assertEqual(shift.hours, instance.intended_hours)
self.assertEqual(1, instance.logs.count())
self.assertEqual(
set([shift.day]),
set(i.date.weekday() for i in instances),
)
def test_collect_blown(self):
utils.make_workshift_pool_hours()
self.assertEqual(
([], [], []),
utils.collect_blown(),
)
self.assertEqual(
([], [], []),
utils.collect_blown(semester=self.semester),
)
moment = localtime(now().replace(
hour=20, minute=0, second=0, microsecond=0,
))
past = moment - timedelta(days=1)
WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Closed",
pool=self.p1,
end_time=time(12),
),
closed=True,
date=past.date(),
semester=self.semester,
)
to_close = WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="To be closed",
pool=self.p1,
end_time=time(12),
),
date=past.date(),
semester=self.semester,
)
WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Not Blown",
pool=self.p1,
end_time=time(12),
),
date=moment.date(),
semester=self.semester,
)
blown = WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Blown",
pool=self.p1,
end_time=time(12),
),
date=past.date(),
workshifter=self.profile,
semester=self.semester,
)
WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Edge Case 1: Not Closed",
pool=self.p1,
end_time=moment.time(),
),
date=moment.date(),
semester=self.semester,
)
edge_datetime = moment - timedelta(
hours=self.p1.verify_cutoff, minutes=1,
)
edge_case_2 = WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Edge Case 2: Closed",
pool=self.p1,
end_time=edge_datetime.time(),
),
date=edge_datetime.date(),
)
signed_out_1 = WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Workshifter signed out early enough",
pool=self.p1,
end_time=time(12),
),
date=past.date(),
semester=self.semester,
)
signed_out_2 = WorkshiftInstance.objects.create(
info=InstanceInfo.objects.create(
title="Workshifter signed out too late",
pool=self.p1,
end_time=time(12),
),
liable=self.profile,
date=past.date(),
semester=self.semester,
)
self.assertEqual(
([to_close, edge_case_2, signed_out_1], [], [blown, signed_out_2]),
utils.collect_blown(moment=moment),
)
class TestAssignment(TestCase):
"""
Test the functionality of workshift.utils.auto_assign_shifts. This should
include respecting members' shift preferences and schedules.
"""
def setUp(self):
self.u = User.objects.create_user(username="u0")
today = localtime(now()).date()
self.semester = Semester.objects.create(
year=today.year,
start_date=today,
end_date=today + timedelta(days=6),
)
self.profile = WorkshiftProfile.objects.get(
user=self.u,
semester=self.semester,
)
self.p1 = WorkshiftPool.objects.get(
is_primary=True,
semester=self.semester,
)
self.p2 = WorkshiftPool.objects.create(
title="Alternate Workshift",
semester=self.semester,
)
self.wtype1 = WorkshiftType.objects.create(
title="Like Type",
)
self.wtype2 = WorkshiftType.objects.create(
title="Indifferent Type",
)
self.wtype3 = WorkshiftType.objects.create(
title="Dislike Type",
)
preference1 = WorkshiftRating.objects.create(
rating=WorkshiftRating.LIKE,
workshift_type=self.wtype1,
)
preference2 = WorkshiftRating.objects.create(
rating=WorkshiftRating.INDIFFERENT,
workshift_type=self.wtype2,
)
preference3 = WorkshiftRating.objects.create(
rating=WorkshiftRating.DISLIKE,
workshift_type=self.wtype3,
)
self.profile.ratings = [preference1, preference2, preference3]
self.profile.save()
utils.make_workshift_pool_hours(semester=self.semester)
def test_auto_assign_one(self):
"""
Assign one shift to a member.
"""
shift1 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=5,
)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([], unfinished)
self.assertIn(self.profile, shift1.current_assignees.all())
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
instance.workshifter == self.profile
for instance in instances
))
pool_hours = self.profile.pool_hours.get(pool=self.p1)
self.assertEqual(
pool_hours.assigned_hours,
pool_hours.hours,
)
def test_pre_assigned(self):
"""
Test that assignment behaves correctly when members are already
assigned to other workshifts.
"""
shift1 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=5,
)
shift2 = RegularWorkshift.objects.create(
workshift_type=self.wtype3,
pool=self.p1,
hours=1,
)
shift2.current_assignees = [self.profile]
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([self.profile], unfinished)
self.assertNotIn(self.profile, shift1.current_assignees.all())
pool_hours = self.profile.pool_hours.get(pool=self.p1)
self.assertEqual(
pool_hours.assigned_hours,
1,
)
def test_auto_assign_one_overflow(self):
"""
Don't assign one shift to a member because it pushes them over their
weekly requirement.
"""
shift1 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=6,
)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([self.profile], unfinished)
self.assertNotIn(self.profile, shift1.current_assignees.all())
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
instance.workshifter is None
for instance in instances
))
pool_hours = self.profile.pool_hours.get(pool=self.p1)
self.assertEqual(
pool_hours.assigned_hours,
0,
)
def test_auto_assign_two(self):
"""
Assign two shifts to a member.
"""
shift1 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=2,
)
shift2 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=3,
)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([], unfinished)
self.assertIn(self.profile, shift1.current_assignees.all())
self.assertIn(self.profile, shift2.current_assignees.all())
for shift in [shift1, shift2]:
instances = WorkshiftInstance.objects.filter(
weekly_workshift=shift,
)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
instance.workshifter == self.profile
for instance in instances
))
pool_hours = self.profile.pool_hours.get(pool=self.p1)
self.assertEqual(
pool_hours.assigned_hours,
pool_hours.hours,
)
def test_auto_assign_two_preferred(self):
"""
Assign one of two shifts to a member.
"""
shift1 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=5,
)
shift2 = RegularWorkshift.objects.create(
workshift_type=self.wtype2,
pool=self.p1,
hours=5,
)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([], unfinished)
self.assertIn(self.profile, shift1.current_assignees.all())
self.assertNotIn(self.profile, shift2.current_assignees.all())
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
instance.workshifter == self.profile
for instance in instances
))
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift2)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
instance.workshifter is None
for instance in instances
))
pool_hours = self.profile.pool_hours.get(pool=self.p1)
self.assertEqual(
pool_hours.assigned_hours,
pool_hours.hours,
)
def test_auto_assign_two_overflow(self):
"""
Assign a preferred shift to a member, but don't assign the other
because it pushes them over their weekly requirement.
"""
shift1 = RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=3,
)
shift2 = RegularWorkshift.objects.create(
workshift_type=self.wtype2,
pool=self.p1,
hours=3,
)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([self.profile], unfinished)
self.assertIn(self.profile, shift1.current_assignees.all())
self.assertNotIn(self.profile, shift2.current_assignees.all())
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
instance.workshifter == self.profile
for instance in instances
))
instances = WorkshiftInstance.objects.filter(weekly_workshift=shift2)
self.assertGreater(instances.count(), 0)
self.assertTrue(all(
instance.workshifter is None
for instance in instances
))
pool_hours = self.profile.pool_hours.get(pool=self.p1)
self.assertEqual(
pool_hours.assigned_hours,
3,
)
def _test_auto_assign_fifty(self):
"""
Assign fifty members to fifty shifts, with each shift providing 5 hours
of workshift. Ensures that the assignments don't mysteriously break or
run for an extremely long time for medium-sized houses.
"""
shifts = []
for i in range(50):
shifts.append(
RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=5,
)
)
for i in range(1, 50):
User.objects.create_user(username="u{0}".format(i))
utils.make_workshift_pool_hours(semester=self.semester)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([], unfinished)
for shift in shifts:
self.assertEqual(1, shift.current_assignees.count())
def _test_auto_assign_one_hundred_and_fifty(self):
"""
Assign 150 members to 150 shifts, with each shift providing 5 hours
of workshift. Ensures that the assignments don't mysteriously break or
run for an extremely long time for large houses.
"""
shifts = []
for i in range(150):
shifts.append(
RegularWorkshift.objects.create(
workshift_type=self.wtype1,
pool=self.p1,
hours=5,
)
)
for i in range(1, 150):
User.objects.create_user(username="u{0}".format(i))
utils.make_workshift_pool_hours(semester=self.semester)
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([], unfinished)
for shift in shifts:
self.assertEqual(1, shift.current_assignees.count())
def test_pre_fill_and_assign(self):
"""
Tests that shifts can be correctly assigned after
farnsworth/pre_fill.py is run. This is a good test of how the
assignment code functions "in the wild," rather than with many
duplicates of the same shift.
"""
users = []
for i in range(1, 50):
users.append(User.objects.create_user(username="u{0}".format(i)))
pre_fill.main(["--managers", "--workshift"])
utils.make_workshift_pool_hours(semester=self.semester)
# Assign manager shifts beforehand
for user, manager in zip(users, Manager.objects.all()):
manager.incumbent = UserProfile.objects.get(user=user)
manager.save()
unfinished = utils.auto_assign_shifts(self.semester)
self.assertEqual([], unfinished)
def _test_pre_fill_and_assign_humor(self):
"""
Tests that humor shifts can be correctly assigned after
farnsworth/pre_fill.py is run.
"""
for i in range(1, 50):
User.objects.create_user(username="u{0}".format(i))
pre_fill.main(["--managers", "--workshift"])
utils.make_workshift_pool_hours(semester=self.semester)
# Assign manager shifts beforehand
manager_shifts = RegularWorkshift.objects.filter(
pool=self.p1, workshift_type__auto_assign=False,
)
profiles = WorkshiftProfile.objects.all()
for profile, shift in zip(profiles, manager_shifts):
shift.current_assignees.add(profile)
shift.save()
unfinished = utils.auto_assign_shifts(
self.semester, pool=WorkshiftPool.objects.get(title="Humor Shift")
)
self.assertEqual([], unfinished)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet model for classifying images from CIFAR-10 dataset.
Supports single-host training with one or multiple devices.
ResNet as proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
CIFAR-10 as in:
http://www.cs.toronto.edu/~kriz/cifar.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import os
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import cifar10
import cifar10_model
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('data_dir', '',
'The directory where the CIFAR-10 input data is stored.')
tf.flags.DEFINE_string('model_dir', '',
'The directory where the model will be stored.')
tf.flags.DEFINE_boolean('is_cpu_ps', False,
'If using CPU as the parameter server.')
tf.flags.DEFINE_integer('num_gpus', 1,
'The number of gpus used. Uses only CPU if set to 0.')
tf.flags.DEFINE_integer('num_layers', 44, 'The number of layers of the model.')
tf.flags.DEFINE_integer('train_steps', 10000,
'The number of steps to use for training.')
tf.flags.DEFINE_integer('train_batch_size', 128, 'Batch size for training.')
tf.flags.DEFINE_integer('eval_batch_size', 100, 'Batch size for validation.')
tf.flags.DEFINE_float('momentum', 0.9, 'Momentum for MomentumOptimizer.')
tf.flags.DEFINE_float('weight_decay', 1e-4, 'Weight decay for convolutions.')
tf.flags.DEFINE_boolean('use_distortion_for_training', True,
'If doing image distortion for training.')
# Perf flags
tf.flags.DEFINE_integer('num_intra_threads', 1,
"""Number of threads to use for intra-op parallelism.
If set to 0, the system will pick an appropriate number.
The default is 1 since in this example CPU only handles
the input pipeline and gradient aggregation (when
--is_cpu_ps). Ops that could potentially benefit
from intra-op parallelism are scheduled to run on GPUs.
""")
tf.flags.DEFINE_integer('num_inter_threads', 0,
"""Number of threads to use for inter-op
parallelism. If set to 0, the system will pick
an appropriate number.""")
tf.flags.DEFINE_boolean('force_gpu_compatible', False,
"""whether to enable force_gpu_compatible in
GPU_Options. Check
tensorflow/core/protobuf/config.proto#L69
for details.""")
# Debugging flags
tf.flags.DEFINE_boolean('log_device_placement', False,
'Whether to log device placement.')
# TODO(jamesqin): Replace with fix in b/62239022
class ParamServerDeviceSetter(object):
"""Helper class to assign variables on the least loaded ps-device."""
def __init__(self, worker_device, ps_devices):
"""Initializer for ParamServerDeviceSetter.
Args:
worker_device: the device to use for compute ops.
ps_devices: a list of devices to use for Variable ops. Each variable is
assigned to the least loaded device.
"""
self.ps_devices = ps_devices
self.worker_device = worker_device
self.ps_sizes = [0] * len(self.ps_devices)
def __call__(self, op):
if op.device:
return op.device
if op.type not in ['Variable', 'VariableV2', 'VarHandleOp']:
return self.worker_device
device_index, _ = min(enumerate(self.ps_sizes), key=operator.itemgetter(1))
device_name = self.ps_devices[device_index]
var_size = op.outputs[0].get_shape().num_elements()
self.ps_sizes[device_index] += var_size
return device_name
def _create_device_setter(is_cpu_ps, worker):
"""Create device setter object."""
if is_cpu_ps:
return tf.train.replica_device_setter(
worker_device=worker, ps_device='/cpu:0', ps_tasks=1)
else:
gpus = ['/gpu:%d' % i for i in range(FLAGS.num_gpus)]
return ParamServerDeviceSetter(worker, gpus)
def _resnet_model_fn(features, labels, mode):
"""Resnet model body.
Supports single-host training with one or more GPUs. Parameter distribution can
be either one of the following schemes.
1. CPU is the parameter server and manages gradient updates.
2. Parameters are distributed evenly across all GPUs, and the first GPU
manages gradient updates.
Args:
features: a list of tensors, one for each tower
labels: a list of tensors, one for each tower
mode: ModeKeys.TRAIN or EVAL
Returns:
An EstimatorSpec object.
"""
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
is_cpu_ps = FLAGS.is_cpu_ps
num_gpus = FLAGS.num_gpus
weight_decay = FLAGS.weight_decay
momentum = FLAGS.momentum
tower_features = features
tower_labels = labels
tower_losses = []
tower_gradvars = []
tower_preds = []
if num_gpus != 0:
for i in range(num_gpus):
worker = '/gpu:%d' % i
device_setter = _create_device_setter(is_cpu_ps, worker)
with tf.variable_scope('resnet', reuse=bool(i != 0)):
with tf.name_scope('tower_%d' % i) as name_scope:
with tf.device(device_setter):
_tower_fn(is_training, weight_decay, tower_features[i],
tower_labels[i], tower_losses, tower_gradvars,
tower_preds, False)
if i == 0:
# Only trigger batch_norm moving mean and variance update from the
# 1st tower. Ideally, we should grab the updates from all towers
# but these stats accumulate extremely fast so we can ignore the
# other stats from the other towers without significant detriment.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
name_scope)
else:
with tf.variable_scope('resnet'), tf.device('/cpu:0'):
with tf.name_scope('tower_cpu') as name_scope:
_tower_fn(is_training, weight_decay, tower_features[0], tower_labels[0],
tower_losses, tower_gradvars, tower_preds, True)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)
# Now compute global loss and gradients.
gradvars = []
# parameter server here isn't necessarily one server storing the model params.
# (For gpu-as-ps case, model params are distributed evenly across all gpus.)
# It's the server that runs the ops to apply global gradient updates.
ps_device = '/cpu:0' if is_cpu_ps else '/gpu:0'
with tf.device(ps_device):
with tf.name_scope('gradient_averaging'):
loss = tf.reduce_mean(tower_losses)
for zipped_gradvars in zip(*tower_gradvars):
# Averaging one var's gradients computed from multiple towers
var = zipped_gradvars[0][1]
grads = [gv[0] for gv in zipped_gradvars]
with tf.device(var.device):
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
# Suggested learning rate scheduling from
# https://github.com/ppwwyyxx/tensorpack/blob/master/examples/ResNet/cifar10-resnet.py#L155
# users could apply other scheduling.
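# Worked example (hedged; exact numbers depend on the split size reported by
# num_examples_per_epoch): with a training split of roughly 45k-50k images and
# train_batch_size=128, an epoch is ~350-390 global steps, so the boundaries
# below step the learning rate through 0.1 -> 0.01 -> 0.001 -> 0.0002 at
# roughly epochs 82, 123 and 300.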
num_batches_per_epoch = cifar10.Cifar10DataSet.num_examples_per_epoch(
'train') // FLAGS.train_batch_size
boundaries = [
num_batches_per_epoch * x
for x in np.array([82, 123, 300], dtype=np.int64)
]
staged_lr = [0.1, 0.01, 0.001, 0.0002]
learning_rate = tf.train.piecewise_constant(tf.train.get_global_step(),
boundaries, staged_lr)
# Create a nicely-named tensor for logging
learning_rate = tf.identity(learning_rate, name='learning_rate')
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=momentum)
# Create single grouped train op
train_op = [
optimizer.apply_gradients(
gradvars, global_step=tf.train.get_global_step())
]
train_op.extend(update_ops)
train_op = tf.group(*train_op)
predictions = {
'classes':
tf.concat([p['classes'] for p in tower_preds], axis=0),
'probabilities':
tf.concat([p['probabilities'] for p in tower_preds], axis=0)
}
stacked_labels = tf.concat(labels, axis=0)
metrics = {
'accuracy': tf.metrics.accuracy(stacked_labels, predictions['classes'])
}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics)
def _tower_fn(is_training, weight_decay, feature, label, tower_losses,
tower_gradvars, tower_preds, is_cpu):
"""Build computation tower for each device (CPU or GPU).
Args:
is_training: true if is for training graph.
weight_decay: weight regularization strength, a float.
feature: a Tensor.
label: a Tensor.
tower_losses: a list to be appended with current tower's loss.
tower_gradvars: a list to be appended with current tower's gradients.
tower_preds: a list to be appended with current tower's predictions.
is_cpu: true if build tower on CPU.
"""
data_format = 'channels_last' if is_cpu else 'channels_first'
model = cifar10_model.ResNetCifar10(
FLAGS.num_layers, is_training=is_training, data_format=data_format)
logits = model.forward_pass(feature, input_data_format='channels_last')
tower_pred = {
'classes': tf.argmax(input=logits, axis=1),
'probabilities': tf.nn.softmax(logits)
}
tower_preds.append(tower_pred)
tower_loss = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=label)
tower_loss = tf.reduce_mean(tower_loss)
model_params = tf.trainable_variables()
tower_loss += weight_decay * tf.add_n(
[tf.nn.l2_loss(v) for v in model_params])
tower_losses.append(tower_loss)
tower_grad = tf.gradients(tower_loss, model_params)
tower_gradvars.append(zip(tower_grad, model_params))
def input_fn(subset, num_shards):
"""Create input graph for model.
Args:
subset: one of 'train', 'validate' and 'eval'.
num_shards: num of towers participating in data-parallel training.
Returns:
two lists of tensors for features and labels, each of num_shards length.
"""
if subset == 'train':
batch_size = FLAGS.train_batch_size
elif subset == 'validate' or subset == 'eval':
batch_size = FLAGS.eval_batch_size
else:
raise ValueError('Subset must be one of \'train\', \'validate\' and \'eval\'')
with tf.device('/cpu:0'):
use_distortion = subset == 'train' and FLAGS.use_distortion_for_training
dataset = cifar10.Cifar10DataSet(FLAGS.data_dir, subset, use_distortion)
image_batch, label_batch = dataset.make_batch(batch_size)
if num_shards <= 1:
# No GPU available or only 1 GPU.
return [image_batch], [label_batch]
# Note that passing num=batch_size is safe here, even though
# dataset.batch(batch_size) can, in some cases, return fewer than batch_size
# examples. This is because it does so only when repeating for a limited
# number of epochs, but our dataset repeats forever.
image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
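  # Round-robin the individual examples across the shards so each tower
  # receives an equal share of the batch.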
feature_shards = [[] for i in range(num_shards)]
label_shards = [[] for i in range(num_shards)]
  for i in range(batch_size):
idx = i % num_shards
feature_shards[idx].append(image_batch[i])
label_shards[idx].append(label_batch[i])
feature_shards = [tf.parallel_stack(x) for x in feature_shards]
label_shards = [tf.parallel_stack(x) for x in label_shards]
return feature_shards, label_shards
def main(unused_argv):
# The env variable is on deprecation path, default is set to off.
os.environ['TF_SYNC_ON_FINISH'] = '0'
if FLAGS.num_gpus < 0:
raise ValueError(
'Invalid GPU count: \"num_gpus\" must be 0 or a positive integer.')
if FLAGS.num_gpus == 0 and not FLAGS.is_cpu_ps:
raise ValueError(
'No GPU available for use, must use CPU as parameter server.')
if (FLAGS.num_layers - 2) % 6 != 0:
raise ValueError('Invalid num_layers parameter.')
if FLAGS.num_gpus != 0 and FLAGS.train_batch_size % FLAGS.num_gpus != 0:
raise ValueError('train_batch_size must be multiple of num_gpus.')
if FLAGS.num_gpus != 0 and FLAGS.eval_batch_size % FLAGS.num_gpus != 0:
raise ValueError('eval_batch_size must be multiple of num_gpus.')
num_eval_examples = cifar10.Cifar10DataSet.num_examples_per_epoch('eval')
if num_eval_examples % FLAGS.eval_batch_size != 0:
raise ValueError('validation set size must be multiple of eval_batch_size')
config = tf.estimator.RunConfig()
sess_config = tf.ConfigProto()
sess_config.allow_soft_placement = True
sess_config.log_device_placement = FLAGS.log_device_placement
sess_config.intra_op_parallelism_threads = FLAGS.num_intra_threads
sess_config.inter_op_parallelism_threads = FLAGS.num_inter_threads
sess_config.gpu_options.force_gpu_compatible = FLAGS.force_gpu_compatible
config = config.replace(session_config=sess_config)
classifier = tf.estimator.Estimator(
model_fn=_resnet_model_fn, model_dir=FLAGS.model_dir, config=config)
tensors_to_log = {'learning_rate': 'learning_rate'}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
print('Starting to train...')
classifier.train(
input_fn=functools.partial(
input_fn, subset='train', num_shards=FLAGS.num_gpus),
steps=FLAGS.train_steps,
hooks=[logging_hook])
print('Starting to evaluate...')
eval_results = classifier.evaluate(
input_fn=functools.partial(
input_fn, subset='eval', num_shards=FLAGS.num_gpus),
steps=num_eval_examples // FLAGS.eval_batch_size)
print(eval_results)
if __name__ == '__main__':
tf.app.run()
|
|
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.http import HttpRequest, HttpResponse
from django.test import modify_settings, override_settings
from django.urls import reverse
from django.utils.translation import gettext_lazy
def add_level_messages(storage):
"""
Add 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super().enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, update that constant also.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super().disable()
base.LEVEL_TAGS = self.old_level_tags
class BaseTests:
storage_class = default_storage
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
}],
ROOT_URLCONF='messages_tests.urls',
MESSAGE_TAGS={},
MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__, self.storage_class.__name__),
SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return HttpRequest()
def get_response(self):
return HttpResponse()
def get_storage(self, data=None):
"""
Return the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_repr(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(
repr(storage),
f'<{self.storage_class.__qualname__}: request=<HttpRequest>>',
)
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, gettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, messages are properly stored and
retrieved across the full request/redirect/response cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
messages = [Message(self.levels[level], msg) for msg in data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_template_response')
for level in self.levels:
add_url = reverse('add_template_response', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_context_processor_message_levels(self):
show_url = reverse('show_template_response')
response = self.client.get(show_url)
self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Messages persist properly when multiple POSTs are made before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
add_url = reverse('add_message', args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertIn('messages', response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
MESSAGE_LEVEL=constants.DEBUG,
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled(self):
"""
When the middleware is disabled, an exception is raised when one
attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
with self.assertRaises(MessageFailure):
self.client.post(add_url, data, follow=True)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled_fail_silently(self):
"""
When the middleware is disabled, an exception is not raised
if 'fail_silently' is True.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
'fail_silently': True,
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertNotIn('messages', response.context)
def stored_messages_count(self, storage, response):
"""
Return the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([
Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2', extra_tags='tag'),
])
def test_existing_read(self):
"""
Reading the existing storage doesn't cause the data to be lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
storage.add(constants.INFO, 'A generic info message', extra_tags=None)
tags = [msg.tags for msg in storage]
self.assertEqual(tags, ['info', '', 'extra-tag debug', 'warning', 'error', 'success', 'info'])
def test_level_tag(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.level_tag for msg in storage]
self.assertEqual(tags, ['info', '', 'debug', 'warning', 'error', 'success'])
@override_settings_tags(MESSAGE_TAGS={
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
})
def test_custom_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags, ['info', 'custom', 'extra-tag', '', 'bad', 'success'])
|
|
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.views.decorators.csrf import csrf_exempt
from django.http import QueryDict, HttpResponseNotAllowed, HttpRequest
from django.http.multipartparser import MultiPartParser
from zerver.models import UserProfile, get_client, get_user_profile_by_email
from zerver.lib.response import json_error, json_unauthorized
from django.shortcuts import resolve_url
from django.utils.decorators import available_attrs
from django.utils.timezone import now
from django.conf import settings
from zerver.lib.queue import queue_json_publish
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.utils import statsd, get_subdomain, check_subdomain
from zerver.exceptions import RateLimited
from zerver.lib.rate_limiter import incr_ratelimit, is_ratelimited, \
api_calls_left
from zerver.lib.request import REQ, has_request_variables, JsonableError, RequestVariableMissingError
from django.core.handlers import base
from functools import wraps
import base64
import logging
import cProfile
from io import BytesIO
from zerver.lib.mandrill_client import get_mandrill_client
from six.moves import zip, urllib
from six import text_type
from typing import Union, Any, Callable, Sequence, Dict, Optional, TypeVar
from zerver.lib.str_utils import force_bytes
if settings.ZILENCER_ENABLED:
from zilencer.models import get_deployment_by_domain, Deployment
else:
from mock import Mock
get_deployment_by_domain = Mock()
Deployment = Mock() # type: ignore # https://github.com/JukkaL/mypy/issues/1188
FuncT = TypeVar('FuncT', bound=Callable[..., Any])
ViewFuncT = TypeVar('ViewFuncT', bound=Callable[..., HttpResponse])
def get_deployment_or_userprofile(role):
# type: (text_type) -> Union[UserProfile, Deployment]
return get_user_profile_by_email(role) if "@" in role else get_deployment_by_domain(role)
class _RespondAsynchronously(object):
pass
# Return RespondAsynchronously from an @asynchronous view if the
# response will be provided later by calling handler.zulip_finish(),
# or has already been provided this way. We use this for longpolling
# mode.
RespondAsynchronously = _RespondAsynchronously()
def asynchronous(method):
# type: (Callable[..., Union[HttpResponse, _RespondAsynchronously]]) -> Callable[..., Union[HttpResponse, _RespondAsynchronously]]
# TODO: this should be the correct annotation when mypy gets fixed: type:
# (Callable[[HttpRequest, base.BaseHandler, Sequence[Any], Dict[str, Any]], Union[HttpResponse, _RespondAsynchronously]]) ->
# Callable[[HttpRequest, Sequence[Any], Dict[str, Any]], Union[HttpResponse, _RespondAsynchronously]]
# TODO: see https://github.com/python/mypy/issues/1655
@wraps(method)
def wrapper(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> Union[HttpResponse, _RespondAsynchronously]
return method(request, handler=request._tornado_handler, *args, **kwargs)
if getattr(method, 'csrf_exempt', False):
wrapper.csrf_exempt = True # type: ignore # https://github.com/JukkaL/mypy/issues/1170
return wrapper
def update_user_activity(request, user_profile):
# type: (HttpRequest, UserProfile) -> None
# update_active_status also pushes to rabbitmq, and it seems
# redundant to log that here as well.
if request.META["PATH_INFO"] == '/json/users/me/presence':
return
if hasattr(request, '_query'):
query = request._query
else:
query = request.META['PATH_INFO']
    event = {'query': query,
             'user_profile_id': user_profile.id,
             'time': datetime_to_timestamp(now()),
             'client': request.client.name}
queue_json_publish("user_activity", event, lambda event: None)
# Based on django.views.decorators.http.require_http_methods
def require_post(func):
# type: (ViewFuncT) -> ViewFuncT
@wraps(func)
def wrapper(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
if (request.method != "POST"
and not (request.method == "SOCKET"
and request.META['zulip.emulated_method'] == "POST")):
if request.method == "SOCKET":
err_method = "SOCKET/%s" % (request.META['zulip.emulated_method'],)
else:
err_method = request.method
logging.warning('Method Not Allowed (%s): %s', err_method, request.path,
extra={'status_code': 405, 'request': request})
return HttpResponseNotAllowed(["POST"])
return func(request, *args, **kwargs)
return wrapper # type: ignore # https://github.com/python/mypy/issues/1927
def require_realm_admin(func):
# type: (ViewFuncT) -> ViewFuncT
@wraps(func)
def wrapper(request, user_profile, *args, **kwargs):
# type: (HttpRequest, UserProfile, *Any, **Any) -> HttpResponse
if not user_profile.is_realm_admin:
raise JsonableError(_("Must be a realm administrator"))
return func(request, user_profile, *args, **kwargs)
return wrapper # type: ignore # https://github.com/python/mypy/issues/1927
from zerver.lib.user_agent import parse_user_agent
def get_client_name(request, is_json_view):
# type: (HttpRequest, bool) -> text_type
# If the API request specified a client in the request content,
# that has priority. Otherwise, extract the client from the
# User-Agent.
if 'client' in request.REQUEST:
return request.REQUEST['client']
elif "HTTP_USER_AGENT" in request.META:
user_agent = parse_user_agent(request.META["HTTP_USER_AGENT"])
# We could check for a browser's name being "Mozilla", but
# e.g. Opera and MobileSafari don't set that, and it seems
# more robust to just key off whether it was a json view
if user_agent["name"] != "ZulipDesktop" and is_json_view:
            # Avoid changing the client string for browsers. Once this
            # is out to prod, we can rename the field to something like
            # Browser for consistency.
return "website"
else:
return user_agent["name"]
else:
# In the future, we will require setting USER_AGENT, but for
# now we just want to tag these requests so we can review them
# in logs and figure out the extent of the problem
if is_json_view:
return "website"
else:
return "Unspecified"
def process_client(request, user_profile, is_json_view=False, client_name=None):
# type: (HttpRequest, UserProfile, bool, Optional[text_type]) -> None
if client_name is None:
client_name = get_client_name(request, is_json_view)
# Transitional hack for early 2014. Eventually the ios clients
# will all report ZulipiOS, and we can remove the next couple lines.
if client_name == 'ios':
client_name = 'ZulipiOS'
request.client = get_client(client_name)
update_user_activity(request, user_profile)
def validate_api_key(request, role, api_key, is_webhook=False):
# type: (HttpRequest, text_type, text_type, bool) -> Union[UserProfile, Deployment]
# Remove whitespace to protect users from trivial errors.
role, api_key = role.strip(), api_key.strip()
try:
profile = get_deployment_or_userprofile(role)
except UserProfile.DoesNotExist:
raise JsonableError(_("Invalid user: %s") % (role,))
except Deployment.DoesNotExist:
raise JsonableError(_("Invalid deployment: %s") % (role,))
if api_key != profile.api_key:
if len(api_key) != 32:
reason = _("Incorrect API key length (keys should be 32 "
"characters long) for role '%s'")
else:
reason = _("Invalid API key for role '%s'")
raise JsonableError(reason % (role,))
if not profile.is_active:
raise JsonableError(_("Account not active"))
if profile.is_incoming_webhook and not is_webhook:
raise JsonableError(_("Account is not valid to post webhook messages"))
try:
if profile.realm.deactivated:
raise JsonableError(_("Realm for account has been deactivated"))
except AttributeError:
# Deployment objects don't have realms
pass
if (not check_subdomain(get_subdomain(request), profile.realm.subdomain)
# Allow access to localhost for Tornado
and not (settings.RUNNING_INSIDE_TORNADO and
request.META["SERVER_NAME"] == "127.0.0.1" and
request.META["REMOTE_ADDR"] == "127.0.0.1")):
logging.warning("User %s attempted to access API on wrong subdomain %s" % (
profile.email, get_subdomain(request)))
raise JsonableError(_("Account is not associated with this subdomain"))
return profile
# Use this for webhook views that don't get an email passed in.
def api_key_only_webhook_view(client_name):
# type: (text_type) -> Callable[..., HttpResponse]
# This function can't be typed perfectly because returning a generic function
# isn't supported in mypy - https://github.com/python/mypy/issues/1551.
def _wrapped_view_func(view_func):
# type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
@csrf_exempt
@has_request_variables
@wraps(view_func)
def _wrapped_func_arguments(request, api_key=REQ(),
*args, **kwargs):
# type: (HttpRequest, text_type, *Any, **Any) -> HttpResponse
try:
user_profile = UserProfile.objects.get(api_key=api_key)
except UserProfile.DoesNotExist:
raise JsonableError(_("Invalid API key"))
if not user_profile.is_active:
raise JsonableError(_("Account not active"))
if user_profile.realm.deactivated:
raise JsonableError(_("Realm for account has been deactivated"))
if not check_subdomain(get_subdomain(request), user_profile.realm.subdomain):
logging.warning("User %s attempted to access webhook API on wrong subdomain %s" % (
user_profile.email, get_subdomain(request)))
raise JsonableError(_("Account is not associated with this subdomain"))
request.user = user_profile
request._email = user_profile.email
webhook_client_name = "Zulip{}Webhook".format(client_name)
process_client(request, user_profile, client_name=webhook_client_name)
if settings.RATE_LIMITING:
rate_limit_user(request, user_profile, domain='all')
return view_func(request, user_profile, request.client, *args, **kwargs)
return _wrapped_func_arguments
return _wrapped_view_func
# From Django 1.8, modified to leave off ?next=/
def redirect_to_login(next, login_url=None,
redirect_field_name=REDIRECT_FIELD_NAME):
# type: (text_type, Optional[text_type], text_type) -> HttpResponseRedirect
"""
Redirects the user to the login page, passing the given 'next' page
"""
resolved_url = resolve_url(login_url or settings.LOGIN_URL)
login_url_parts = list(urllib.parse.urlparse(resolved_url))
if redirect_field_name:
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring[redirect_field_name] = next
# Don't add ?next=/, to keep our URLs clean
if next != '/':
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urllib.parse.urlunparse(login_url_parts))
# From Django 1.8
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
# type: (Callable[[UserProfile], bool], Optional[text_type], text_type) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
"""
Decorator for views that checks that the user passes the given test,
redirecting to the log-in page if necessary. The test should be a callable
that takes the user object and returns True if the user passes.
"""
def decorator(view_func):
# type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
if test_func(request):
return view_func(request, *args, **kwargs)
path = request.build_absolute_uri()
resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urllib.parse.urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urllib.parse.urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator
def logged_in_and_active(request):
# type: (HttpRequest) -> bool
if not request.user.is_authenticated():
return False
if not request.user.is_active:
return False
if request.user.realm.deactivated:
return False
return check_subdomain(get_subdomain(request), request.user.realm.subdomain)
# Based on Django 1.8's @login_required
def zulip_login_required(function=None,
redirect_field_name=REDIRECT_FIELD_NAME,
login_url=settings.HOME_NOT_LOGGED_IN):
# type: (Optional[Callable[..., HttpResponse]], text_type, text_type) -> Union[Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]], Callable[..., HttpResponse]]
actual_decorator = user_passes_test(
logged_in_and_active,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
def zulip_internal(view_func):
# type: (ViewFuncT) -> ViewFuncT
@zulip_login_required
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
request._query = view_func.__name__
if request.user.realm.domain != 'zulip.com':
return HttpResponseRedirect(settings.HOME_NOT_LOGGED_IN)
request._email = request.user.email
process_client(request, request.user)
return view_func(request, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
# authenticated_api_view will add the authenticated user's
# user_profile to the view function's arguments list, since we have to
# look it up anyway. It is deprecated in favor of the REST API
# versions.
def authenticated_api_view(is_webhook=False):
# type: (bool) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
def _wrapped_view_func(view_func):
# type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
@csrf_exempt
@require_post
@has_request_variables
@wraps(view_func)
def _wrapped_func_arguments(request, email=REQ(), api_key=REQ(default=None),
api_key_legacy=REQ('api-key', default=None),
*args, **kwargs):
# type: (HttpRequest, text_type, Optional[text_type], Optional[text_type], *Any, **Any) -> HttpResponse
if not api_key and not api_key_legacy:
raise RequestVariableMissingError("api_key")
elif not api_key:
api_key = api_key_legacy
user_profile = validate_api_key(request, email, api_key, is_webhook)
request.user = user_profile
request._email = user_profile.email
process_client(request, user_profile)
# Apply rate limiting
limited_func = rate_limit()(view_func)
return limited_func(request, user_profile, *args, **kwargs)
return _wrapped_func_arguments
return _wrapped_view_func
# A more REST-y authentication decorator, using, in particular, HTTP Basic
# authentication.
def authenticated_rest_api_view(is_webhook=False):
# type: (bool) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
def _wrapped_view_func(view_func):
# type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
@csrf_exempt
@wraps(view_func)
def _wrapped_func_arguments(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
# First try block attempts to get the credentials we need to do authentication
try:
# Grab the base64-encoded authentication string, decode it, and split it into
# the email and API key
auth_type, credentials = request.META['HTTP_AUTHORIZATION'].split()
# case insensitive per RFC 1945
if auth_type.lower() != "basic":
return json_error(_("Only Basic authentication is supported."))
role, api_key = base64.b64decode(force_bytes(credentials)).decode('utf-8').split(":")
except ValueError:
json_error(_("Invalid authorization header for basic auth"))
except KeyError:
return json_unauthorized("Missing authorization header for basic auth")
# Now we try to do authentication or die
try:
# Could be a UserProfile or a Deployment
profile = validate_api_key(request, role, api_key, is_webhook)
except JsonableError as e:
return json_unauthorized(e.error)
request.user = profile
process_client(request, profile)
if isinstance(profile, UserProfile):
request._email = profile.email
else:
assert isinstance(profile, Deployment)
request._email = "deployment:" + role
profile.rate_limits = ""
# Apply rate limiting
return rate_limit()(view_func)(request, profile, *args, **kwargs)
return _wrapped_func_arguments
return _wrapped_view_func
def process_as_post(view_func):
# type: (ViewFuncT) -> ViewFuncT
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
# Adapted from django/http/__init__.py.
# So by default Django doesn't populate request.POST for anything besides
# POST requests. We want this dict populated for PATCH/PUT, so we have to
# do it ourselves.
#
# This will not be required in the future, a bug will be filed against
# Django upstream.
if not request.POST:
# Only take action if POST is empty.
if request.META.get('CONTENT_TYPE', '').startswith('multipart'):
# Note that request._files is just the private attribute that backs the
# FILES property, so we are essentially setting request.FILES here. (In
# Django 1.5 FILES was still a read-only property.)
request.POST, request._files = MultiPartParser(request.META, BytesIO(request.body),
request.upload_handlers, request.encoding).parse()
else:
request.POST = QueryDict(request.body, encoding=request.encoding)
return view_func(request, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def authenticate_log_and_execute_json(request, view_func, *args, **kwargs):
# type: (HttpRequest, Callable[..., HttpResponse], *Any, **Any) -> HttpResponse
if not request.user.is_authenticated():
return json_error(_("Not logged in"), status=401)
user_profile = request.user
if not user_profile.is_active:
raise JsonableError(_("Account not active"))
if user_profile.realm.deactivated:
raise JsonableError(_("Realm for account has been deactivated"))
if user_profile.is_incoming_webhook:
raise JsonableError(_("Webhook bots can only access webhooks"))
if (not check_subdomain(get_subdomain(request), user_profile.realm.subdomain) and
# Exclude the SOCKET requests from this filter; they were
# checked when the original websocket request reached Tornado
not (request.method == "SOCKET" and
request.META['SERVER_NAME'] == "127.0.0.1")):
logging.warning("User %s attempted to access JSON API on wrong subdomain %s" % (
user_profile.email, get_subdomain(request)))
raise JsonableError(_("Account is not associated with this subdomain"))
process_client(request, user_profile, True)
request._email = user_profile.email
return view_func(request, user_profile, *args, **kwargs)
# Checks if the request is a POST request and that the user is logged
# in. If not, return an error (the @login_required behavior of
# redirecting to a login page doesn't make sense for json views)
def authenticated_json_post_view(view_func):
# type: (ViewFuncT) -> ViewFuncT
@require_post
@has_request_variables
@wraps(view_func)
def _wrapped_view_func(request,
*args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
return authenticate_log_and_execute_json(request, view_func, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def authenticated_json_view(view_func):
# type: (ViewFuncT) -> ViewFuncT
@wraps(view_func)
def _wrapped_view_func(request,
*args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
return authenticate_log_and_execute_json(request, view_func, *args, **kwargs)
return _wrapped_view_func # type: ignore # https://github.com/python/mypy/issues/1927
def is_local_addr(addr):
# type: (text_type) -> bool
return addr in ('127.0.0.1', '::1')
# These views are used by the main Django server to notify the Tornado server
# of events. We protect them from the outside world by checking a shared
# secret, and also the originating IP (for now).
def authenticate_notify(request):
# type: (HttpRequest) -> bool
return (is_local_addr(request.META['REMOTE_ADDR'])
and request.POST.get('secret') == settings.SHARED_SECRET)
def client_is_exempt_from_rate_limiting(request):
# type: (HttpRequest) -> bool
# Don't rate limit requests from Django that come from our own servers,
# and don't rate-limit dev instances
return ((request.client and request.client.name.lower() == 'internal')
and (is_local_addr(request.META['REMOTE_ADDR']) or
settings.DEBUG_RATE_LIMITING))
def internal_notify_view(view_func):
# type: (ViewFuncT) -> ViewFuncT
@csrf_exempt
@require_post
@wraps(view_func)
def _wrapped_view_func(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
if not authenticate_notify(request):
return json_error(_('Access denied'), status=403)
if not hasattr(request, '_tornado_handler'):
# We got called through the non-Tornado server somehow.
# This is not a security check; it's an internal assertion
# to help us find bugs.
raise RuntimeError('notify view called with no Tornado handler')
request._email = "internal"
return view_func(request, *args, **kwargs)
return _wrapped_view_func
# Converter functions for use with has_request_variables
def to_non_negative_int(x):
# type: (float) -> int
x = int(x)
if x < 0:
raise ValueError("argument is negative")
return x
def flexible_boolean(boolean):
# type: (text_type) -> bool
"""Returns True for any of "1", "true", or "True". Returns False otherwise."""
if boolean in ("1", "true", "True"):
return True
else:
return False
def statsd_increment(counter, val=1):
# type: (text_type, int) -> Callable[[Callable[..., Any]], Callable[..., Any]]
"""Increments a statsd counter on completion of the
decorated function.
Pass the name of the counter to this decorator-returning function."""
def wrapper(func):
# type: (Callable[..., Any]) -> Callable[..., Any]
@wraps(func)
def wrapped_func(*args, **kwargs):
# type: (*Any, **Any) -> Any
ret = func(*args, **kwargs)
statsd.incr(counter, val)
return ret
return wrapped_func
return wrapper
def rate_limit_user(request, user, domain):
# type: (HttpRequest, UserProfile, text_type) -> None
"""Returns whether or not a user was rate limited. Will raise a RateLimited exception
if the user has been rate limited, otherwise returns and modifies request to contain
the rate limit information"""
ratelimited, time = is_ratelimited(user, domain)
request._ratelimit_applied_limits = True
request._ratelimit_secs_to_freedom = time
request._ratelimit_over_limit = ratelimited
# Abort this request if the user is over her rate limits
if ratelimited:
statsd.incr("ratelimiter.limited.%s.%s" % (type(user), user.id))
raise RateLimited()
incr_ratelimit(user, domain)
calls_remaining, time_reset = api_calls_left(user, domain)
request._ratelimit_remaining = calls_remaining
request._ratelimit_secs_to_freedom = time_reset
def rate_limit(domain='all'):
# type: (text_type) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
"""Rate-limits a view. Takes an optional 'domain' param if you wish to rate limit different
types of API calls independently.
Returns a decorator"""
def wrapper(func):
# type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
@wraps(func)
def wrapped_func(request, *args, **kwargs):
# type: (HttpRequest, *Any, **Any) -> HttpResponse
# It is really tempting to not even wrap our original function
# when settings.RATE_LIMITING is False, but it would make
# for awkward unit testing in some situations.
if not settings.RATE_LIMITING:
return func(request, *args, **kwargs)
if client_is_exempt_from_rate_limiting(request):
return func(request, *args, **kwargs)
try:
user = request.user
except:
# TODO: This logic is not tested, and I'm not sure we are
# doing the right thing here.
user = None
if not user:
logging.error("Requested rate-limiting on %s but user is not authenticated!" % \
func.__name__)
return func(request, *args, **kwargs)
# Rate-limiting data is stored in redis
# We also only support rate-limiting authenticated
# views right now.
# TODO(leo) - implement per-IP non-authed rate limiting
rate_limit_user(request, user, domain)
return func(request, *args, **kwargs)
return wrapped_func
return wrapper
def profiled(func):
# type: (FuncT) -> FuncT
"""
This decorator should obviously be used only in a dev environment.
It works best when surrounding a function that you expect to be
called once. One strategy is to write a backend test and wrap the
test case with the profiled decorator.
You can run a single test case like this:
# edit zerver/tests/test_external.py and place @profiled above the test case below
./tools/test-backend zerver.tests.test_external.RateLimitTests.test_ratelimit_decrease
Then view the results like this:
./tools/show-profile-results.py test_ratelimit_decrease.profile
"""
@wraps(func)
def wrapped_func(*args, **kwargs):
# type: (*Any, **Any) -> Any
fn = func.__name__ + ".profile"
prof = cProfile.Profile()
retval = prof.runcall(func, *args, **kwargs)
prof.dump_stats(fn)
return retval
return wrapped_func # type: ignore # https://github.com/python/mypy/issues/1927
def uses_mandrill(func):
# type: (FuncT) -> FuncT
"""
This decorator takes a function with keyword argument "mail_client" and
fills it in with the mail_client for the Mandrill account.
"""
@wraps(func)
def wrapped_func(*args, **kwargs):
# type: (*Any, **Any) -> Any
kwargs['mail_client'] = get_mandrill_client()
return func(*args, **kwargs)
return wrapped_func # type: ignore # https://github.com/python/mypy/issues/1927
|
|
# -*- encoding: utf-8 -*-
#
# Copyright 2015-2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from urlparse import parse_qsl
from urlparse import urlparse
except ImportError:
from urllib.parse import parse_qsl
from urllib.parse import urlparse
import base64
import collections
import flask
import shutil
import six
import dci.auth as auth
from dci.db import models
from dci.db import models2
import dci.dci_config as config
from dci.common import utils
from dciauth.v2.headers import generate_headers
import os
import subprocess
# convenient alias
conf = config.CONFIG
def restore_db(engine):
models.metadata.drop_all(engine)
models.metadata.create_all(engine)
def rm_upload_folder():
shutil.rmtree(conf["FILES_UPLOAD_FOLDER"], ignore_errors=True)
def generate_client(app, credentials=None, access_token=None):
attrs = ["status_code", "data", "headers"]
Response = collections.namedtuple("Response", attrs)
if credentials:
token = base64.b64encode(("%s:%s" % credentials).encode("utf8")).decode("utf8")
headers = {
"Authorization": "Basic " + token,
"Content-Type": "application/json",
}
elif access_token:
headers = {
"Authorization": "Bearer " + access_token,
"Content-Type": "application/json",
}
def client_open_decorator(func):
def wrapper(*args, **kwargs):
headers.update(kwargs.get("headers", {}))
kwargs["headers"] = headers
content_type = headers.get("Content-Type")
data = kwargs.get("data")
if data and content_type == "application/json":
kwargs["data"] = flask.json.dumps(data, cls=utils.JSONEncoder)
response = func(*args, **kwargs)
data = response.data
if response.content_type == "application/json":
data = flask.json.loads(data or "{}")
            if isinstance(data, six.binary_type):
data = data.decode("utf8")
return Response(response.status_code, data, response.headers)
return wrapper
client = app.test_client()
client.open = client_open_decorator(client.open)
return client
def generate_token_based_client(app, resource):
attrs = ["status_code", "data", "headers"]
Response = collections.namedtuple("Response", attrs)
def client_open_decorator(func):
def wrapper(*args, **kwargs):
payload = kwargs.get("data")
data = flask.json.dumps(payload, cls=utils.JSONEncoder) if payload else ""
url = urlparse(args[0])
params = dict(parse_qsl(url.query))
headers = kwargs.get("headers", {})
headers.update(
generate_headers(
{
"method": kwargs.get("method"),
"endpoint": url.path,
"params": params,
"data": data,
"host": "localhost",
},
{
"access_key": "%s/%s" % (resource["type"], resource["id"]),
"secret_key": resource["api_secret"],
},
)
)
headers.update({"Content-Type": "application/json"})
kwargs["headers"] = headers
if data:
kwargs["data"] = data
response = func(*args, **kwargs)
data = flask.json.loads(response.data or "{}")
return Response(response.status_code, data, response.headers)
return wrapper
client = app.test_client()
client.open = client_open_decorator(client.open)
return client
def post_file(client, jobstate_id, file_desc, mime="text/plain"):
headers = {
"DCI-JOBSTATE-ID": jobstate_id,
"DCI-NAME": file_desc.name,
"DCI-MIME": mime,
"Content-Type": "text/plain",
}
res = client.post("/api/v1/files", headers=headers, data=file_desc.content)
return res.data["file"]["id"]
def provision(session):
# Create admin
admin_team = models2.Team(name="admin")
admin_user = models2.User(
name="admin",
sso_username="admin",
password=auth.hash_password("admin"),
fullname="Admin",
email="[email protected]",
)
admin_user.team.append(admin_team)
session.add(admin_user)
# Create user
user_team = models2.Team(name="user")
user = models2.User(
name="user",
sso_username="user",
password=auth.hash_password("user"),
fullname="User",
email="[email protected]",
)
user.team.append(user_team)
session.add(user)
# Create user 2
user2_team = models2.Team(name="user2")
user2 = models2.User(
name="user2",
sso_username="user2",
password=auth.hash_password("user2"),
fullname="User2",
email="[email protected]",
)
user2.team.append(user2_team)
session.add(user2)
# Create user no team
user_no_team = models2.User(
name="user_no_team",
sso_username="user_no_team",
password=auth.hash_password("user_no_team"),
fullname="User No Team",
email="[email protected]",
)
session.add(user_no_team)
# Create Red Hat employee
red_hat = models2.Team(name="Red Hat")
rh_employee = models2.User(
name="rh_employee",
sso_username="rh_employee",
password=auth.hash_password("rh_employee"),
fullname="Employee at Red Hat",
email="[email protected]",
)
rh_employee.team.append(red_hat)
session.add(rh_employee)
# Create EPM
epm_team = models2.Team(name="EPM")
epm = models2.User(
name="epm",
sso_username="epm",
password=auth.hash_password("epm"),
fullname="Partner Engineer",
email="[email protected]",
)
epm.team.append(epm_team)
session.add(epm)
# Create product team
session.add(models2.Team(name="product"))
# Create a product
session.add(
models2.Product(
name="Awesome product",
label="AWSM",
description="My Awesome product",
)
)
# Create a second product
session.add(
models2.Product(
name="Best product",
label="BEST",
description="My best product",
)
)
session.commit()
SWIFT = "dci.stores.swift.Swift"
FileDesc = collections.namedtuple("FileDesc", ["name", "content"])
def run_bin(bin_name, env):
env.update(os.environ.copy())
exec_path = os.path.abspath(__file__)
exec_path = os.path.abspath("%s/../../bin/%s" % (exec_path, bin_name))
return subprocess.Popen(exec_path, shell=True, env=env)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from OpenGL.GL import *
from OpenGL.GLU import *
import GestureAgentsPygame.Screen as Screen
import pygame
import atexit
width, height = Screen.size
CALIBRATION_FILE = 'calibration.json'
# Default calibration; overridden by values loaded from calibration.json below.
calibration = {}
calibration['x'] = 0
calibration['y'] = 0
calibration['z'] = 0
calibration['w'] = 1
calibration['h'] = 1
calibration['ax'] = 0
calibration['ay'] = 0
calibration['az'] = 0
try:
    with open(CALIBRATION_FILE) as fcalibration:
        calibration.update(json.load(fcalibration))
    print "Loaded " + CALIBRATION_FILE
except IOError:
pass
def initializeDisplay():
global texture
w, h = Screen.size
pygame.display.set_mode(Screen.size, pygame.OPENGL | pygame.DOUBLEBUF)
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45, float(w) / float(h), 0.5, 150)
glMatrixMode(GL_MODELVIEW)
# set up texturing
glEnable(GL_TEXTURE_2D)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
texture = glGenTextures(1)
def copyT(textureSurface, mtexture=None, format="RGBA", width=width, height=height):
width, height = Screen.size
if mtexture is None:
mtexture = texture
textureData = pygame.image.tostring(textureSurface, format, 1)
formats = {"RGBA": GL_RGBA, "RGB": GL_RGB}
glBindTexture(GL_TEXTURE_2D, mtexture)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexImage2D(
GL_TEXTURE_2D, 0, formats[format], width, height, 0, formats[format],
GL_UNSIGNED_BYTE, textureData)
def drawT(textureSurface):
glEnable(GL_TEXTURE_2D)
copyT(textureSurface)
glBindTexture(GL_TEXTURE_2D, texture)
glBegin(GL_QUADS)
# Bottom Left Of The Texture and Quad
glTexCoord2f(0, 1)
glVertex2f(0, 0)
# Top Left Of The Texture and Quad
glTexCoord2f(0, 0)
glVertex2f(0, Screen.size[1])
# Top Right Of The Texture and Quad
glTexCoord2f(1, 0)
glVertex2f(Screen.size[0], Screen.size[1])
# Bottom Right Of The Texture and Quad
glTexCoord2f(1, 1)
glVertex2f(Screen.size[0], 0)
glEnd()
glDisable(GL_TEXTURE_2D)
def calibrate():
global calibration
if configurators[configurator] and keyPressed:
configurators[configurator][keyPressed]()
glLoadIdentity()
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glScale(1, -1, 1)
glTranslate(calibration['x'], calibration['y'], calibration['z'] - 1)
glRotate(calibration['ax'], 1, 0, 0)
glRotate(calibration['ay'], 0, 1, 0)
glRotate(calibration['az'], 0, 0, 1)
glScale(calibration['w'], calibration['h'], 1)
glPushMatrix()
glTranslate(-0.5, -0.5, 0)
if configurators[configurator]:
glBegin(GL_LINES)
for x in (v / 8.0 for v in range(0, 8)):
glVertex2f(x, 0)
glVertex2f(x, 1)
glVertex2f(1, 0)
glVertex2f(1, 1)
for y in (v / 6.0 for v in range(0, 6)):
glVertex2f(0, y)
glVertex2f(1, y)
glVertex2f(0, 1)
glVertex2f(1, 1)
glEnd()
drawCircle((0.5, 0.5), 0.5)
drawCircle((0.5, 0.5), 1.0 / 3)
drawCircle((0.5, 0.5), 1.0 / 6)
glPopMatrix()
glScale(1.0 / Screen.size[0], 1.0 / Screen.size[1], 1)
glTranslate(-Screen.size[0] / 2, -Screen.size[1] / 2, 0)
def drawCircle(center, radius):
import math
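    # Squash the circle horizontally; presumably compensates for the display's
    # 4:3 aspect so the projected circle looks round.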
aberration = 3.0 / 4
glBegin(GL_LINE_STRIP)
for angle in range(361):
x = center[0] + math.cos(math.radians(angle)) * radius * aberration
y = center[1] + math.sin(math.radians(angle)) * radius
glVertex2f(x, y)
glEnd()
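# Step sizes for the keyboard calibration controls: 's' is the increment for
# the translation/scale keys, 'sd' the increment (in degrees) for rotations.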
s = 0.01
sd = 1
def c_change(k, v):
global calibration
calibration[k] += v
CMove = {pygame.K_w: lambda: c_change('y', -s),
pygame.K_s: lambda: c_change('y', s),
pygame.K_a: lambda: c_change('x', -s),
pygame.K_d: lambda: c_change('x', s),
'name': "Move center"}
CZoom = {pygame.K_w: lambda: c_change('h', s),
pygame.K_s: lambda: c_change('h', -s),
pygame.K_a: lambda: c_change('w', -s),
pygame.K_d: lambda: c_change('w', s),
'name': "Zoom"}
CParal = {pygame.K_w: lambda: c_change('ax', sd),
pygame.K_s: lambda: c_change('ax', -sd),
pygame.K_a: lambda: c_change('ay', -sd),
pygame.K_d: lambda: c_change('ay', sd),
'name': "Lateral angles"}
CRota = {pygame.K_w: lambda: c_change('z', s),
pygame.K_s: lambda: c_change('z', -s),
pygame.K_a: lambda: c_change('az', -sd),
pygame.K_d: lambda: c_change('az', sd),
'name': "Rotation and Z"}
configurators = (None, CMove, CZoom, CParal, CRota)
configurator = 0
keyPressed = None
def ConfKey(event):
global configurator, keyPressed
if configurators[configurator] and event.type == pygame.KEYDOWN and event.key in configurators[configurator]:
keyPressed = event.key
elif event.type == pygame.KEYUP and event.key == keyPressed:
keyPressed = None
elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
configurator = (configurator + 1) % len(configurators)
if configurators[configurator]:
print "Configurator: " + configurators[configurator]['name']
else:
print "No Configurator"
@atexit.register
def saveCalibration():
print "Saving " + CALIBRATION_FILE
    with open(CALIBRATION_FILE, 'w') as fcalibration:
        json.dump(calibration, fcalibration, sort_keys=True, indent=4)
|
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Audio and video player with simple GUI controls.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import sys
from pyglet.gl import *
import pyglet
from pyglet.window import key
def draw_rect(x, y, width, height):
glBegin(GL_LINE_LOOP)
glVertex2f(x, y)
glVertex2f(x + width, y)
glVertex2f(x + width, y + height)
glVertex2f(x, y + height)
glEnd()
class Control(pyglet.event.EventDispatcher):
x = y = 0
width = height = 10
def __init__(self, parent):
super(Control, self).__init__()
self.parent = parent
def hit_test(self, x, y):
return (self.x < x < self.x + self.width and
self.y < y < self.y + self.height)
def capture_events(self):
self.parent.push_handlers(self)
def release_events(self):
self.parent.remove_handlers(self)
class Button(Control):
charged = False
def draw(self):
if self.charged:
glColor3f(1, 0, 0)
draw_rect(self.x, self.y, self.width, self.height)
glColor3f(1, 1, 1)
self.draw_label()
def on_mouse_press(self, x, y, button, modifiers):
self.capture_events()
self.charged = True
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
self.charged = self.hit_test(x, y)
def on_mouse_release(self, x, y, button, modifiers):
self.release_events()
if self.hit_test(x, y):
self.dispatch_event('on_press')
self.charged = False
Button.register_event_type('on_press')
class TextButton(Button):
def __init__(self, *args, **kwargs):
super(TextButton, self).__init__(*args, **kwargs)
self._text = pyglet.text.Label('', anchor_x='center', anchor_y='center')
def draw_label(self):
self._text.x = self.x + self.width / 2
self._text.y = self.y + self.height / 2
self._text.draw()
def set_text(self, text):
self._text.text = text
text = property(lambda self: self._text.text,
set_text)
class Slider(Control):
THUMB_WIDTH = 6
THUMB_HEIGHT = 10
GROOVE_HEIGHT = 2
def draw(self):
center_y = self.y + self.height / 2
draw_rect(self.x, center_y - self.GROOVE_HEIGHT / 2,
self.width, self.GROOVE_HEIGHT)
pos = self.x + self.value * self.width / (self.max - self.min)
draw_rect(pos - self.THUMB_WIDTH / 2, center_y - self.THUMB_HEIGHT / 2,
self.THUMB_WIDTH, self.THUMB_HEIGHT)
def coordinate_to_value(self, x):
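        # Map a mouse x position (in window pixels) onto the slider's
        # [min, max] value range.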
return float(x - self.x) / self.width * (self.max - self.min) + self.min
def on_mouse_press(self, x, y, button, modifiers):
value = self.coordinate_to_value(x)
self.capture_events()
self.dispatch_event('on_begin_scroll')
self.dispatch_event('on_change', value)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
value = min(max(self.coordinate_to_value(x), self.min), self.max)
self.dispatch_event('on_change', value)
def on_mouse_release(self, x, y, button, modifiers):
self.release_events()
self.dispatch_event('on_end_scroll')
Slider.register_event_type('on_begin_scroll')
Slider.register_event_type('on_end_scroll')
Slider.register_event_type('on_change')
class PlayerWindow(pyglet.window.Window):
GUI_WIDTH = 400
GUI_HEIGHT = 40
GUI_PADDING = 4
GUI_BUTTON_HEIGHT = 16
def __init__(self, player):
super(PlayerWindow, self).__init__(caption='Media Player',
visible=False,
resizable=True)
self.player = player
self.player.push_handlers(self)
# TODO compat #self.player.eos_action = self.player.EOS_PAUSE
self.slider = Slider(self)
self.slider.x = self.GUI_PADDING
self.slider.y = self.GUI_PADDING * 2 + self.GUI_BUTTON_HEIGHT
self.slider.on_begin_scroll = lambda: player.pause()
self.slider.on_end_scroll = lambda: player.play()
self.slider.on_change = lambda value: player.seek(value)
self.play_pause_button = TextButton(self)
self.play_pause_button.x = self.GUI_PADDING
self.play_pause_button.y = self.GUI_PADDING
self.play_pause_button.height = self.GUI_BUTTON_HEIGHT
self.play_pause_button.width = 45
self.play_pause_button.on_press = self.on_play_pause
win = self
self.window_button = TextButton(self)
self.window_button.x = self.play_pause_button.x + \
self.play_pause_button.width + self.GUI_PADDING
self.window_button.y = self.GUI_PADDING
self.window_button.height = self.GUI_BUTTON_HEIGHT
self.window_button.width = 90
self.window_button.text = 'Windowed'
self.window_button.on_press = lambda: win.set_fullscreen(False)
self.controls = [
self.slider,
self.play_pause_button,
self.window_button,
]
x = self.window_button.x + self.window_button.width + self.GUI_PADDING
i = 0
for screen in self.display.get_screens():
screen_button = TextButton(self)
screen_button.x = x
screen_button.y = self.GUI_PADDING
screen_button.height = self.GUI_BUTTON_HEIGHT
screen_button.width = 80
screen_button.text = 'Screen %d' % (i + 1)
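            # Bind the current screen in its own closure; a bare lambda here
            # would capture the loop variable and always target the last screen.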
screen_button.on_press = \
(lambda s: lambda: win.set_fullscreen(True, screen=s))(screen)
self.controls.append(screen_button)
i += 1
x += screen_button.width + self.GUI_PADDING
def on_eos(self):
self.gui_update_state()
def gui_update_source(self):
if self.player.source:
source = self.player.source
self.slider.min = 0.
self.slider.max = source.duration
self.gui_update_state()
def gui_update_state(self):
if self.player.playing:
self.play_pause_button.text = 'Pause'
else:
self.play_pause_button.text = 'Play'
def get_video_size(self):
if not self.player.source or not self.player.source.video_format:
return 0, 0
video_format = self.player.source.video_format
width = video_format.width
height = video_format.height
if video_format.sample_aspect > 1:
width *= video_format.sample_aspect
elif video_format.sample_aspect < 1:
height /= video_format.sample_aspect
return width, height
def set_default_video_size(self):
'''Make the window size just big enough to show the current
video and the GUI.'''
width = self.GUI_WIDTH
height = self.GUI_HEIGHT
video_width, video_height = self.get_video_size()
width = max(width, video_width)
height += video_height
self.set_size(int(width), int(height))
def on_resize(self, width, height):
'''Position and size video image.'''
super(PlayerWindow, self).on_resize(width, height)
self.slider.width = width - self.GUI_PADDING * 2
height -= self.GUI_HEIGHT
if height <= 0:
return
video_width, video_height = self.get_video_size()
if video_width == 0 or video_height == 0:
return
display_aspect = width / float(height)
video_aspect = video_width / float(video_height)
if video_aspect > display_aspect:
self.video_width = width
self.video_height = width / video_aspect
else:
self.video_height = height
self.video_width = height * video_aspect
self.video_x = (width - self.video_width) / 2
self.video_y = (height - self.video_height) / 2 + self.GUI_HEIGHT
def on_mouse_press(self, x, y, button, modifiers):
for control in self.controls:
if control.hit_test(x, y):
control.on_mouse_press(x, y, button, modifiers)
def on_key_press(self, symbol, modifiers):
if symbol == key.SPACE:
self.on_play_pause()
elif symbol == key.ESCAPE:
self.dispatch_event('on_close')
def on_close(self):
self.player.pause()
self.close()
def on_play_pause(self):
if self.player.playing:
self.player.pause()
else:
if self.player.time >= self.player.source.duration:
self.player.seek(0)
self.player.play()
self.gui_update_state()
def on_draw(self):
self.clear()
# Video
if self.player.source and self.player.source.video_format:
self.player.get_texture().blit(self.video_x,
self.video_y,
width=self.video_width,
height=self.video_height)
# GUI
self.slider.value = self.player.time
for control in self.controls:
control.draw()
if __name__ == '__main__':
if len(sys.argv) < 2:
        print('Usage: media_player.py <filename> [<filename> ...]')
sys.exit(1)
have_video = False
for filename in sys.argv[1:]:
player = pyglet.media.Player()
window = PlayerWindow(player)
source = pyglet.media.load(filename)
player.queue(source)
have_video = have_video or bool(source.video_format)
window.gui_update_source()
window.set_default_video_size()
window.set_visible(True)
player.play()
window.gui_update_state()
if not have_video:
pyglet.clock.schedule_interval(lambda dt: None, 0.2)
pyglet.app.run()
|
|
"""Contains the adsorbate class."""
from pygaps import logger
from pygaps.data import ADSORBATE_LIST
from pygaps.units.converter_unit import _PRESSURE_UNITS
from pygaps.units.converter_unit import c_unit
from pygaps.utilities.coolprop_utilities import CP
from pygaps.utilities.coolprop_utilities import thermodynamic_backend
from pygaps.utilities.exceptions import CalculationError
from pygaps.utilities.exceptions import ParameterError
class Adsorbate():
"""
    A unified class descriptor for an adsorbate.
    Its purpose is to expose properties such as the adsorbate name and
    formula, as well as physical properties such as molar mass, vapour
    pressure, etc.
The properties can be either calculated through a wrapper over
CoolProp or supplied in the initial adsorbate creation.
    All parameters passed are saved in a self.properties
    dictionary.
Parameters
----------
name : str
The name which should be used for this adsorbate.
Other Parameters
----------------
alias : list[str]
Other names the same adsorbate might take.
Example: name=propanol, alias=['1-propanol'].
pyGAPS disregards capitalisation (Propanol = propanol = PROPANOL).
formula : str
A chemical formula for the adsorbate in LaTeX form: He/N_{2}/C_{2}H_{4} etc.
backend_name : str
Used for integration with CoolProp/REFPROP. For a list of names
look at the CoolProp `list of fluids
<http://www.coolprop.org/fluid_properties/PurePseudoPure.html#list-of-fluids>`_
molar_mass : float
Custom value for molar mass (otherwise obtained through CoolProp).
saturation_pressure : float
Custom value for saturation pressure (otherwise obtained through CoolProp).
surface_tension : float
Custom value for surface tension (otherwise obtained through CoolProp).
liquid_density : float
Custom value for liquid density (otherwise obtained through CoolProp).
liquid_molar_density : float
Custom value for liquid molar density (otherwise obtained through CoolProp).
gas_density : float
Custom value for gas density (otherwise obtained through CoolProp).
gas_molar_density : float
Custom value for gas molar density (otherwise obtained through CoolProp).
enthalpy_liquefaction : float
Custom value for enthalpy of liquefaction (otherwise obtained through CoolProp).
Notes
-----
The members of the properties dictionary are left at the discretion of the
user, to keep the class extensible. There are, however, some unique
properties which are used by calculations in other modules listed in the
other parameters section above.
These properties can be either calculated by CoolProp (if the adsorbate
exists in CoolProp/REFPROP) or taken from the parameters dictionary. They
are best accessed using the associated function.
Calculated::
my_adsorbate.surface_tension(77)
Value from dictionary::
my_adsorbate.surface_tension(77, calculate=False)
If available, the underlying CoolProp state object
(http://www.coolprop.org/coolprop/LowLevelAPI.html) can be accessed directly
through the backend variable. For example, to get the CoolProp-calculated
critical pressure::
adsorbate.backend.p_critical()
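    A minimal creation sketch (the property values below are illustrative
    assumptions, not reference data)::
        my_adsorbate = Adsorbate(
            'carbon dioxide',
            formula='CO_{2}',
            backend_name='CarbonDioxide',
            molar_mass=44.01,
        )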
"""
# special reserved parameters
_reserved_params = [
"name",
"alias",
"_state",
"_backend_mode",
]
def __init__(
self,
name: str,
store: bool = False,
**properties,
):
"""Instantiate by passing a dictionary with the parameters."""
# Adsorbate name
if name is None:
raise ParameterError("Must provide a name for the created adsorbate.")
self.name = name
# List of aliases
alias = properties.pop('alias', None)
# Generate list of aliases
_name = name.lower()
if alias is None:
self.alias = [_name]
else:
if isinstance(alias, str):
self.alias = [alias.lower()]
else:
self.alias = [a.lower() for a in alias]
if _name not in self.alias:
self.alias.append(_name)
#: Adsorbate properties
self.properties = properties
# CoolProp interaction variables, only generate when called
self._state = None
self._backend_mode = None
# Store reference in internal list
if store:
if self not in ADSORBATE_LIST:
ADSORBATE_LIST.append(self)
def __repr__(self):
"""Print adsorbate id."""
return f"<pygaps.Adsorbate '{self.name}'>"
def __str__(self):
"""Print adsorbate standard name."""
return self.name
def __hash__(self):
"""Override hashing as a name hash."""
return hash(self.name)
def __eq__(self, other):
"""Overload equality operator to include aliases."""
if isinstance(other, Adsorbate):
return self.name == other.name
return other.lower() in self.alias
def __add__(self, other):
"""Overload addition operator to use name."""
return self.name + other
def __radd__(self, other):
"""Overload rev addition operator to use name."""
return other + self.name
def print_info(self):
"""Print a short summary of all the adsorbate parameters."""
string = f"pyGAPS Adsorbate: '{self.name}'\n"
string += f"Aliases: { *self.alias,}\n"
if self.properties:
string += "Other properties: \n"
for prop, val in self.properties.items():
string += (f"\t{prop}: {str(val)}\n")
print(string)
@classmethod
def find(cls, name: str):
"""Get the specified adsorbate from the master list.
Parameters
----------
name : str
The name of the adsorbate to search
Returns
-------
Adsorbate
Instance of class
Raises
------
``ParameterError``
If it does not exist in list.
"""
# Skip search if already adsorbate
if isinstance(name, Adsorbate):
return name
if not isinstance(name, str):
raise ParameterError("Pass a string as an adsorbate name.")
# See if adsorbate exists in master list
try:
return next(ads for ads in ADSORBATE_LIST if ads == name)
except StopIteration:
raise ParameterError(
f"Adsorbate '{name}' does not exist in list of adsorbates. "
"First populate pygaps.ADSORBATE_LIST with required adsorbate class."
) from None
@property
def backend(self):
"""Return the CoolProp state associated with the fluid."""
if (not self._backend_mode or self._backend_mode != thermodynamic_backend()):
self._backend_mode = thermodynamic_backend()
self._state = CP.AbstractState(self._backend_mode, self.backend_name)
return self._state
@property
def formula(self) -> str:
"""Return the adsorbate formula."""
formula = self.properties.get('formula')
if formula is None:
return self.name
return formula
def to_dict(self) -> dict:
"""
Return a dictionary of the adsorbate class.
Is the same dictionary that was used to create it.
Returns
-------
dict
dictionary of all parameters
"""
parameters_dict = {
'name': self.name,
'alias': self.alias,
}
parameters_dict.update(self.properties)
return parameters_dict
def get_prop(self, prop: str):
"""
Return a property from the 'properties' dictionary.
Parameters
----------
prop : str
property name desired
Returns
-------
str/float
Value of property in the properties dict
Raises
------
``ParameterError``
            If the property does not exist
in the class dictionary.
"""
req_prop = self.properties.get(prop)
if req_prop is None:
raise ParameterError(
f"Adsorbate '{self.name}' does not have a property named "
                f"'{prop}' in its 'properties' dictionary. Consider adding it "
"manually if you need it and know its value."
)
return req_prop
@property
def backend_name(self) -> str:
"""
Get the CoolProp interaction name of the adsorbate.
Returns
-------
str
Value of backend_name in the properties dict
Raises
------
``ParameterError``
            If the property does not exist
in the class dictionary.
"""
c_name = self.properties.get("backend_name")
if c_name is None:
raise ParameterError(
f"Adsorbate '{self.name}' does not have a property named "
"backend_name. This must be available for CoolProp interaction."
)
return c_name
def molar_mass(self, calculate: bool = True) -> float:
"""
Return the molar mass of the adsorbate.
Parameters
----------
calculate : bool, optional
Whether to calculate the property or look it up in the properties
dictionary, default - True.
Returns
-------
float
Molar mass in g/mol.
Raises
------
``ParameterError``
If the calculation is not requested and the property does not exist
in the class dictionary.
``CalculationError``
If it cannot be calculated, due to a physical reason.
"""
if calculate:
try:
return self.backend.molar_mass() * 1000
except BaseException as err:
_warn_reading_params(err)
return self.molar_mass(calculate=False)
try:
return self.get_prop("molar_mass")
except ParameterError as err:
_raise_calculation_error(err)
def saturation_pressure(self, temp, unit=None, calculate: bool = True) -> float:
"""
Get the saturation pressure at a particular temperature.
Parameters
----------
temp : float
Temperature at which the pressure is desired in K.
unit : str
Unit in which to return the saturation pressure.
            If not specified, defaults to Pascal.
calculate : bool, optional
Whether to calculate the property or look it up in the properties
dictionary, default - True.
Returns
-------
float
Pressure in unit requested.
Raises
------
``ParameterError``
If the calculation is not requested and the property does not exist
in the class dictionary.
``CalculationError``
If it cannot be calculated, due to a physical reason.
"""
if calculate:
try:
state = self.backend
state.update(CP.QT_INPUTS, 0.0, temp)
sat_p = state.p()
except BaseException as err:
_warn_reading_params(err)
sat_p = self.saturation_pressure(temp, unit=unit, calculate=False)
if unit is not None:
sat_p = c_unit(_PRESSURE_UNITS, sat_p, 'Pa', unit)
return sat_p
try:
return self.get_prop("saturation_pressure")
except ParameterError as err:
_raise_calculation_error(err)
def surface_tension(self, temp, calculate: bool = True) -> float:
"""
Get the surface tension at a particular temperature.
Parameters
----------
temp : float
Temperature at which the surface_tension is desired in K.
calculate : bool, optional
Whether to calculate the property or look it up in the properties
dictionary, default - True.
Returns
-------
float
Surface tension in mN/m.
Raises
------
``ParameterError``
If the calculation is not requested and the property does not exist
in the class dictionary.
``CalculationError``
If it cannot be calculated, due to a physical reason.
"""
if calculate:
try:
state = self.backend
state.update(CP.QT_INPUTS, 0.0, temp)
return state.surface_tension() * 1000
except BaseException as err:
_warn_reading_params(err)
return self.surface_tension(temp, calculate=False)
try:
return self.get_prop("surface_tension")
except ParameterError as err:
_raise_calculation_error(err)
def liquid_density(self, temp, calculate: bool = True) -> float:
"""
Get the liquid density at a particular temperature.
Parameters
----------
temp : float
Temperature at which the liquid density is desired in K.
calculate : bool, optional.
Whether to calculate the property or look it up in the properties
dictionary, default - True.
Returns
-------
float
Liquid density in g/cm3.
Raises
------
``ParameterError``
If the calculation is not requested and the property does not exist
in the class dictionary.
``CalculationError``
If it cannot be calculated, due to a physical reason.
"""
if calculate:
try:
state = self.backend
state.update(CP.QT_INPUTS, 0.0, temp)
return state.rhomass() / 1000
except BaseException as err:
_warn_reading_params(err)
return self.liquid_density(temp, calculate=False)
try:
return self.get_prop("liquid_density")
except ParameterError as err:
_raise_calculation_error(err)
def liquid_molar_density(self, temp, calculate: bool = True) -> float:
"""
Get the liquid molar density at a particular temperature.
Parameters
----------
temp : float
Temperature at which the liquid density is desired in K.
calculate : bool, optional.
Whether to calculate the property or look it up in the properties
dictionary, default - True.
Returns
-------
float
Molar liquid density in mol/cm3.
Raises
------
``ParameterError``
If the calculation is not requested and the property does not exist
in the class dictionary.
``CalculationError``
If it cannot be calculated, due to a physical reason.
"""
if calculate:
try:
state = self.backend
state.update(CP.QT_INPUTS, 0.0, temp)
return state.rhomolar() / 1e6
except BaseException as err:
_warn_reading_params(err)
return self.liquid_molar_density(temp, calculate=False)
try:
return self.get_prop("liquid_molar_density")
except ParameterError as err:
_raise_calculation_error(err)
def gas_density(self, temp, calculate: bool = True) -> float:
"""
        Get the gas density at a particular temperature.
Parameters
----------
temp : float
Temperature at which the gas density is desired in K.
calculate : bool, optional.
Whether to calculate the property or look it up in the properties
dictionary, default - True.
Returns
-------
float
Gas density in g/cm3.
Raises
------
``ParameterError``
If the calculation is not requested and the property does not exist
in the class dictionary.
``CalculationError``
If it cannot be calculated, due to a physical reason.
"""
if calculate:
try:
state = self.backend
state.update(CP.QT_INPUTS, 1.0, temp)
return state.rhomass() / 1000
except BaseException as err:
_warn_reading_params(err)
return self.gas_density(temp, calculate=False)
try:
return self.get_prop("gas_density")
except ParameterError as err:
_raise_calculation_error(err)
def gas_molar_density(self, temp, calculate: bool = True) -> float:
"""
        Get the gas molar density at a particular temperature.
Parameters
----------
temp : float
Temperature at which the gas density is desired in K.
calculate : bool, optional.
Whether to calculate the property or look it up in the properties
dictionary, default - True.
Returns
-------
float
Molar gas density in mol/cm3.
Raises
------
``ParameterError``
If the calculation is not requested and the property does not exist
in the class dictionary.
``CalculationError``
If it cannot be calculated, due to a physical reason.
"""
if calculate:
try:
state = self.backend
state.update(CP.QT_INPUTS, 1.0, temp)
return state.rhomolar() / 1e6
except BaseException as err:
_warn_reading_params(err)
return self.gas_molar_density(temp, calculate=False)
try:
return self.get_prop("gas_molar_density")
except ParameterError as err:
_raise_calculation_error(err)
def enthalpy_liquefaction(self, temp, calculate: bool = True) -> float:
"""
Get the enthalpy of liquefaction at a particular temperature.
Parameters
----------
temp : float
Temperature at which the enthalpy of liquefaction is desired, in K.
calculate : bool, optional
Whether to calculate the property or look it up in the properties
dictionary, default - True.
Returns
-------
float
Enthalpy of liquefaction in kJ/mol.
Raises
------
``ParameterError``
If the calculation is not requested and the property does not exist
in the class dictionary.
``CalculationError``
If it cannot be calculated, due to a physical reason.
"""
if calculate:
try:
state = self.backend
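                # Difference between saturated-vapour (Q=1) and saturated-liquid
                # (Q=0) molar enthalpies, converted from J/mol to kJ/mol.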
state.update(CP.QT_INPUTS, 0.0, temp)
h_liq = state.hmolar()
state.update(CP.QT_INPUTS, 1.0, temp)
h_vap = state.hmolar()
return (h_vap - h_liq) / 1000
except BaseException as err:
_warn_reading_params(err)
return self.enthalpy_liquefaction(temp, calculate=False)
try:
return self.get_prop("enthalpy_liquefaction")
except ParameterError as err:
_raise_calculation_error(err)
def _warn_reading_params(err):
logger.warning(
f"Thermodynamic backend failed with error {err}. "
"Attempting to read parameters dictionary..."
)
def _raise_calculation_error(err):
raise CalculationError(
f"Thermodynamic backend failed (see traceback for error). Also, {err}"
) from err
|
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the SVG0 agent class."""
import copy
from typing import Callable, Dict, Optional
import acme
from acme import specs
from acme.agents.tf.svg0_prior import agent
from acme.tf import savers as tf2_savers
from acme.utils import counting
from acme.utils import loggers
from acme.utils import lp_utils
import dm_env
import launchpad as lp
import reverb
import sonnet as snt
class DistributedSVG0:
"""Program definition for SVG0."""
def __init__(
self,
environment_factory: Callable[[bool], dm_env.Environment],
network_factory: Callable[[specs.BoundedArray], Dict[str, snt.Module]],
num_actors: int = 1,
num_caches: int = 0,
environment_spec: Optional[specs.EnvironmentSpec] = None,
batch_size: int = 256,
prefetch_size: int = 4,
min_replay_size: int = 1000,
max_replay_size: int = 1000000,
samples_per_insert: Optional[float] = 32.0,
sequence_length: int = 10,
sigma: float = 0.3,
discount: float = 0.99,
policy_optimizer: Optional[snt.Optimizer] = None,
critic_optimizer: Optional[snt.Optimizer] = None,
prior_optimizer: Optional[snt.Optimizer] = None,
distillation_cost: Optional[float] = 1e-3,
entropy_regularizer_cost: Optional[float] = 1e-3,
target_update_period: int = 100,
max_actor_steps: Optional[int] = None,
log_every: float = 10.0,
):
if not environment_spec:
environment_spec = specs.make_environment_spec(environment_factory(False))
# TODO(mwhoffman): Make network_factory directly return the struct.
# TODO(mwhoffman): Make the factory take the entire spec.
def wrapped_network_factory(action_spec):
networks_dict = network_factory(action_spec)
networks = agent.SVG0Networks(
policy_network=networks_dict.get('policy'),
critic_network=networks_dict.get('critic'),
prior_network=networks_dict.get('prior', None),)
return networks
self._environment_factory = environment_factory
self._network_factory = wrapped_network_factory
self._environment_spec = environment_spec
self._sigma = sigma
self._num_actors = num_actors
self._num_caches = num_caches
self._max_actor_steps = max_actor_steps
self._log_every = log_every
self._sequence_length = sequence_length
self._builder = agent.SVG0Builder(
# TODO(mwhoffman): pass the config dataclass in directly.
# TODO(mwhoffman): use the limiter rather than the workaround below.
agent.SVG0Config(
discount=discount,
batch_size=batch_size,
prefetch_size=prefetch_size,
target_update_period=target_update_period,
policy_optimizer=policy_optimizer,
critic_optimizer=critic_optimizer,
prior_optimizer=prior_optimizer,
min_replay_size=min_replay_size,
max_replay_size=max_replay_size,
samples_per_insert=samples_per_insert,
sequence_length=sequence_length,
sigma=sigma,
distillation_cost=distillation_cost,
entropy_regularizer_cost=entropy_regularizer_cost,
))
def replay(self):
"""The replay storage."""
return self._builder.make_replay_tables(self._environment_spec,
self._sequence_length)
def counter(self):
return tf2_savers.CheckpointingRunner(counting.Counter(),
time_delta_minutes=1,
subdirectory='counter')
def coordinator(self, counter: counting.Counter):
return lp_utils.StepsLimiter(counter, self._max_actor_steps)
def learner(
self,
replay: reverb.Client,
counter: counting.Counter,
):
"""The Learning part of the agent."""
# Create the networks to optimize (online) and target networks.
online_networks = self._network_factory(self._environment_spec.actions)
target_networks = copy.deepcopy(online_networks)
# Initialize the networks.
online_networks.init(self._environment_spec)
target_networks.init(self._environment_spec)
dataset = self._builder.make_dataset_iterator(replay)
counter = counting.Counter(counter, 'learner')
logger = loggers.make_default_logger(
'learner', time_delta=self._log_every, steps_key='learner_steps')
return self._builder.make_learner(
networks=(online_networks, target_networks),
dataset=dataset,
counter=counter,
logger=logger,
)
def actor(
self,
replay: reverb.Client,
variable_source: acme.VariableSource,
counter: counting.Counter,
) -> acme.EnvironmentLoop:
"""The actor process."""
# Create the behavior policy.
networks = self._network_factory(self._environment_spec.actions)
networks.init(self._environment_spec)
policy_network = networks.make_policy()
# Create the agent.
actor = self._builder.make_actor(
policy_network=policy_network,
adder=self._builder.make_adder(replay),
variable_source=variable_source,
)
# Create the environment.
environment = self._environment_factory(False)
# Create logger and counter; actors will not spam bigtable.
counter = counting.Counter(counter, 'actor')
logger = loggers.make_default_logger(
'actor',
save_data=False,
time_delta=self._log_every,
steps_key='actor_steps')
# Create the loop to connect environment and agent.
return acme.EnvironmentLoop(environment, actor, counter, logger)
def evaluator(
self,
variable_source: acme.VariableSource,
counter: counting.Counter,
logger: Optional[loggers.Logger] = None,
):
"""The evaluation process."""
# Create the behavior policy.
networks = self._network_factory(self._environment_spec.actions)
networks.init(self._environment_spec)
policy_network = networks.make_policy()
# Create the agent.
actor = self._builder.make_actor(
policy_network=policy_network,
variable_source=variable_source,
deterministic_policy=True,
)
# Make the environment.
environment = self._environment_factory(True)
# Create logger and counter.
counter = counting.Counter(counter, 'evaluator')
logger = logger or loggers.make_default_logger(
'evaluator',
time_delta=self._log_every,
steps_key='evaluator_steps',
)
# Create the run loop and return it.
return acme.EnvironmentLoop(environment, actor, counter, logger)
def build(self, name='svg0'):
"""Build the distributed agent topology."""
program = lp.Program(name=name)
with program.group('replay'):
replay = program.add_node(lp.ReverbNode(self.replay))
with program.group('counter'):
counter = program.add_node(lp.CourierNode(self.counter))
if self._max_actor_steps:
with program.group('coordinator'):
_ = program.add_node(lp.CourierNode(self.coordinator, counter))
with program.group('learner'):
learner = program.add_node(lp.CourierNode(self.learner, replay, counter))
with program.group('evaluator'):
program.add_node(lp.CourierNode(self.evaluator, learner, counter))
if not self._num_caches:
# Use our learner as a single variable source.
sources = [learner]
else:
with program.group('cacher'):
# Create a set of learner caches.
sources = []
for _ in range(self._num_caches):
cacher = program.add_node(
lp.CacherNode(
learner, refresh_interval_ms=2000, stale_after_ms=4000))
sources.append(cacher)
with program.group('actor'):
# Add actors which pull round-robin from our variable sources.
for actor_id in range(self._num_actors):
source = sources[actor_id % len(sources)]
program.add_node(lp.CourierNode(self.actor, replay, source, counter))
return program
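# Illustrative launch sketch (environment_factory and network_factory are
# user-supplied callables, not defined in this module):
#   program = DistributedSVG0(environment_factory, network_factory).build()
#   lp.launch(program)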
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class AutoHealActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Predefined action to be taken.
"""
RECYCLE = "Recycle"
LOG_EVENT = "LogEvent"
CUSTOM_ACTION = "CustomAction"
class AzureResourceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Azure resource type.
"""
WEBSITE = "Website"
TRAFFIC_MANAGER = "TrafficManager"
class BackupItemStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Backup status.
"""
IN_PROGRESS = "InProgress"
FAILED = "Failed"
SUCCEEDED = "Succeeded"
TIMED_OUT = "TimedOut"
CREATED = "Created"
SKIPPED = "Skipped"
PARTIALLY_SUCCEEDED = "PartiallySucceeded"
DELETE_IN_PROGRESS = "DeleteInProgress"
DELETE_FAILED = "DeleteFailed"
DELETED = "Deleted"
class BackupRestoreOperationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of the backup.
"""
DEFAULT = "Default"
CLONE = "Clone"
RELOCATION = "Relocation"
SNAPSHOT = "Snapshot"
class BuiltInAuthenticationProvider(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The default authentication provider to use when multiple providers are configured.
This setting is only needed if multiple providers are configured and the unauthenticated client
action is set to "RedirectToLoginPage".
"""
AZURE_ACTIVE_DIRECTORY = "AzureActiveDirectory"
FACEBOOK = "Facebook"
GOOGLE = "Google"
MICROSOFT_ACCOUNT = "MicrosoftAccount"
TWITTER = "Twitter"
class CloneAbilityResult(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Name of app.
"""
CLONEABLE = "Cloneable"
PARTIALLY_CLONEABLE = "PartiallyCloneable"
NOT_CLONEABLE = "NotCloneable"
class ConnectionStringType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of database.
"""
MY_SQL = "MySql"
SQL_SERVER = "SQLServer"
SQL_AZURE = "SQLAzure"
CUSTOM = "Custom"
NOTIFICATION_HUB = "NotificationHub"
SERVICE_BUS = "ServiceBus"
EVENT_HUB = "EventHub"
API_HUB = "ApiHub"
DOC_DB = "DocDb"
REDIS_CACHE = "RedisCache"
POSTGRE_SQL = "PostgreSQL"
class ContinuousWebJobStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Job status.
"""
INITIALIZING = "Initializing"
STARTING = "Starting"
RUNNING = "Running"
PENDING_RESTART = "PendingRestart"
STOPPED = "Stopped"
class CustomHostNameDnsRecordType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Custom DNS record type.
"""
C_NAME = "CName"
A = "A"
class DatabaseType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Database type (e.g. SqlAzure / MySql).
"""
SQL_AZURE = "SqlAzure"
MY_SQL = "MySql"
LOCAL_MY_SQL = "LocalMySql"
POSTGRE_SQL = "PostgreSql"
class DnsVerificationTestResult(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""DNS verification test result.
"""
PASSED = "Passed"
FAILED = "Failed"
SKIPPED = "Skipped"
class FrequencyUnit(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The unit of time for how often the backup should be executed (e.g. for weekly backup, this
should be set to Day and FrequencyInterval should be set to 7)
"""
DAY = "Day"
HOUR = "Hour"
class HostNameType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Hostname type.
"""
VERIFIED = "Verified"
MANAGED = "Managed"
class HostType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates whether the hostname is a standard or repository hostname.
"""
STANDARD = "Standard"
REPOSITORY = "Repository"
class LogLevel(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Log level.
"""
OFF = "Off"
VERBOSE = "Verbose"
INFORMATION = "Information"
WARNING = "Warning"
ERROR = "Error"
class ManagedPipelineMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Managed pipeline mode.
"""
INTEGRATED = "Integrated"
CLASSIC = "Classic"
class ManagedServiceIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of managed service identity.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
class MSDeployLogEntryType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Log entry type
"""
MESSAGE = "Message"
WARNING = "Warning"
ERROR = "Error"
class MSDeployProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Provisioning state
"""
ACCEPTED = "accepted"
RUNNING = "running"
SUCCEEDED = "succeeded"
FAILED = "failed"
CANCELED = "canceled"
class MySqlMigrationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of migration operation to be done
"""
LOCAL_TO_REMOTE = "LocalToRemote"
REMOTE_TO_LOCAL = "RemoteToLocal"
class OperationStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The current status of the operation.
"""
IN_PROGRESS = "InProgress"
FAILED = "Failed"
SUCCEEDED = "Succeeded"
TIMED_OUT = "TimedOut"
CREATED = "Created"
class PublicCertificateLocation(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Public Certificate Location
"""
CURRENT_USER_MY = "CurrentUserMy"
LOCAL_MACHINE_MY = "LocalMachineMy"
UNKNOWN = "Unknown"
class PublishingProfileFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Name of the format. Valid values are:
FileZilla3
WebDeploy -- default
Ftp
"""
FILE_ZILLA3 = "FileZilla3"
WEB_DEPLOY = "WebDeploy"
FTP = "Ftp"
class RouteType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of route this is:
DEFAULT - By default, every app has routes to the local address ranges specified by RFC1918
INHERITED - Routes inherited from the real Virtual Network routes
STATIC - Static route set on the app only
These values will be used for syncing an app's routes with those from a Virtual Network.
"""
DEFAULT = "DEFAULT"
INHERITED = "INHERITED"
STATIC = "STATIC"
class ScmType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""SCM type.
"""
NONE = "None"
DROPBOX = "Dropbox"
TFS = "Tfs"
LOCAL_GIT = "LocalGit"
GIT_HUB = "GitHub"
CODE_PLEX_GIT = "CodePlexGit"
CODE_PLEX_HG = "CodePlexHg"
BITBUCKET_GIT = "BitbucketGit"
BITBUCKET_HG = "BitbucketHg"
EXTERNAL_GIT = "ExternalGit"
EXTERNAL_HG = "ExternalHg"
ONE_DRIVE = "OneDrive"
VSO = "VSO"
class SiteAvailabilityState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Management information availability state for the app.
"""
NORMAL = "Normal"
LIMITED = "Limited"
DISASTER_RECOVERY_MODE = "DisasterRecoveryMode"
class SiteExtensionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Site extension type.
"""
GALLERY = "Gallery"
WEB_ROOT = "WebRoot"
class SiteLoadBalancing(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Site load balancing.
"""
WEIGHTED_ROUND_ROBIN = "WeightedRoundRobin"
LEAST_REQUESTS = "LeastRequests"
LEAST_RESPONSE_TIME = "LeastResponseTime"
WEIGHTED_TOTAL_TRAFFIC = "WeightedTotalTraffic"
REQUEST_HASH = "RequestHash"
class SslState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""SSL type.
"""
DISABLED = "Disabled"
SNI_ENABLED = "SniEnabled"
IP_BASED_ENABLED = "IpBasedEnabled"
class SupportedTlsVersions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""MinTlsVersion: configures the minimum version of TLS required for SSL requests
"""
ONE0 = "1.0"
ONE1 = "1.1"
ONE2 = "1.2"
class TriggeredWebJobStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Job status.
"""
SUCCESS = "Success"
FAILED = "Failed"
ERROR = "Error"
class UnauthenticatedClientAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The action to take when an unauthenticated client attempts to access the app.
"""
REDIRECT_TO_LOGIN_PAGE = "RedirectToLoginPage"
ALLOW_ANONYMOUS = "AllowAnonymous"
class UsageState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""State indicating whether the app has exceeded its quota usage. Read-only.
"""
NORMAL = "Normal"
EXCEEDED = "Exceeded"
class WebJobType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Job type.
"""
CONTINUOUS = "Continuous"
TRIGGERED = "Triggered"
|
|
# Copyright (c) 2015, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import os,sys
import pickle
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem import rdChemReactions, AllChem
from rdkit import Geometry
from rdkit import RDConfig
import itertools, time
test_data = [("good", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M RGP 1 2 1
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R# 0 0 0 0 0 0 0 0 0 2 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M RGP 1 1 2
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0
11.9811 -6.9292 0.0000 R# 0 0 0 0 0 0 0 0 0 2 0 0
1 2 1 0 0 0 0
M RGP 2 1 1 2 2
M END'''),
("bad", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M RGP 1 2 1
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M RGP 1 1 2
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R# 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M RGP 2 1 1 2 2
M END'''),
# chemdraw style
("bad", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M END'''),
("fail", '''$RXN
ISIS 052820091627
2 1
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
-3.2730 -7.0542 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0
-3.9875 -7.4667 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
V 1 halogen.bromine.aromatic
M END
$MOL
-ISIS- 05280916272D
4 3 0 0 0 0 0 0 0 0999 V2000
3.4375 -7.7917 0.0000 R3 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -7.3792 0.0000 B 0 0 0 0 0 0 0 0 0 0 0 0
4.1520 -6.5542 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
4.8664 -7.7917 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
2 3 1 0 0 0 0
1 2 1 0 0 0 0
2 4 1 0 0 0 0
V 2 boronicacid
M END
$MOL
-ISIS- 05280916272D
2 1 0 0 0 0 0 0 0 0999 V2000
11.2667 -7.3417 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
11.9811 -6.9292 0.0000 R2 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0 0 0
M END'''),
]
unused_rlabel_in_product = """$RXN
bug.rxn
ChemDraw06121709062D
1 1
$MOL
2 1 0 0 0 0 0 0 0 0999 V2000
0.1604 0.3798 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.1604 -0.3798 0.0000 R 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0
M END
$MOL
2 1 0 0 0 0 0 0 0 0999 V2000
-1.2690 -1.3345 0.0000 R 0 0 0 0 0 0 0 0 0 1 0 0
1.2690 1.3345 0.0000 R1 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0 0
M END
"""
kekule_rxn = """$RXN
bug.rxn
ChemDraw06121709062D
1 1
$MOL
RDKit 2D
6 6 0 0 0 0 0 0 0 0999 V2000
1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 2 0
3 4 1 0
4 5 2 0
5 6 1 0
6 1 2 0
M END
$MOL
RDKit 2D
6 6 0 0 0 0 0 0 0 0999 V2000
1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 -1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-1.5000 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
-0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
0.7500 1.2990 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
1 2 1 0
2 3 2 0
3 4 1 0
4 5 2 0
5 6 1 0
6 1 2 0
M END
"""
good_res = (0,0,2,1,(((0, 'halogen.bromine.aromatic'),), ((1, 'boronicacid'),)))
bad_res = (3,0,2,1,(((0, 'halogen.bromine.aromatic'),), ((1, 'boronicacid'),)))
class TestCase(unittest.TestCase) :
def test_sanitize(self):
for status, block in test_data:
print("*"*44)
rxna = AllChem.ReactionFromRxnBlock(block)
rxnb = AllChem.ReactionFromRxnBlock(block)
rxna.Initialize()
res = rdChemReactions.PreprocessReaction(rxna)
print(AllChem.ReactionToRxnBlock(rxna))
if status == "good":
self.assertEquals(res, good_res)
elif status == "bad":
self.assertEquals(res, bad_res)
print (">"*44)
rxnb.Initialize()
try:
rdChemReactions.SanitizeRxn(rxnb)
res = rdChemReactions.PreprocessReaction(rxnb)
print(AllChem.ReactionToRxnBlock(rxnb))
self.assertEquals(res, good_res)
assert not status == "fail"
except Exception:
print ("$RXN Failed")
if status == "fail":
continue
raise
def test_unused_rlabel_in_product(self):
rxn = AllChem.ReactionFromRxnBlock(unused_rlabel_in_product)
# test was for a seg fault
rdChemReactions.SanitizeRxn(rxn)
def test_only_aromatize_if_possible(self):
rxn = AllChem.ReactionFromRxnBlock(kekule_rxn)
# test was for a seg fault
groups = rxn.RunReactants([Chem.MolFromSmiles("c1ccccc1")])
print(groups)
self.assertFalse(len(groups))
# check normal sanitization
rdChemReactions.SanitizeRxn(rxn)
groups = rxn.RunReactants([Chem.MolFromSmiles("c1ccccc1")])
self.assertTrue(len(groups[0]))
# now check adjustparams with ONLY aromatize if possible
rxn = AllChem.ReactionFromRxnBlock(kekule_rxn)
rdChemReactions.SanitizeRxn(rxn)
groups = rxn.RunReactants([Chem.MolFromSmiles("c1ccccc1")])
self.assertTrue(len(groups[0]))
def test_github_4162(self):
rxn = rdChemReactions.ReactionFromSmarts(
"[C:1](=[O:2])-[OD1].[N!H0:3]>>[C:1](=[O:2])[N:3]")
rxn_copy = rdChemReactions.ChemicalReaction(rxn)
rdChemReactions.SanitizeRxn(rxn)
rdChemReactions.SanitizeRxn(rxn_copy)
pkl = rxn.ToBinary()
rxn_from_pickle = rdChemReactions.ChemicalReaction(pkl)
rdChemReactions.SanitizeRxn(rxn_from_pickle)
pkl = pickle.dumps(rxn)
rxn_from_pickle = pickle.loads(pkl)
rdChemReactions.SanitizeRxn(rxn_from_pickle)
pkl = rxn_from_pickle.ToBinary()
rxn_from_pickle = rdChemReactions.ChemicalReaction(pkl)
rdChemReactions.SanitizeRxn(rxn_from_pickle)
pkl = pickle.dumps(rxn_from_pickle)
rxn_from_pickle = pickle.loads(pkl)
rdChemReactions.SanitizeRxn(rxn_from_pickle)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
import unittest
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except ImportError:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
IS_WINDOWS = os.name == 'nt'
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'gtest_source_dir': os.path.dirname(sys.argv[0]),
'gtest_build_dir': os.path.dirname(sys.argv[0])}
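# For example, either of the following overrides the build directory
# (the path is illustrative):
#   GTEST_BUILD_DIR=/path/to/build ./my_test.py
#   ./my_test.py --gtest_build_dir=/path/to/build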
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
global _gtest_flags_are_parsed
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('gtest_source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('gtest_build_dir'))
def GetTestExecutablePath(executable_name):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(GetBuildDir(), executable_name))
if IS_WINDOWS and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --gtest_build_dir flag or the GTEST_BUILD_DIR\n'
'environment variable. For convenient use, invoke this script via\n'
'mk_test.py.\n'
# TODO([email protected]): change mk_test.py to test.py after renaming
# the file.
'Please run mk_test.py -h for help.')
    sys.stderr.write(message + '\n')
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
class Subprocess:
def __init__(self, command, working_dir=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards. Execution results are returned
via the following attributes:
      terminated_by_signal True iff the child process has been terminated
by a signal.
      signal Signal that terminated the child process.
exited True iff the child process exited normally.
      exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
Args:
command: A command to run, in the form of sys.argv.
working_dir: A directory to change into.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
cwd=working_dir, universal_newlines=True)
      # communicate returns a tuple with the file object for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
try:
if working_dir is not None:
os.chdir(working_dir)
p = popen2.Popen4(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
unittest.main()
|
|
from datetime import datetime, timedelta
import uuid
from flask.ext.login import UserMixin
from werkzeug import generate_password_hash, check_password_hash
from octopus.core import app
from octopus.modules.account.authorise import Authorise
from octopus.modules.account import dao
from octopus.lib import dataobj
class BasicAccount(dataobj.DataObj, dao.BasicAccountDAO, UserMixin):
"""
Most basic possible account, from which all other account objects must extend
{
"id" : "<unique user id>",
"email" : "<user email address (which they will use to login)>",
"password" : "<hashed password>",
"role" : ["<user role>"],
"reset_token" : "<password reset token>",
"reset_expires" : "<password reset token expiration timestamp>",
"activation_token" : "<account activation token>",
"activation_expires" : "<account activation token expiration timestamp>",
"created_date" : "<date account was created>",
"last_updated" : "<date account was last modified>"
}
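    An illustrative usage sketch (a concrete subclass would normally be used;
    the values are placeholders):
        acc = BasicAccount()
        acc.email = "[email protected]"
        acc.set_password("secret")
        acc.check_password("secret")    # True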
"""
@property
def email(self):
return self._get_single("email", coerce=self._utf8_unicode())
@email.setter
def email(self, val):
self._set_single("email", val, coerce=self._utf8_unicode())
@property
def hashed_password(self):
return self._get_single("password", coerce=self._utf8_unicode())
@hashed_password.setter
def hashed_password(self, val):
self._set_single("password", val, coerce=self._utf8_unicode())
def set_password(self, password):
coerced = self._utf8_unicode()(password)
self._set_single("password", generate_password_hash(coerced), coerce=self._utf8_unicode())
def check_password(self, password):
coerced = self._utf8_unicode()(password)
existing = self.hashed_password
if existing is None:
return False
return check_password_hash(existing, coerced)
def clear_password(self):
self._delete("password")
@property
def reset_token(self):
return self._get_single("reset_token", coerce=self._utf8_unicode())
def set_reset_token(self, token, timeout=None, expires=None):
if expires is None and timeout is None:
raise dataobj.DataSchemaException("You must provide a timeout or an expiry date for the reset token")
if expires is None:
expires = datetime.utcnow() + timedelta(0, timeout)
if not isinstance(expires, basestring):
expires = expires.strftime("%Y-%m-%dT%H:%M:%SZ")
self._set_single("reset_token", token, coerce=self._utf8_unicode())
self._set_single("reset_expires", expires, coerce=self._date_str())
def remove_reset_token(self):
self._delete("reset_token")
self._delete("reset_expires")
@property
def reset_expires(self):
return self._get_single("reset_expires", coerce=self._date_str())
def is_reset_expired(self):
if self.reset_expires is None:
return True
ed = datetime.strptime(self.reset_expires, "%Y-%m-%dT%H:%M:%SZ")
if ed < datetime.utcnow():
return True
return False
def activate_reset_mode(self):
reset_token = uuid.uuid4().hex
self.set_reset_token(reset_token, app.config.get("ACCOUNT_RESET_TIMEOUT", 86400))
@property
def activation_token(self):
return self._get_single("activation_token", coerce=self._utf8_unicode())
def set_activation_token(self, token, timeout=None, expires=None):
if expires is None and timeout is None:
raise dataobj.DataSchemaException("You must provide a timeout or an expiry date for the activation token")
if expires is None:
expires = datetime.utcnow() + timedelta(0, timeout)
if not isinstance(expires, basestring):
expires = expires.strftime("%Y-%m-%dT%H:%M:%SZ")
self._set_single("activation_token", token, coerce=self._utf8_unicode())
self._set_single("activation_expires", expires, coerce=self._date_str())
def remove_activation_token(self):
self._delete("activation_token")
self._delete("activation_expires")
@property
def activation_expires(self):
return self._get_single("activation_expires", coerce=self._date_str())
def is_activation_expired(self):
if self.activation_expires is None:
return True
ed = datetime.strptime(self.activation_expires, "%Y-%m-%dT%H:%M:%SZ")
if ed < datetime.utcnow():
return True
return False
def activate_activation_mode(self):
activation_token = uuid.uuid4().hex
self.set_activation_token(activation_token, app.config.get("ACCOUNT_ACTIVATE_TIMEOUT", 86400))
@property
def is_super(self):
return Authorise.has_role(app.config["ACCOUNT_SUPER_USER_ROLE"], self.role)
def has_role(self, role):
return Authorise.has_role(role, self.role)
@property
def role(self):
return self._get_list("role", coerce=self._utf8_unicode())
def add_role(self, role):
self._add_to_list("role", role, coerce=self._utf8_unicode())
@role.setter
def role(self, role):
self._set_list("role", role, coerce=self._utf8_unicode())
def can_log_in(self):
return True
def remove(self):
self.delete()
class ContactableAccount(dataobj.DataObj):
"""
Extension option for the basic account which adds key user contact details
{
"id" : "<user email address>",
"name" : "<user's full name>",
"loc" : {
"lat" : <latitude>,
"lon" : <longitude>
},
"phone" : "<user's preferred phone number>",
"password" : "<hashed password>",
"role" : ["<user role>"],
"reset_token" : "<password reset token>",
"reset_expires" : "<password reset token expiration timestamp>",
"activation_token" : "<account activation token>",
"activation_expires" : "<account activation token expiration timestamp>",
"created_date" : "<date account was created>",
"last_updated" : "<date account was last modified>"
}
"""
@property
def name(self):
return self._get_single("name", coerce=self._utf8_unicode())
@name.setter
def name(self, val):
self._set_single("name", val, coerce=self._utf8_unicode(), ignore_none=True)
@property
def location(self):
return (self.lat, self.lon)
def set_location(self, lat, lon):
self._set_single("loc.lat", lat, coerce=self._float())
self._set_single("loc.lon", lon, coerce=self._float())
@location.setter
def location(self, val):
if not isinstance(val, tuple):
raise dataobj.DataSchemaException("location must be a tuple")
if len(val) != 2:
raise dataobj.DataSchemaException("location object must be a tuple of lat/lon only")
self._set_single("loc.lat", val[0], coerce=self._float())
self._set_single("loc.lon", val[1], coerce=self._float())
@location.deleter
def location(self):
self._delete("loc.lat")
self._delete("loc.lon")
@property
def lat(self):
return self._get_single("loc.lat", coerce=self._float())
@property
def lon(self):
return self._get_single("loc.lon", coerce=self._float())
@property
def phone(self):
return self._get_single("phone", coerce=self._utf8_unicode())
@phone.setter
def phone(self, val):
self._set_single("phone", val, coerce=self._utf8_unicode())
@phone.deleter
def phone(self):
self._delete("phone")
class MonitoredAccount(dataobj.DataObj):
"""
Extension for the basic account that adds delete/ban options
{
"id" : "<user email address>",
"password" : "<hashed password>",
"role" : ["<user role>"],
"reset_token" : "<password reset token>",
"reset_expires" : "<password reset token expiration timestamp>",
"activation_token" : "<account activation token>",
"activation_expires" : "<account activation token expiration timestamp>",
"created_date" : "<date account was created>",
"last_updated" : "<date account was last modified>",
"admin" : {
"deleted" : true|false,
"banned" : true|false
}
}
"""
@property
def is_deleted(self):
return self._get_single("admin.deleted", coerce=bool)
def remove(self):
self.set_deleted(True)
def set_deleted(self, val):
self._set_single("admin.deleted", val, coerce=bool)
@property
def is_banned(self):
return self._get_single("admin.banned", coerce=bool)
def set_banned(self, val):
self._set_single("admin.banned", val, coerce=bool)
def can_log_in(self):
return not (self.is_deleted or self.is_banned)
|
|
'''
Settings
========
.. versionadded:: 1.0.7
This module provides a complete and extensible framework for adding a
Settings interface to your application. By default, the interface uses
a :class:`SettingsWithSpinner`, which consists of a
:class:`~kivy.uix.spinner.Spinner` (top) to switch between individual
settings panels (bottom). See :ref:`differentlayouts` for some
alternatives.
.. image:: images/settingswithspinner_kivy.jpg
:align: center
A :class:`SettingsPanel` represents a group of configurable options. The
:attr:`SettingsPanel.title` property is used by :class:`Settings` when a panel
is added: it determines the name of the sidebar button. SettingsPanel controls
a :class:`~kivy.config.ConfigParser` instance.
The panel can be automatically constructed from a JSON definition file: you
describe the settings you want and corresponding sections/keys in the
ConfigParser instance... and you're done!
Settings are also integrated into the :class:`~kivy.app.App` class. Use
:meth:`Settings.add_kivy_panel` to configure the Kivy core settings in a panel.
.. _settings_json:
Create a panel from JSON
------------------------
To create a panel from a JSON-file, you need two things:
* a :class:`~kivy.config.ConfigParser` instance with default values
* a JSON file
.. warning::
    The :class:`kivy.config.ConfigParser` is required. You cannot use the
    default ConfigParser from Python's standard library.
You must create and handle the :class:`~kivy.config.ConfigParser`
object. SettingsPanel will read the values from the associated
ConfigParser instance. Make sure you have set default values (using
:attr:`~kivy.config.ConfigParser.setdefaults`) for all the sections/keys
in your JSON file!
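For instance, a minimal sketch of preparing the ConfigParser for the JSON
example shown below (the section/key names match that example)::
    from kivy.config import ConfigParser
    config = ConfigParser()
    # every section/key referenced by the JSON file needs a default value
    config.setdefaults('graphics', {'fullscreen': '0'})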
The JSON file contains structured information to describe the available
settings. Here is an example::
[
{
"type": "title",
"title": "Windows"
},
{
"type": "bool",
"title": "Fullscreen",
"desc": "Set the window in windowed or fullscreen",
"section": "graphics",
"key": "fullscreen"
}
]
Each element in the root list represents a setting that the user can configure.
Only the "type" key is mandatory: an instance of the associated class will be
created and used for the setting - other keys are assigned to corresponding
properties of that class.
============== =================================================
Type Associated class
-------------- -------------------------------------------------
title :class:`SettingTitle`
bool :class:`SettingBoolean`
numeric :class:`SettingNumeric`
options :class:`SettingOptions`
string :class:`SettingString`
path :class:`SettingPath`
============== =================================================
.. versionadded:: 1.1.0
Added :attr:`SettingPath` type
In the JSON example above, the first element is of type "title". It will create
a new instance of :class:`SettingTitle` and apply the rest of the key-value
pairs to the properties of that class, i.e. "title": "Windows" sets the
:attr:`~SettingsPanel.title` property of the panel to "Windows".
To load the JSON example to a :class:`Settings` instance, use the
:meth:`Settings.add_json_panel` method. It will automatically instantiate a
:class:`SettingsPanel` and add it to :class:`Settings`::
from kivy.config import ConfigParser
config = ConfigParser()
config.read('myconfig.ini')
s = Settings()
s.add_json_panel('My custom panel', config, 'settings_custom.json')
s.add_json_panel('Another panel', config, 'settings_test2.json')
# then use the s as a widget...
.. _differentlayouts:
Different panel layouts
-----------------------
A kivy :class:`~kivy.app.App` can automatically create and display a
:class:`Settings` instance. See the :attr:`~kivy.app.App.settings_cls`
documentation for details on how to choose which settings class to
display.
Several pre-built settings widgets are available. All except
:class:`SettingsWithNoMenu` include close buttons triggering the
on_close event.
- :class:`Settings`: Displays settings with a sidebar at the left to
switch between json panels.
- :class:`SettingsWithSidebar`: A trivial subclass of
:class:`Settings`.
- :class:`SettingsWithSpinner`: Displays settings with a spinner at
the top, which can be used to switch between json panels. Uses
:class:`InterfaceWithSpinner` as the
:attr:`~Settings.interface_cls`. This is the default behavior from
Kivy 1.8.0.
- :class:`SettingsWithTabbedPanel`: Displays json panels as individual
tabs in a :class:`~kivy.uix.tabbedpanel.TabbedPanel`. Uses
:class:`InterfaceWithTabbedPanel` as the :attr:`~Settings.interface_cls`.
- :class:`SettingsWithNoMenu`: Displays a single json panel, with no
way to switch to other panels and no close button. This makes it
impossible for the user to exit unless
:meth:`~kivy.app.App.close_settings` is overridden with a different
close trigger! Uses :class:`InterfaceWithNoMenu` as the
:attr:`~Settings.interface_cls`.
You can construct your own settings panels with any layout you choose
by setting :attr:`Settings.interface_cls`. This should be a widget
that displays a json settings panel with some way to switch between
panels. An instance will be automatically created by :class:`Settings`.
Interface widgets may be anything you like, but *must* have a method
add_panel that receives newly created json settings panels for the
interface to display. See the documentation for
:class:`InterfaceWithSidebar` for more information. They may
optionally dispatch an on_close event, for instance if a close button
is clicked. This event is used by :class:`Settings` to trigger its own
on_close event.
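For example, a minimal sketch of picking one of the pre-built classes from an
application (see :attr:`~kivy.app.App.settings_cls` for details)::
    from kivy.app import App
    from kivy.uix.label import Label
    from kivy.uix.settings import SettingsWithTabbedPanel
    class MyApp(App):
        def build(self):
            # show the settings in a TabbedPanel instead of the default layout
            self.settings_cls = SettingsWithTabbedPanel
            return Label(text='Press F1 to open the settings')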
For a complete, working example, please see
:file:`kivy/examples/settings/main.py`.
'''
__all__ = ('Settings', 'SettingsPanel', 'SettingItem', 'SettingString',
'SettingPath', 'SettingBoolean', 'SettingNumeric', 'SettingOptions',
'SettingTitle', 'SettingsWithSidebar', 'SettingsWithSpinner',
'SettingsWithTabbedPanel', 'SettingsWithNoMenu',
'InterfaceWithSidebar', 'ContentPanel', 'MenuSidebar')
import json
import os
from kivy.factory import Factory
from kivy.metrics import dp
from kivy.config import ConfigParser
from kivy.animation import Animation
from kivy.compat import string_types, text_type
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.tabbedpanel import TabbedPanelHeader
from kivy.uix.button import Button
from kivy.uix.filechooser import FileChooserListView
from kivy.uix.scrollview import ScrollView
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty, StringProperty, ListProperty, \
BooleanProperty, NumericProperty, DictProperty
class SettingSpacer(Widget):
# Internal class, not documented.
pass
class SettingItem(FloatLayout):
'''Base class for individual settings (within a panel). This class cannot
be used directly; it is used for implementing the other setting classes.
It builds a row with a title/description (left) and a setting control
(right).
Look at :class:`SettingBoolean`, :class:`SettingNumeric` and
:class:`SettingOptions` for usage examples.
:Events:
`on_release`
Fired when the item is touched and then released.
'''
title = StringProperty('<No title set>')
'''Title of the setting, defaults to '<No title set>'.
:attr:`title` is a :class:`~kivy.properties.StringProperty` and defaults to
'<No title set>'.
'''
desc = StringProperty(None, allownone=True)
'''Description of the setting, rendered on the line below the title.
:attr:`desc` is a :class:`~kivy.properties.StringProperty` and defaults to
None.
'''
disabled = BooleanProperty(False)
'''Indicate if this setting is disabled. If True, all touches on the
setting item will be discarded.
:attr:`disabled` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
section = StringProperty(None)
'''Section of the token inside the :class:`~kivy.config.ConfigParser`
instance.
:attr:`section` is a :class:`~kivy.properties.StringProperty` and defaults
to None.
'''
key = StringProperty(None)
'''Key of the token inside the :attr:`section` in the
:class:`~kivy.config.ConfigParser` instance.
:attr:`key` is a :class:`~kivy.properties.StringProperty` and defaults to
None.
'''
value = ObjectProperty(None)
'''Value of the token according to the :class:`~kivy.config.ConfigParser`
instance. Any change to this value will trigger a
:meth:`Settings.on_config_change` event.
:attr:`value` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
panel = ObjectProperty(None)
'''(internal) Reference to the SettingsPanel for this setting. You don't
need to use it.
:attr:`panel` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
content = ObjectProperty(None)
'''(internal) Reference to the widget that contains the real setting.
As soon as the content object is set, any further call to add_widget will
call the content.add_widget. This is automatically set.
:attr:`content` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
selected_alpha = NumericProperty(0)
'''(internal) Float value from 0 to 1, used to animate the background when
the user touches the item.
:attr:`selected_alpha` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
__events__ = ('on_release', )
def __init__(self, **kwargs):
super(SettingItem, self).__init__(**kwargs)
self.value = self.panel.get_value(self.section, self.key)
def add_widget(self, *largs):
if self.content is None:
return super(SettingItem, self).add_widget(*largs)
return self.content.add_widget(*largs)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
if self.disabled:
return
touch.grab(self)
self.selected_alpha = 1
return super(SettingItem, self).on_touch_down(touch)
def on_touch_up(self, touch):
if touch.grab_current is self:
touch.ungrab(self)
self.dispatch('on_release')
Animation(selected_alpha=0, d=.25, t='out_quad').start(self)
return True
return super(SettingItem, self).on_touch_up(touch)
def on_release(self):
pass
def on_value(self, instance, value):
if not self.section or not self.key:
return
# get current value in config
panel = self.panel
if not isinstance(value, string_types):
value = str(value)
panel.set_value(self.section, self.key, value)
class SettingBoolean(SettingItem):
'''Implementation of a boolean setting on top of a :class:`SettingItem`. It
is visualized with a :class:`~kivy.uix.switch.Switch` widget. By default,
0 and 1 are used for values: you can change them by setting :attr:`values`.
'''
values = ListProperty(['0', '1'])
'''Values used to represent the state of the setting. If you want to use
"yes" and "no" in your ConfigParser instance::
SettingBoolean(..., values=['no', 'yes'])
.. warning::
        You need a minimum of two values; index 0 will be used as False
        and index 1 as True
:attr:`values` is a :class:`~kivy.properties.ListProperty` and defaults to
['0', '1']
'''
class SettingString(SettingItem):
'''Implementation of a string setting on top of a :class:`SettingItem`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
    :class:`~kivy.uix.textinput.TextInput` so the user can enter a custom
value.
'''
popup = ObjectProperty(None, allownone=True)
'''(internal) Used to store the current popup when it's shown.
:attr:`popup` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
textinput = ObjectProperty(None)
'''(internal) Used to store the current textinput from the popup and
to listen for changes.
:attr:`textinput` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
def on_panel(self, instance, value):
if value is None:
return
self.fbind('on_release', self._create_popup)
def _dismiss(self, *largs):
if self.textinput:
self.textinput.focus = False
if self.popup:
self.popup.dismiss()
self.popup = None
def _validate(self, instance):
self._dismiss()
value = self.textinput.text.strip()
self.value = value
def _create_popup(self, instance):
# create popup layout
content = BoxLayout(orientation='vertical', spacing='5dp')
popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
title=self.title, content=content, size_hint=(None, None),
size=(popup_width, '250dp'))
        # create the textinput used for the string input
self.textinput = textinput = TextInput(
text=self.value, font_size='24sp', multiline=False,
size_hint_y=None, height='42sp')
textinput.bind(on_text_validate=self._validate)
self.textinput = textinput
        # construct the content; plain widgets are used as spacers
content.add_widget(Widget())
content.add_widget(textinput)
content.add_widget(Widget())
content.add_widget(SettingSpacer())
        # 2 buttons are created to accept or cancel the current value
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
btn = Button(text='Ok')
btn.bind(on_release=self._validate)
btnlayout.add_widget(btn)
btn = Button(text='Cancel')
btn.bind(on_release=self._dismiss)
btnlayout.add_widget(btn)
content.add_widget(btnlayout)
# all done, open the popup !
popup.open()
class SettingPath(SettingItem):
'''Implementation of a Path setting on top of a :class:`SettingItem`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
:class:`~kivy.uix.filechooser.FileChooserListView` so the user can enter
a custom value.
.. versionadded:: 1.1.0
'''
popup = ObjectProperty(None, allownone=True)
'''(internal) Used to store the current popup when it is shown.
:attr:`popup` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
textinput = ObjectProperty(None)
'''(internal) Used to store the current textinput from the popup and
to listen for changes.
:attr:`textinput` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
show_hidden = BooleanProperty(False)
'''Whether to show 'hidden' filenames. What that means is
operating-system-dependent.
:attr:`show_hidden` is an :class:`~kivy.properties.BooleanProperty` and
defaults to False.
.. versionadded:: 1.10.0
'''
dirselect = BooleanProperty(True)
'''Whether to allow selection of directories.
:attr:`dirselect` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
.. versionadded:: 1.10.0
'''
def on_panel(self, instance, value):
if value is None:
return
self.fbind('on_release', self._create_popup)
def _dismiss(self, *largs):
if self.textinput:
self.textinput.focus = False
if self.popup:
self.popup.dismiss()
self.popup = None
def _validate(self, instance):
self._dismiss()
value = self.textinput.selection
if not value:
return
self.value = os.path.realpath(value[0])
def _create_popup(self, instance):
# create popup layout
content = BoxLayout(orientation='vertical', spacing=5)
popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
title=self.title, content=content, size_hint=(None, 0.9),
width=popup_width)
# create the filechooser
initial_path = self.value or os.getcwd()
self.textinput = textinput = FileChooserListView(
path=initial_path, size_hint=(1, 1),
dirselect=self.dirselect, show_hidden=self.show_hidden)
textinput.bind(on_path=self._validate)
# construct the content
content.add_widget(textinput)
content.add_widget(SettingSpacer())
        # 2 buttons are created to accept or cancel the current value
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
btn = Button(text='Ok')
btn.bind(on_release=self._validate)
btnlayout.add_widget(btn)
btn = Button(text='Cancel')
btn.bind(on_release=self._dismiss)
btnlayout.add_widget(btn)
content.add_widget(btnlayout)
# all done, open the popup !
popup.open()
class SettingNumeric(SettingString):
'''Implementation of a numeric setting on top of a :class:`SettingString`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
    :class:`~kivy.uix.textinput.TextInput` so the user can enter a custom
value.
'''
def _validate(self, instance):
# we know the type just by checking if there is a '.' in the original
# value
is_float = '.' in str(self.value)
self._dismiss()
try:
if is_float:
self.value = text_type(float(self.textinput.text))
else:
self.value = text_type(int(self.textinput.text))
except ValueError:
return
class SettingOptions(SettingItem):
'''Implementation of an option list on top of a :class:`SettingItem`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
list of options from which the user can select.
'''
options = ListProperty([])
    '''List of all available options. This must be a list of string items;
    otherwise, it will crash.
:attr:`options` is a :class:`~kivy.properties.ListProperty` and defaults
to [].
'''
popup = ObjectProperty(None, allownone=True)
'''(internal) Used to store the current popup when it is shown.
:attr:`popup` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
def on_panel(self, instance, value):
if value is None:
return
self.fbind('on_release', self._create_popup)
def _set_option(self, instance):
self.value = instance.text
self.popup.dismiss()
def _create_popup(self, instance):
# create the popup
content = BoxLayout(orientation='vertical', spacing='5dp')
popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
content=content, title=self.title, size_hint=(None, None),
size=(popup_width, '400dp'))
popup.height = len(self.options) * dp(55) + dp(150)
# add all the options
content.add_widget(Widget(size_hint_y=None, height=1))
uid = str(self.uid)
for option in self.options:
state = 'down' if option == self.value else 'normal'
btn = ToggleButton(text=option, state=state, group=uid)
btn.bind(on_release=self._set_option)
content.add_widget(btn)
# finally, add a cancel button to return on the previous panel
content.add_widget(SettingSpacer())
btn = Button(text='Cancel', size_hint_y=None, height=dp(50))
btn.bind(on_release=popup.dismiss)
content.add_widget(btn)
# and open the popup !
popup.open()
class SettingTitle(Label):
'''A simple title label, used to organize the settings in sections.
'''
title = Label.text
panel = ObjectProperty(None)
class SettingsPanel(GridLayout):
    '''This class is used to construct settings panels, for use with a
:class:`Settings` instance or subclass.
'''
title = StringProperty('Default title')
'''Title of the panel. The title will be reused by the :class:`Settings` in
the sidebar.
'''
config = ObjectProperty(None, allownone=True)
'''A :class:`kivy.config.ConfigParser` instance. See module documentation
for more information.
'''
settings = ObjectProperty(None)
'''A :class:`Settings` instance that will be used to fire the
`on_config_change` event.
'''
def __init__(self, **kwargs):
if 'cols' not in kwargs:
self.cols = 1
super(SettingsPanel, self).__init__(**kwargs)
def on_config(self, instance, value):
if value is None:
return
if not isinstance(value, ConfigParser):
            raise Exception('Invalid config object, you must use a '
                            'kivy.config.ConfigParser, not another one!')
def get_value(self, section, key):
'''Return the value of the section/key from the :attr:`config`
ConfigParser instance. This function is used by :class:`SettingItem` to
get the value for a given section/key.
If you don't want to use a ConfigParser instance, you might want to
override this function.
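        A minimal sketch of such an override, backing the panel with a plain
        dict instead of a ConfigParser (illustrative only; set_value would need
        the same treatment)::
            class DictSettingsPanel(SettingsPanel):
                data = {}  # {(section, key): value}, illustrative storage
                def get_value(self, section, key):
                    return self.data.get((section, key))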
'''
config = self.config
if not config:
return
return config.get(section, key)
def set_value(self, section, key, value):
current = self.get_value(section, key)
if current == value:
return
config = self.config
if config:
config.set(section, key, value)
config.write()
settings = self.settings
if settings:
settings.dispatch('on_config_change',
config, section, key, value)
class InterfaceWithSidebar(BoxLayout):
'''The default Settings interface class. It displays a sidebar menu
with names of available settings panels, which may be used to switch
which one is currently displayed.
See :meth:`~InterfaceWithSidebar.add_panel` for information on the
method you must implement if creating your own interface.
This class also dispatches an event 'on_close', which is triggered
when the sidebar menu's close button is released. If creating your
own interface widget, it should also dispatch such an event which
will automatically be caught by :class:`Settings` and used to
trigger its own 'on_close' event.
'''
menu = ObjectProperty()
'''(internal) A reference to the sidebar menu widget.
:attr:`menu` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
content = ObjectProperty()
'''(internal) A reference to the panel display widget (a
:class:`ContentPanel`).
:attr:`content` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
__events__ = ('on_close', )
def __init__(self, *args, **kwargs):
super(InterfaceWithSidebar, self).__init__(*args, **kwargs)
self.menu.close_button.bind(
on_release=lambda j: self.dispatch('on_close'))
def add_panel(self, panel, name, uid):
'''This method is used by Settings to add new panels for possible
display. Any replacement for ContentPanel *must* implement
this method.
:Parameters:
`panel`: :class:`SettingsPanel`
It should be stored and the interface should provide a way to
switch between panels.
`name`:
The name of the panel as a string. It may be used to represent
the panel but isn't necessarily unique.
`uid`:
A unique int identifying the panel. It should be used to
identify and switch between panels.
'''
self.menu.add_item(name, uid)
self.content.add_panel(panel, name, uid)
def on_close(self, *args):
pass
class InterfaceWithSpinner(BoxLayout):
'''A settings interface that displays a spinner at the top for
switching between panels.
The workings of this class are considered internal and are not
    documented. See :class:`InterfaceWithSidebar` for
information on implementing your own interface class.
'''
__events__ = ('on_close', )
menu = ObjectProperty()
'''(internal) A reference to the sidebar menu widget.
:attr:`menu` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
content = ObjectProperty()
'''(internal) A reference to the panel display widget (a
:class:`ContentPanel`).
    :attr:`content` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
def __init__(self, *args, **kwargs):
super(InterfaceWithSpinner, self).__init__(*args, **kwargs)
self.menu.close_button.bind(
on_release=lambda j: self.dispatch('on_close'))
def add_panel(self, panel, name, uid):
'''This method is used by Settings to add new panels for possible
display. Any replacement for ContentPanel *must* implement
this method.
:Parameters:
`panel`: :class:`SettingsPanel`
It should be stored and the interface should provide a way to
switch between panels.
`name`:
The name of the panel as a string. It may be used to represent
the panel but may not be unique.
`uid`:
A unique int identifying the panel. It should be used to
identify and switch between panels.
'''
self.content.add_panel(panel, name, uid)
self.menu.add_item(name, uid)
def on_close(self, *args):
pass
class ContentPanel(ScrollView):
'''A class for displaying settings panels. It displays a single
settings panel at a time, taking up the full size and shape of the
ContentPanel. It is used by :class:`InterfaceWithSidebar` and
:class:`InterfaceWithSpinner` to display settings.
'''
panels = DictProperty({})
'''(internal) Stores a dictionary mapping settings panels to their uids.
:attr:`panels` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
container = ObjectProperty()
'''(internal) A reference to the GridLayout that contains the
settings panel.
:attr:`container` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
current_panel = ObjectProperty(None)
'''(internal) A reference to the current settings panel.
:attr:`current_panel` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
current_uid = NumericProperty(0)
'''(internal) A reference to the uid of the current settings panel.
:attr:`current_uid` is a
:class:`~kivy.properties.NumericProperty` and defaults to 0.
'''
def add_panel(self, panel, name, uid):
'''This method is used by Settings to add new panels for possible
display. Any replacement for ContentPanel *must* implement
this method.
:Parameters:
`panel`: :class:`SettingsPanel`
It should be stored and displayed when requested.
`name`:
The name of the panel as a string. It may be used to represent
the panel.
`uid`:
A unique int identifying the panel. It should be stored and
used to identify panels when switching.
'''
self.panels[uid] = panel
if not self.current_uid:
self.current_uid = uid
def on_current_uid(self, *args):
'''The uid of the currently displayed panel. Changing this will
automatically change the displayed panel.
:Parameters:
`uid`:
A panel uid. It should be used to retrieve and display
a settings panel that has previously been added with
:meth:`add_panel`.
'''
uid = self.current_uid
if uid in self.panels:
if self.current_panel is not None:
self.remove_widget(self.current_panel)
new_panel = self.panels[uid]
self.add_widget(new_panel)
self.current_panel = new_panel
return True
return False # New uid doesn't exist
def add_widget(self, widget):
if self.container is None:
super(ContentPanel, self).add_widget(widget)
else:
self.container.add_widget(widget)
def remove_widget(self, widget):
self.container.remove_widget(widget)
class Settings(BoxLayout):
'''Settings UI. Check module documentation for more information on how
to use this class.
:Events:
`on_config_change`: ConfigParser instance, section, key, value
Fired when the section's key-value pair of a ConfigParser changes.
            .. warning::
value will be str/unicode type, regardless of the setting
type (numeric, boolean, etc)
`on_close`
Fired by the default panel when the Close button is pressed.
'''
interface = ObjectProperty(None)
'''(internal) Reference to the widget that will contain, organise and
display the panel configuration panel widgets.
:attr:`interface` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
interface_cls = ObjectProperty(InterfaceWithSidebar)
'''The widget class that will be used to display the graphical
interface for the settings panel. By default, it displays one Settings
panel at a time with a sidebar to switch between them.
:attr:`interface_cls` is an
:class:`~kivy.properties.ObjectProperty` and defaults to
:class:`InterfaceWithSidebar`.
.. versionchanged:: 1.8.0
If you set a string, the :class:`~kivy.factory.Factory` will be used to
resolve the class.
'''
__events__ = ('on_close', 'on_config_change')
def __init__(self, *args, **kargs):
self._types = {}
super(Settings, self).__init__(*args, **kargs)
self.add_interface()
self.register_type('string', SettingString)
self.register_type('bool', SettingBoolean)
self.register_type('numeric', SettingNumeric)
self.register_type('options', SettingOptions)
self.register_type('title', SettingTitle)
self.register_type('path', SettingPath)
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
super(Settings, self).on_touch_down(touch)
return True
def register_type(self, tp, cls):
'''Register a new type that can be used in the JSON definition.
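        For example, a minimal sketch of registering a hypothetical
        ``SettingColor`` class (illustrative only, not part of this module) so
        that ``"type": "color"`` can be used in a JSON panel definition::
            s = Settings()
            s.register_type('color', SettingColor)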
'''
self._types[tp] = cls
def on_close(self, *args):
pass
def add_interface(self):
'''(Internal) creates an instance of :attr:`Settings.interface_cls`,
and sets it to :attr:`~Settings.interface`. When json panels are
created, they will be added to this interface which will display them
to the user.
'''
cls = self.interface_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
interface = cls()
self.interface = interface
self.add_widget(interface)
self.interface.bind(on_close=lambda j: self.dispatch('on_close'))
def on_config_change(self, config, section, key, value):
pass
def add_json_panel(self, title, config, filename=None, data=None):
'''Create and add a new :class:`SettingsPanel` using the configuration
`config` with the JSON definition `filename`.
Check the :ref:`settings_json` section in the documentation for more
information about JSON format and the usage of this function.
'''
panel = self.create_json_panel(title, config, filename, data)
uid = panel.uid
if self.interface is not None:
self.interface.add_panel(panel, title, uid)
def create_json_panel(self, title, config, filename=None, data=None):
'''Create new :class:`SettingsPanel`.
.. versionadded:: 1.5.0
Check the documentation of :meth:`add_json_panel` for more information.
'''
if filename is None and data is None:
raise Exception('You must specify either the filename or data')
if filename is not None:
with open(filename, 'r') as fd:
data = json.loads(fd.read())
else:
data = json.loads(data)
        if type(data) != list:
            raise ValueError('The JSON root element must be a list')
panel = SettingsPanel(title=title, settings=self, config=config)
for setting in data:
# determine the type and the class to use
if 'type' not in setting:
                raise ValueError('One setting is missing the "type" element')
ttype = setting['type']
cls = self._types.get(ttype)
if cls is None:
raise ValueError(
'No class registered to handle the <%s> type' %
setting['type'])
            # create an instance of the class, without the type attribute
del setting['type']
str_settings = {}
for key, item in setting.items():
str_settings[str(key)] = item
instance = cls(panel=panel, **str_settings)
# instance created, add to the panel
panel.add_widget(instance)
return panel
def add_kivy_panel(self):
'''Add a panel for configuring Kivy. This panel acts directly on the
kivy configuration. Feel free to include or exclude it in your
configuration.
See :meth:`~kivy.app.App.use_kivy_settings` for information on
enabling/disabling the automatic kivy panel.
'''
from kivy import kivy_data_dir
from kivy.config import Config
from os.path import join
self.add_json_panel('Kivy', Config,
join(kivy_data_dir, 'settings_kivy.json'))
class SettingsWithSidebar(Settings):
'''A settings widget that displays settings panels with a sidebar to
switch between them. This is the default behaviour of
:class:`Settings`, and this widget is a trivial wrapper subclass.
'''
class SettingsWithSpinner(Settings):
'''A settings widget that displays one settings panel at a time with a
spinner at the top to switch between them.
'''
def __init__(self, *args, **kwargs):
self.interface_cls = InterfaceWithSpinner
super(SettingsWithSpinner, self).__init__(*args, **kwargs)
class SettingsWithTabbedPanel(Settings):
'''A settings widget that displays settings panels as pages in a
:class:`~kivy.uix.tabbedpanel.TabbedPanel`.
'''
__events__ = ('on_close', )
def __init__(self, *args, **kwargs):
self.interface_cls = InterfaceWithTabbedPanel
super(SettingsWithTabbedPanel, self).__init__(*args, **kwargs)
def on_close(self, *args):
pass
class SettingsWithNoMenu(Settings):
'''A settings widget that displays a single settings panel with *no*
Close button. It will not accept more than one Settings panel. It
is intended for use in programs with few enough settings that a
full panel switcher is not useful.
.. warning::
This Settings panel does *not* provide a Close
button, and so it is impossible to leave the settings screen
unless you also add other behaviour or override
:meth:`~kivy.app.App.display_settings` and
:meth:`~kivy.app.App.close_settings`.
'''
def __init__(self, *args, **kwargs):
self.interface_cls = InterfaceWithNoMenu
super(SettingsWithNoMenu, self).__init__(*args, **kwargs)
class InterfaceWithNoMenu(ContentPanel):
'''The interface widget used by :class:`SettingsWithNoMenu`. It
stores and displays a single settings panel.
This widget is considered internal and is not documented. See the
:class:`ContentPanel` for information on defining your own content
widget.
'''
def add_widget(self, widget):
if self.container is not None and len(self.container.children) > 0:
raise Exception(
'ContentNoMenu cannot accept more than one settings panel')
super(InterfaceWithNoMenu, self).add_widget(widget)
class InterfaceWithTabbedPanel(FloatLayout):
'''The content widget used by :class:`SettingsWithTabbedPanel`. It
stores and displays Settings panels in tabs of a TabbedPanel.
This widget is considered internal and is not documented. See
:class:`InterfaceWithSidebar` for information on defining your own
interface widget.
'''
tabbedpanel = ObjectProperty()
close_button = ObjectProperty()
__events__ = ('on_close', )
def __init__(self, *args, **kwargs):
super(InterfaceWithTabbedPanel, self).__init__(*args, **kwargs)
self.close_button.bind(on_release=lambda j: self.dispatch('on_close'))
def add_panel(self, panel, name, uid):
scrollview = ScrollView()
scrollview.add_widget(panel)
if not self.tabbedpanel.default_tab_content:
self.tabbedpanel.default_tab_text = name
self.tabbedpanel.default_tab_content = scrollview
else:
panelitem = TabbedPanelHeader(text=name, content=scrollview)
self.tabbedpanel.add_widget(panelitem)
def on_close(self, *args):
pass
class MenuSpinner(BoxLayout):
    '''The menu class used by :class:`SettingsWithSpinner`. It provides a
    spinner with an entry for each settings panel.
This widget is considered internal and is not documented. See
:class:`MenuSidebar` for information on menus and creating your own menu
class.
'''
selected_uid = NumericProperty(0)
spinner = ObjectProperty()
panel_names = DictProperty({})
spinner_text = StringProperty()
close_button = ObjectProperty()
def add_item(self, name, uid):
values = self.spinner.values
if name in values:
i = 2
while name + ' {}'.format(i) in values:
i += 1
name = name + ' {}'.format(i)
self.panel_names[name] = uid
self.spinner.values.append(name)
if not self.spinner.text:
self.spinner.text = name
def on_spinner_text(self, *args):
text = self.spinner_text
self.selected_uid = self.panel_names[text]
class MenuSidebar(FloatLayout):
'''The menu used by :class:`InterfaceWithSidebar`. It provides a
sidebar with an entry for each settings panel, which the user may
click to select.
'''
selected_uid = NumericProperty(0)
'''The uid of the currently selected panel. This may be used to switch
between displayed panels, e.g. by binding it to the
:attr:`~ContentPanel.current_uid` of a :class:`ContentPanel`.
:attr:`selected_uid` is a
:class:`~kivy.properties.NumericProperty` and defaults to 0.
'''
buttons_layout = ObjectProperty(None)
'''(internal) Reference to the GridLayout that contains individual
settings panel menu buttons.
:attr:`buttons_layout` is an
:class:`~kivy.properties.ObjectProperty` and defaults to None.
'''
close_button = ObjectProperty(None)
'''(internal) Reference to the widget's Close button.
    :attr:`close_button` is an
:class:`~kivy.properties.ObjectProperty` and defaults to None.
'''
def add_item(self, name, uid):
'''This method is used to add new panels to the menu.
:Parameters:
`name`:
The name (a string) of the panel. It should be used
to represent the panel in the menu.
`uid`:
                The uid (an int) of the panel. It should be used internally
to represent the panel and used to set self.selected_uid when
the panel is changed.
'''
label = SettingSidebarLabel(text=name, uid=uid, menu=self)
        if self.buttons_layout is not None:
            if len(self.buttons_layout.children) == 0:
                label.selected = True
            self.buttons_layout.add_widget(label)
def on_selected_uid(self, *args):
'''(internal) unselects any currently selected menu buttons, unless
they represent the current panel.
'''
for button in self.buttons_layout.children:
if button.uid != self.selected_uid:
button.selected = False
class SettingSidebarLabel(Label):
# Internal class, not documented.
selected = BooleanProperty(False)
uid = NumericProperty(0)
menu = ObjectProperty(None)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
self.selected = True
self.menu.selected_uid = self.uid
if __name__ == '__main__':
from kivy.app import App
class SettingsApp(App):
def build(self):
s = Settings()
s.add_kivy_panel()
s.bind(on_close=self.stop)
return s
SettingsApp().run()
|
|
"""
@name: Modules/Core/Config/config_tools.py
@author: D. Brian Kimmel
@contact: [email protected]
@copyright: (c) 2014-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Jul 15, 2014
@Summary: This handles config files.
mypy Modules/Core/Config/config_tools.py
"""
__updated__ = '2020-02-19'
__version_info__ = (20, 1, 1)
__version__ = '.'.join(map(str, __version_info__))
# Import system type stuff
import os
import datetime
import importlib
from typing import Any, List, Union
from ruamel.yaml import YAML
from ruamel.yaml.compat import StringIO
from ruamel.yaml.comments import CommentedMap
# Import PyMh files
from Modules.Core.Config.login import LoginInformation
from Modules.Core.Config import HostInformation, AccessInformation, RoomLocationInformation, ConfigFileInformation
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
from Modules.House.Family.family import DeviceFamilyInformation, FamilyInformation
from Modules.Core.Drivers.interface import DriverInterfaceInformation, get_device_driver_Api
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.ConfigTools ')
CONFIG_SUFFIX = '.yaml'
class MyYAML(YAML):
def dump(self, data, stream=None, **kw):
inefficient = False
if stream is None:
inefficient = True
stream = StringIO()
YAML.dump(self, data, stream, **kw)
if inefficient:
return stream.getvalue()
yaml = MyYAML() # or typ='safe'/'unsafe' etc
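# Example (illustrative): because of the StringIO fallback above, dump() returns
# the formatted text directly when no stream is given:
#   l_text = yaml.dump({'Room': {'Name': 'Kitchen'}})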
cm = CommentedMap()
class FileLookup:
""" Get the absolute path of a config file.
"""
def _lookup_config_dir(self) -> str:
""" Find the config dir.
        A future enhancement is to allow a hierarchy of possible config directories.
The ultimate top level location is '/etc/pyhouse'.
/etc/pyhouse
~/.pyhouse
./.pyhouse
        Only '/etc/pyhouse' is implemented so far, as it is suitable for the Raspberry Pi where we will surely have root access.
        @return: the configuration directory ('/etc/pyhouse' is the default)
"""
return '/etc/pyhouse'
def _search_for_config_file(self, p_name: str, p_dir: str) -> Union[None, str]:
"""
@param p_name: is the file to find
@param p_dir: is the dir tree to search for the file
@return: the absolute path of the file or None if not found.
"""
# LOG.debug('Finding file:"{}" In dir:"{}"'.format(p_name, p_dir))
# print('Looking for:"{}"; in Dir:"{}"'.format(p_name, p_dir))
for l_root, _l_dirs, l_files in os.walk(p_dir):
# print('Root:{}; Dirs:{}; Files:{}'.format(l_root, _l_dirs, l_files))
if p_name in l_files:
l_path = os.path.join(l_root, p_name)
return l_path
# LOG.warning('Not Found "{}"'.format(p_name))
return None
def find_config_file(self, p_name):
""" Given a name like 'computer' or 'Computer', find any config file 'computer.yaml'.
@return: the absolute path of the file or None if not found.
"""
# LOG.debug('Finding Config file:"{}"'.format(p_name))
l_filename = p_name + CONFIG_SUFFIX
l_dir = self._lookup_config_dir()
l_ret = self._search_for_config_file(l_filename, l_dir)
return l_ret
class Tools(FileLookup):
"""
"""
m_pyhouse_obj = None
def __init__(self, p_pyhouse_obj) -> None:
LOG.debug('Init')
self.m_pyhouse_obj = p_pyhouse_obj
def get_modules_api(self, p_module_list, p_path):
""" import a list of modules all within the same path
@param p_module_list: is a list of config files to look for and import their modules
        @param p_path: is the starting point to look for the module to import.
"""
def extract_fields(self, p_obj, p_config, required_list=[], allowed_list=[], groupfield_list=[]):
"""
        @param p_obj: is the python object that will contain the config information. Must have a sentinel value of None.
@param p_config: is the yaml(json) fragment that contains the data
@param required_list: is a list of fields that must be in the config data
@param allowed_list: additional fields that may be in the config data.
@param groupfield_list: are fields that have sub-entries
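        A minimal sketch of typical use (this mirrors extract_host_group below):
            l_obj = HostInformation()
            self.extract_fields(l_obj, p_config, required_list=['Name', 'Port'], allowed_list=['IPv4', 'IPv6'])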
"""
# LOG.warning('Extracting fields for obj {}'.format(type(p_obj)))
for l_key, l_value in p_config.items():
# LOG.debug('Key: {}; Value: {}'.format(l_key, l_value))
if l_key in groupfield_list:
l_extr = 'extract_' + l_key
                LOG.debug('Groupfield Extracting {}'.format(l_extr))
continue
# LOG.debug('Extract - Key:"{}"; Value:"{}" '.format(l_key, l_value))
setattr(p_obj, l_key, l_value)
#
for l_key in [l_attr for l_attr in dir(p_obj) if not l_attr.startswith('_') and not callable(getattr(p_obj, l_attr))]:
# LOG.debug('Now checking key: {} - {}'.format(l_key, p_config[l_key]))
if getattr(p_obj, l_key) == None: # Key is missing
# LOG.debug('No key defined: {}'.format(l_key))
if l_key in required_list:
LOG.warning('Config entry "{}" is missing.'.format(l_key))
continue
else: # Key is Present
# LOG.debug('Key defined: {}; Value: {}'.format(l_key, l_value))
if l_key not in allowed_list + required_list:
LOG.warning('Config entry "{}" is not permitted {}.'.format(l_key, allowed_list))
continue
return p_obj
def find_module_list(self, p_modules: List) -> List:
""" Find python modules (or packages) that have a config file.
If it has a config file, it will be imported later, otherwise it is not loaded therefore saving memory.
        @param p_modules: is a list of Module/Package names to search for config files.
"""
l_list = []
# LOG.info('Search for config files for: {}'.format(p_modules))
for l_part in p_modules:
l_path = self.find_config_file(l_part.lower())
if l_path != None:
l_list.append(l_part)
LOG.info(' Found config file for "{}"'.format(l_part))
else:
LOG.info('Missing config file for "{}"'.format(l_part))
# LOG.debug('Found config files for: {}'.format(l_list))
return l_list
def _do_import(self, p_name, p_path):
""" This will import a module.
Used when we discover that the module is needed because:
It is required
            Configuration calls for it.
@param p_name: is the name of the module ('pandora')
@param p_path: is the relative path to the module ('Modules.House.Entertainment.Pandora')
@return: a pointer to the module or None
"""
l_path = p_path + '.' + p_name.lower()
# LOG.debug('Importing\n\tModule: {}\n\tPath: {}'.format(p_name, l_path))
try:
l_ret = importlib.import_module(l_path)
except ImportError as e_err:
l_msg = 'PROG ERROR importing module: "{}", Path: "{}"\n\tErr:{}.'.format(p_name.lower(), l_path, e_err)
LOG.error(l_msg)
l_ret = None
# LOG.debug('Imported "{}" ({})'.format(p_name, l_path))
return l_ret
def import_module_get_api(self, p_module, p_path):
""" import a module with a path
@param p_module: is a module name ("Cameras")
@param p_path: is the starting point to look for the module to import.
@return: an initialized Api or None
"""
l_module_name = p_module
# LOG.info('Get Module Api pointer for "{}" on path "{}"'.format(l_module_name, p_path))
l_ret = self._do_import(l_module_name, p_path)
try:
# LOG.debug(PrettyFormatAny.form(l_ret, 'Module'))
l_api = l_ret.Api(self.m_pyhouse_obj)
except Exception as e_err:
LOG.error('ERROR - Initializing Module: "{}"\n\tError: {}'.format(p_module, e_err))
# LOG.error('Ref: {}'.format(PrettyFormatAny.form(l_ret, 'ModuleRef')))
l_api = None
# LOG.debug('Imported: {}'.format(l_ret))
return l_api
def import_module_list(self, p_modules: List, p_module_path: str) -> dict:
"""
        This is separate from find_module_list because sometimes extra modules have to be imported but have no config file.
@param p_module_path: the place to find the modules - e.g. 'Modules.House'
@return: a dict key=module_name, value=pointer to module Api class.
"""
l_modules: dict = {}
for l_part in p_modules:
l_path = p_module_path
if l_path.endswith('.'):
l_path = p_module_path + l_part
# LOG.debug('Starting import of Part: "{}" at "{}"'.format(l_part, l_path))
l_api = self.import_module_get_api(l_part, l_path)
l_modules[l_part.lower()] = l_api
# LOG.info('Loaded Module: {}'.format(l_modules.keys()))
return l_modules
def yaml_dump_struct(self, p_yaml: Any) -> str:
"""
"""
l_ret = '-Start- {}\n'.format(type(p_yaml))
if isinstance(p_yaml, dict):
l_ret += '-Dict- {}\tLen: {}\n'.format(type(p_yaml), len(p_yaml))
l_ret += '-Attr- {}\n'.format(dir(p_yaml))
if hasattr(p_yaml, 'ca'):
l_ret += '-attr:ca- {}\n'.format(p_yaml.ca)
elif hasattr(p_yaml, 'fa'):
l_ret += '-attr:fa- {}\n'.format(p_yaml.fa)
elif hasattr(p_yaml, 'lc'):
l_ret += '-attr:lc- {}\n'.format(p_yaml.lc)
elif hasattr(p_yaml, 'items'):
l_ret += '-attr:items- {}\n'.format(p_yaml.items)
elif hasattr(p_yaml, 'anchor'):
l_ret += '-attr:anchor- {}\n'.format(p_yaml.anchor)
elif hasattr(p_yaml, 'keys'):
l_ret += '-attr:keys- {}\n'.format(p_yaml.keys)
elif hasattr(p_yaml, 'tag'):
                l_ret += '-attr:tag- {}\n'.format(p_yaml.tag)
elif hasattr(p_yaml, 'values'):
l_ret += '-attr:values- {}\n'.format(p_yaml.values)
else:
l_ret += '-noattr- {}\n'.format(dir(p_yaml))
for l_yaml in p_yaml:
self.yaml_dump_struct(p_yaml[l_yaml])
elif isinstance(p_yaml, list):
l_ret += '-List- {}\n'.format(type(p_yaml))
if hasattr(p_yaml, 'ca'):
                l_ret += '-Attr:ca- {}\n'.format(p_yaml.ca)
for _idx, l_yaml in enumerate(p_yaml):
self.yaml_dump_struct(l_yaml)
else:
l_ret += '-4- {}\n'.format(p_yaml)
for l_yaml in p_yaml:
self.yaml_dump_struct(p_yaml[l_yaml])
else:
l_ret += '-Unk-\n'
l_ret += '-5- {}\n'.format(p_yaml)
return l_ret
class SubFields(Tools):
""" Get config sub-fields such as Hosts:, Access:, Rooms: etc.
"""
def _get_name_password(self, p_config):
"""
"""
l_obj = LoginInformation()
l_required = ['Name', 'Password']
for l_key, l_value in p_config.items():
setattr(l_obj, l_key, l_value)
for l_key in [l_attr for l_attr in dir(l_obj) if not l_attr.startswith('_') and not callable(getattr(l_obj, l_attr))]:
if getattr(l_obj, l_key) == None and l_key in l_required:
                LOG.warning('Config is missing an entry for "{}"'.format(l_key))
return l_obj
def extract_access_group(self, p_config):
"""
"""
# LOG.warning('Getting Access')
l_obj = AccessInformation()
l_required = ['Name', 'Password']
l_allowed = ['ApiKey', 'AccessKey']
self.extract_fields(l_obj, p_config, required_list=l_required, allowed_list=l_allowed)
return l_obj
def extract_family_group(self, p_config):
"""
Extract the family information when it is given.
Also, create a PyHouse_obj.House.Family entry so we can load the families that were defined in the config files.
@param p_config: is the 'Family' ordereddict
@return: the device object
"""
# LOG.warning('Getting device Family')
l_family_obj = FamilyInformation()
l_device_obj = DeviceFamilyInformation()
l_required = ['Name', 'Address']
l_allowed = ['Type']
self.extract_fields(l_device_obj, p_config, required_list=l_required, allowed_list=l_allowed)
l_key = l_device_obj.Name = l_device_obj.Name.capitalize()
l_family_obj.Name = l_device_obj.Name
# LOG.debug(PrettyFormatAny.form(l_device_obj, 'Device'))
# LOG.debug(PrettyFormatAny.form(l_family_obj, 'Family'))
if l_key not in self.m_pyhouse_obj.House.Family:
LOG.info('Adding Family: "{}"'.format(l_key))
self.m_pyhouse_obj.House.Family[l_key] = l_family_obj
return l_device_obj
def extract_host_group(self, p_config):
"""
@param p_config: is the 'Host' ordereddict
"""
# LOG.warning('Getting Host')
l_obj = HostInformation()
l_required = ['Name', 'Port']
l_allowed = ['IPv4', 'IPv6']
self.extract_fields(l_obj, p_config, required_list=l_required, allowed_list=l_allowed)
return l_obj
def extract_interface_group(self, p_config):
""" Get the Interface sub-fields
Yaml:
- Name: TestPlm
Interface:
Type: Serial
Baud: 19200,8,N,1
Port: /dev/ttyUSB0
Host: Laptop-05
"""
# LOG.warning('Getting Interface')
l_obj = DriverInterfaceInformation()
l_required = ['Type', 'Host', 'Port']
l_allowed = ['ApiKey', 'AccessKey', 'Baud']
self.extract_fields(l_obj, p_config, required_list=l_required, allowed_list=l_allowed)
#
# LOG.debug('Getting driver Api')
if l_obj.Host.lower() == self.m_pyhouse_obj.Computer.Name.lower():
l_obj._isLocal = True
l_driver = get_device_driver_Api(self.m_pyhouse_obj, l_obj)
l_obj._DriverApi = l_driver
# LOG.debug(PrettyFormatAny.form(l_obj, 'Interface'))
return l_obj
def extract_room_group(self, p_config):
"""
"""
# LOG.warning('Getting Room')
l_obj = RoomLocationInformation()
try:
for l_key, l_value in p_config.items():
# LOG.debug('RoomKey:{}; Value:{}'.format(l_key, l_value))
setattr(l_obj, l_key, l_value)
return l_obj
except:
l_obj.Name = p_config
return l_obj
class YamlCreate:
""" For creating and appending to yaml files.
Lights:
- Name: Front Door
Room: Outside
Family:
Name: Insteon
Address: 11.11.11
- Name: Garage
Room: Outside
Dimmable: true
Family:
Name: Insteon
Address: 22.22.22
ordereddict(
[
(
'Lights',
[
ordereddict(
[('Name', 'Front Door'), ('Room', 'Outside'), ('Family', ordereddict([('Name', 'Insteon'), ('Address', '11.11.11')]))]),
ordereddict([('Name', 'Garage'), ('Room', 'Outside'), ('Dimmable', True), ('Family', ordereddict([('Name', 'Insteon'), ('Address', '22.22.22')]))]
)
]
)
]
)
"""
def create_yaml(self, p_tag: str, p_value=None) -> Any:
""" create a yaml structure with a nested-map.
Yaml = ordereddict([('p_tag', p_value)])
p_tag: p_value
@param p_tag: is the top level map string
@return: ordereddict([('p_tag', None)])
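        Example (illustrative):
            l_frag = self.create_yaml('Rooms')  # -> ordereddict([('Rooms', None)])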
"""
if p_tag == None:
            LOG.error('Create requires a concrete tag (not "None"); "ERROR_TAG" is used as the tag instead!')
p_tag = 'ERROR_TAG'
l_val = p_tag + ':'
if p_value != None:
l_val += ' ' + p_value
l_yaml = MyYAML()
l_yaml.indent(mapping=2, sequence=4, offset=2)
l_data = l_yaml.load(l_val)
return l_data
def XXXadd_key_value_to_map(self, p_yaml, p_key, _p_value):
""" Add a key,Value pair to a map
Test:
Key: Value
New Key: New Value <== Added
@param p_yaml: is the fragment where the addition is to go.
@param p_tag: is a list of tags to add the K,V entry below. The tags are relative to the top of the yaml fragment.
"""
p_yaml.append(p_key)
# print('Yaml: {}'.format(p_yaml))
def add_dict_entry(self, p_yaml, _p_key, p_add_dict):
""" Add a key,Value pair to a map
Test:
Key: Value
New Key: New Value <== Added
@param p_yaml: is the fragment where the addition is to go.
@param p_add_dict: is the dict to add
"""
# print('Yaml: {}'.format(p_yaml))
for l_key, l_val in p_add_dict.items():
# print('Adding: {} : {}'.format(l_key, l_val))
setattr(p_yaml, l_key, l_val)
return p_yaml
def add_list_entry(self, p_yaml, p_key, p_value=None):
"""
Insert a list entry into the list fragment that is the surrounding yaml.
        @param p_yaml: is the list fragment where the addition is to go.
        @param p_key: is the key to insert
        @param p_value: is the value to store under the new key
"""
        l_ix = len(p_yaml) - 2  # This is the index where the new object needs to be inserted.
p_yaml.insert(l_ix, p_key, 'Xyz')
p_yaml[p_key] = p_value
return p_yaml
def XXXadd_obj(self, p_yaml, p_key, _p_tag):
""" Add a new ordereddict to the yaml after the Key location
@param p_yaml: is the yaml fragment that contains p_key (Rooms)
@param p_key: is the key we will add a new tag into (Room)
@param p_tag: is the
"""
l_working = p_yaml[p_key]
p_obj = l_working
print('Working: {}'.format(l_working))
for l_key in [l_attr for l_attr in dir(p_obj) if not l_attr.startswith('_') and not callable(getattr(l_working, l_attr))]:
_l_val = getattr(l_working, l_key)
# setattr(l_config, l_key, l_val)
pass
def XXXadd_to_obj(self, p_yaml, p_key, p_obj):
"""
"""
l_working = p_yaml[p_key]
# print('Working: {}'.format(l_working))
for l_key in [l_attr for l_attr in dir(p_obj) if not l_attr.startswith('_') and not callable(getattr(l_working, l_attr))]:
_l_val = getattr(l_working, l_key)
# setattr(l_config, l_key, l_val)
class YamlRead:
"""
"""
class YamlWrite:
"""
"""
def add_updated_comment(self, p_contents):
""" Add or modify a comment for when the yaml file was last updated / written.
Light: # Updated 2020-01-02
- Name: Light-01
Room: Kitchen
@param p_contents: is the formatted yaml contents.
@return: the updated contents with the added information
"""
l_ret = p_contents
# Find existing comments if any
# Insert missing comment
# Update the updated comment
return l_ret
class Yaml(YamlRead, YamlWrite, YamlCreate, Tools):
def __init__(self, p_pyhouse_obj):
"""
"""
# LOG.debug('Initializing')
self.m_pyhouse_obj = p_pyhouse_obj
def _find_config_node(self, p_filename):
""" Search the config dir to find the yaml config file.
If unit testing, we must find the file in the source tree.
@return: a ConfigFileInformation() filled in.
"""
l_filename = p_filename.lower()
l_node = ConfigFileInformation()
l_node.Name = l_filename
l_node.Path = self.find_config_file(l_filename)
self.m_pyhouse_obj._Config[l_filename] = l_node
return l_node
def find_first_element(self, p_ordered):
""" Return the first element from an ordered collection
or an arbitrary element from an unordered collection.
Raise StopIteration if the collection is empty.
"""
return next(iter(p_ordered))
def _read_yaml(self, p_filename):
""" Find the Yaml file and read it in.
Save file location
@return: a ConfigFileInformation() filled in
"""
l_node = self._find_config_node(p_filename.lower())
if l_node == None:
LOG.warning('Config file "{}" not found.'.format(p_filename))
return None
l_yaml = MyYAML(typ='rt')
l_yaml.allow_duplicate_keys = True
if l_node.Path == None:
LOG.warning('Config file "{}" was not found.'.format(p_filename))
return None
try:
with open(l_node.Path, 'r') as l_file:
l_data = l_yaml.load(l_file)
except Exception as e_err:
LOG.error('Config file read error; {}\n\tFile: "{}"'.format(e_err, p_filename))
LOG.error(PrettyFormatAny.form(l_node, 'Node'))
return None
# LOG.info('Loaded config file "{}" '.format(p_filename))
return l_data
def _write_yaml(self, p_filename, p_data, addnew=False):
"""
@param p_data: is the yaml data to be written.
@param p_filename: is the name of the read in yaml file 'rooms.yaml'
@param addnew: defaults to false, will add '-new' to the saved filename.
"""
try:
l_path = self.m_pyhouse_obj._Config[p_filename].Path
except Exception as e_err:
l_path = '/etc/pyhouse/'
LOG.error('Bad file {}'.format(e_err))
l_now = datetime.datetime.now()
p_data.insert(0, 'Skip', 'x', comment="Updated: " + str(l_now))
if addnew:
l_path += '-new'
l_yaml = MyYAML(typ='rt')
l_yaml.indent(mapping=2, sequence=4, offset=2)
l_yaml.version = (1, 2)
with open(l_path, 'w+') as l_file:
l_yaml.dump(p_data, l_file)
LOG.debug('Saved Yaml file "{}"'.format(l_path))
def _x(self):
LOG.debug(PrettyFormatAny.form(self.m_pyhouse_obj, 'Dummy'))
class Api(SubFields, Yaml):
""" This is the interface to the config system.
"""
m_pyhouse_obj = None
m_yaml = None
def __init__(self, p_pyhouse_obj):
# LOG.debug('Initializing')
self.m_pyhouse_obj = p_pyhouse_obj
self.m_yaml = Yaml(p_pyhouse_obj)
def read_config_file(self, p_filename):
""" Main config file read routine
@param p_filename: is the name of the config file to read (without .yaml)
        @return: the parsed yaml data or None if there is no such config file
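        Example (illustrative):
            l_yaml = self.read_config_file('rooms')  # loads /etc/pyhouse/rooms.yaml if present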
"""
l_ret = self.m_yaml._read_yaml(p_filename)
# LOG.debug(PrettyFormatAny.form(l_ret, 'Config'))
return l_ret
def write_config_file(self, p_filename, p_data, addnew=False):
""" Main config file write routine
"""
l_ret = self.m_yaml._write_yaml(p_filename, p_data, addnew)
return l_ret
def read_device_config(self):
"""
"""
def write_device_config(self):
"""
"""
# ## END DBK
|
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2017-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
from edb.testbase import server as tb
class TestEdgeQLLinkToScalarTypes(tb.QueryTestCase):
'''The scope is to test unusual scalar links.'''
SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas',
'inventory.esdl')
SETUP = os.path.join(os.path.dirname(__file__), 'schemas',
'inventory_setup.edgeql')
async def test_edgeql_links_basic_02(self):
await self.assert_query_result(
r'''
SELECT Item {
name,
tag_set1,
tag_set2,
tag_array,
} ORDER BY .name;
''',
[
{
'name': 'ball',
'tag_set1': {'plastic', 'round'},
'tag_set2': {'plastic', 'round'},
'tag_array': None,
}, {
'name': 'chair',
'tag_set1': {'wood', 'rectangle'},
'tag_set2': [],
'tag_array': ['wood', 'rectangle'],
}, {
'name': 'ectoplasm',
'tag_set1': [],
'tag_set2': [],
'tag_array': None,
}, {
'name': 'floor lamp',
'tag_set1': {'metal', 'plastic'},
'tag_set2': {'metal', 'plastic'},
'tag_array': ['metal', 'plastic'],
}, {
'name': 'mystery toy',
'tag_set1': [],
'tag_set2': [],
'tag_array': None,
}, {
'name': 'table',
'tag_set1': {'wood', 'rectangle'},
'tag_set2': {'wood', 'rectangle'},
'tag_array': ['wood', 'rectangle'],
}, {
'name': 'teapot',
'tag_set1': [],
'tag_set2': [],
'tag_array': ['ceramic', 'round'],
}, {
'name': 'tv',
'tag_set1': [],
'tag_set2': {'plastic', 'rectangle'},
'tag_array': ['plastic', 'rectangle'],
},
]
)
async def test_edgeql_links_map_scalars_01(self):
await self.assert_query_result(
r'''
SELECT Item {
name,
tag_set1 ORDER BY Item.tag_set1 DESC,
tag_set2 ORDER BY Item.tag_set2 ASC,
} ORDER BY .name;
''',
[
{
'name': 'ball',
'tag_set1': ['round', 'plastic'],
'tag_set2': ['plastic', 'round'],
}, {
'name': 'chair',
'tag_set1': ['wood', 'rectangle'],
'tag_set2': [],
}, {
'name': 'ectoplasm',
'tag_set1': [],
'tag_set2': [],
}, {
'name': 'floor lamp',
'tag_set1': ['plastic', 'metal'],
'tag_set2': ['metal', 'plastic'],
}, {
'name': 'mystery toy',
'tag_set1': [],
'tag_set2': [],
}, {
'name': 'table',
'tag_set1': ['wood', 'rectangle'],
'tag_set2': ['rectangle', 'wood'],
}, {
'name': 'teapot',
'tag_set1': [],
'tag_set2': [],
}, {
'name': 'tv',
'tag_set1': [],
'tag_set2': ['plastic', 'rectangle'],
},
]
)
async def test_edgeql_links_map_scalars_02(self):
await self.assert_query_result(
r'''
SELECT Item {
name,
tag_set1 ORDER BY Item.tag_set1 DESC LIMIT 1,
tag_set2 ORDER BY Item.tag_set2 ASC OFFSET 1,
} ORDER BY .name;
''',
[
{
'name': 'ball',
'tag_set1': ['round'],
'tag_set2': ['round'],
}, {
'name': 'chair',
'tag_set1': ['wood'],
'tag_set2': [],
}, {
'name': 'ectoplasm',
'tag_set1': [],
'tag_set2': [],
}, {
'name': 'floor lamp',
'tag_set1': ['plastic'],
'tag_set2': ['plastic'],
}, {
'name': 'mystery toy',
'tag_set1': [],
'tag_set2': [],
}, {
'name': 'table',
'tag_set1': ['wood'],
'tag_set2': ['wood'],
}, {
'name': 'teapot',
'tag_set1': [],
'tag_set2': [],
}, {
'name': 'tv',
'tag_set1': [],
'tag_set2': ['rectangle'],
},
]
)
async def test_edgeql_links_map_scalars_03(self):
await self.assert_query_result(
r'''
SELECT Item {
name,
tag_set1 FILTER Item.tag_set1 > 'p',
tag_set2 FILTER Item.tag_set2 < 'w',
} ORDER BY .name;
''',
[
{
'name': 'ball',
'tag_set1': {'plastic', 'round'},
'tag_set2': {'plastic', 'round'},
}, {
'name': 'chair',
'tag_set1': {'wood', 'rectangle'},
'tag_set2': [],
}, {
'name': 'ectoplasm',
'tag_set1': [],
'tag_set2': [],
}, {
'name': 'floor lamp',
'tag_set1': {'plastic'},
'tag_set2': {'metal', 'plastic'},
}, {
'name': 'mystery toy',
'tag_set1': [],
'tag_set2': [],
}, {
'name': 'table',
'tag_set1': {'wood', 'rectangle'},
'tag_set2': {'rectangle'},
}, {
'name': 'teapot',
'tag_set1': [],
'tag_set2': [],
}, {
'name': 'tv',
'tag_set1': [],
'tag_set2': {'plastic', 'rectangle'},
},
]
)
async def test_edgeql_links_set_01(self):
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER 'plastic' = .tag_set1
ORDER BY .name;
''',
[
{'name': 'ball'},
{'name': 'floor lamp'},
]
)
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER 'plastic' = .tag_set2
ORDER BY .name;
''',
[
{'name': 'ball'},
{'name': 'floor lamp'},
{'name': 'tv'},
]
)
async def test_edgeql_links_set_02(self):
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER 'plastic' IN .tag_set1
ORDER BY .name;
''',
[
{'name': 'ball'},
{'name': 'floor lamp'},
]
)
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER 'plastic' IN .tag_set2
ORDER BY .name;
''',
[
{'name': 'ball'},
{'name': 'floor lamp'},
{'name': 'tv'},
]
)
async def test_edgeql_links_set_03(self):
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER
array_agg(Item.tag_set1 ORDER BY Item.tag_set1) =
['rectangle', 'wood']
ORDER BY .name;
''',
[
{'name': 'chair'},
{'name': 'table'},
]
)
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER
array_agg(Item.tag_set2 ORDER BY Item.tag_set2) =
['rectangle', 'wood']
ORDER BY .name;
''',
[
{'name': 'table'},
]
)
async def test_edgeql_links_set_04(self):
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER .tag_set1 = {'rectangle', 'wood'}
ORDER BY .name;
''',
[
{'name': 'chair'},
{'name': 'table'},
]
)
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER .tag_set2 = {'rectangle', 'wood'}
ORDER BY .name;
''',
[
{'name': 'table'},
{'name': 'tv'},
]
)
async def test_edgeql_links_set_05(self):
await self.assert_query_result(
r'''
# subsets
#
SELECT Item {name}
FILTER .tag_set1 IN {'rectangle', 'wood'}
ORDER BY .name;
''',
[
{'name': 'chair'},
{'name': 'table'},
]
)
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER .tag_set2 IN {'rectangle', 'wood'}
ORDER BY .name;
''',
[
{'name': 'table'},
{'name': 'tv'},
]
)
async def test_edgeql_links_set_06(self):
await self.assert_query_result(
r'''
SELECT Item {
name,
foo := (
# XXX: check test_edgeql_expr_alias for failures first
SELECT _ := Item.tag_set1
FILTER _ = {'rectangle', 'wood'}
),
bar := (
# XXX: check test_edgeql_expr_alias for failures first
SELECT _ := Item.tag_set2
FILTER _ = {'rectangle', 'wood'}
),
}
ORDER BY .name;
''',
[
{
'name': 'ball',
'foo': [],
'bar': [],
}, {
'name': 'chair',
'foo': {'wood', 'rectangle'},
'bar': [],
}, {
'name': 'ectoplasm',
'foo': [],
'bar': [],
}, {
'name': 'floor lamp',
'foo': [],
'bar': [],
}, {
'name': 'mystery toy',
'foo': [],
'bar': [],
}, {
'name': 'table',
'foo': {'wood', 'rectangle'},
'bar': {'wood', 'rectangle'},
}, {
'name': 'teapot',
'foo': [],
'bar': [],
}, {
'name': 'tv',
'foo': [],
'bar': {'rectangle'},
},
],
)
async def test_edgeql_links_set_07(self):
await self.assert_query_result(
r'''
# subsets
SELECT Item {name}
FILTER count( (
# XXX: check test_edgeql_expr_alias for failures first
SELECT _ := Item.tag_set1
FILTER _ IN {'rectangle', 'wood'}
)) = 2
ORDER BY .name;
''',
[
{'name': 'chair'},
{'name': 'table'},
]
)
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER count( (
# XXX: check test_edgeql_expr_alias for failures first
SELECT _ := Item.tag_set2
FILTER _ IN {'rectangle', 'wood'}
)) = 2
ORDER BY .name;
''',
[
{'name': 'table'},
],
)
async def test_edgeql_links_set_08(self):
await self.assert_query_result(
r'''
# match sets
WITH
cmp := {'rectangle', 'wood'},
cmp_count := count(cmp)
SELECT Item {name}
FILTER
cmp_count = count(Item.tag_set1)
AND
cmp_count = count(DISTINCT (Item.tag_set1 UNION cmp))
ORDER BY .name;
''',
[
{'name': 'chair'},
{'name': 'table'},
]
)
await self.assert_query_result(
r'''
WITH
cmp := {'rectangle', 'wood'},
cmp_count := count(cmp)
SELECT Item {name}
FILTER
cmp_count = count(.tag_set2)
AND
cmp_count = count(DISTINCT (.tag_set2 UNION cmp))
ORDER BY .name;
''',
[
{'name': 'table'},
],
)
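    # The count-based idiom above relies on a simple fact: if
    # count(tag_set) = count(cmp) and DISTINCT (tag_set UNION cmp) still has
    # only count(cmp) elements, the two sets contain exactly the same tags
    # (assuming the stored set itself holds no duplicate values).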
async def test_edgeql_links_set_10(self):
await self.assert_query_result(
r'''
# same as previous, but with a different syntax, leading
# to a different failure scenario
WITH
cmp := {'rectangle', 'wood'},
cmp_count := count(cmp)
# includes tag_set1 in the shape
SELECT Item {name, tag_set1}
FILTER
cmp_count = count(Item.tag_set1)
AND
cmp_count = count(DISTINCT (Item.tag_set1 UNION cmp))
ORDER BY .name;
''',
[
{'name': 'chair', 'tag_set1': {'rectangle', 'wood'}},
{'name': 'table', 'tag_set1': {'rectangle', 'wood'}},
]
)
await self.assert_query_result(
r'''
WITH
cmp := {'rectangle', 'wood'},
cmp_count := count(cmp)
                # includes tag_set2 in the shape
SELECT Item {name, tag_set2}
FILTER
cmp_count = count(Item.tag_set2)
AND
cmp_count = count(DISTINCT (Item.tag_set2 UNION cmp))
ORDER BY .name;
''',
[
{'name': 'table', 'tag_set2': {'rectangle', 'wood'}},
],
)
async def test_edgeql_links_set_11(self):
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER
array_agg(Item.tag_set1 ORDER BY Item.tag_set1) =
array_agg(Item.tag_set2 ORDER BY Item.tag_set2)
ORDER BY .name;
''',
[
{'name': 'ball'},
{'name': 'ectoplasm'},
{'name': 'floor lamp'},
{'name': 'mystery toy'},
{'name': 'table'},
{'name': 'teapot'},
],
)
async def test_edgeql_links_set_12(self):
await self.assert_query_result(
r'''
# find an item with a unique quality
WITH
I2 := Item
SELECT Item {
name,
unique := (
SELECT _ := Item.tag_set1
FILTER _ NOT IN (
(SELECT I2 FILTER I2 != Item).tag_set1
)
)
}
ORDER BY .name;
''',
[
{
'name': 'ball',
'unique': ['round']
}, {
'name': 'chair',
'unique': []
}, {
'name': 'ectoplasm',
'unique': []
}, {
'name': 'floor lamp',
'unique': ['metal']
}, {
'name': 'mystery toy',
'unique': []
}, {
'name': 'table',
'unique': []
}, {
'name': 'teapot',
'unique': []
}, {
'name': 'tv',
'unique': []
},
],
)
async def test_edgeql_links_set_13(self):
await self.assert_query_result(
r'''
# find an item with a unique quality
WITH
I2 := Item
SELECT Item {
name,
unique := count( (
SELECT _ := Item.tag_set1
FILTER _ NOT IN (
(SELECT I2 FILTER I2 != Item).tag_set1
)
))
}
FILTER .unique > 0
ORDER BY .name;
''',
[
{
'name': 'ball',
'unique': 1,
}, {
'name': 'floor lamp',
'unique': 1,
},
],
)
async def test_edgeql_links_set_14(self):
await self.assert_query_result(
r'''
# find an item with a unique quality
WITH
I2 := Item
SELECT Item {
name,
unique := (
# XXX: check test_edgeql_expr_alias for failures first
SELECT _ := Item.tag_set1
FILTER _ NOT IN (
(SELECT I2 FILTER I2 != Item).tag_set1
)
)
}
FILTER count(.unique) > 0
ORDER BY .name;
''',
[
{
'name': 'ball',
'unique': ['round'],
}, {
'name': 'floor lamp',
'unique': ['metal'],
},
],
)
async def test_edgeql_links_set_15(self):
await self.assert_query_result(
r'''
# subsets
SELECT Item {name}
FILTER .tag_set1 IN {'wood', 'plastic'}
ORDER BY count((
SELECT _ := Item.tag_set1
FILTER _ IN {'rectangle', 'plastic', 'wood'}
)) DESC THEN .name;
''',
[
{'name': 'chair'},
{'name': 'table'},
{'name': 'ball'},
{'name': 'floor lamp'},
],
)
async def test_edgeql_links_array_01(self):
await self.assert_query_result(
r'''
# just a simple unpack
SELECT Item {
name,
unpack := (SELECT array_unpack(Item.tag_array))
}
ORDER BY .name;
''',
[
{
'name': 'ball',
'unpack': []
}, {
'name': 'chair',
'unpack': {'rectangle', 'wood'}
}, {
'name': 'ectoplasm',
'unpack': []
}, {
'name': 'floor lamp',
'unpack': {'metal', 'plastic'}
}, {
'name': 'mystery toy',
'unpack': []
}, {
'name': 'table',
'unpack': {'rectangle', 'wood'}
}, {
'name': 'teapot',
'unpack': {'ceramic', 'round'}
}, {
'name': 'tv',
'unpack': {'plastic', 'rectangle'}
},
],
)
async def test_edgeql_links_array_02(self):
await self.assert_query_result(
r'''
# just a simple unpack
SELECT Item {
name,
unpack := array_unpack(Item.tag_array)
}
ORDER BY .name;
''',
[
{
'name': 'ball',
'unpack': []
}, {
'name': 'chair',
'unpack': {'rectangle', 'wood'}
}, {
'name': 'ectoplasm',
'unpack': []
}, {
'name': 'floor lamp',
'unpack': {'metal', 'plastic'}
}, {
'name': 'mystery toy',
'unpack': []
}, {
'name': 'table',
'unpack': {'rectangle', 'wood'}
}, {
'name': 'teapot',
'unpack': {'ceramic', 'round'}
}, {
'name': 'tv',
'unpack': {'plastic', 'rectangle'}
},
],
)
async def test_edgeql_links_array_03(self):
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER 'metal' IN array_unpack(.tag_array)
ORDER BY .name;
''',
[
{'name': 'floor lamp'}
],
)
async def test_edgeql_links_array_04(self):
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER 'metal' = array_unpack(.tag_array)
ORDER BY .name;
''',
[
{'name': 'floor lamp'}
],
)
async def test_edgeql_links_array_05(self):
await self.assert_query_result(
r'''
SELECT Item {name}
# array_get is used to safely default to {}
FILTER array_get(.tag_array, 0) = 'metal'
ORDER BY .name;
''',
[
{'name': 'floor lamp'}
],
)
async def test_edgeql_links_array_06(self):
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER .tag_array = ['metal', 'plastic']
ORDER BY .name;
''',
[
{'name': 'floor lamp'}
],
)
async def test_edgeql_links_array_07(self):
await self.assert_query_result(
r'''
SELECT Item {name}
FILTER NOT EXISTS .tag_array
ORDER BY .name;
''',
[
{'name': 'ball'},
{'name': 'ectoplasm'},
{'name': 'mystery toy'},
],
)
async def test_edgeql_links_array_08(self):
await self.assert_query_result(
r'''
SELECT Item {name}
# no item has 3 elements
FILTER NOT EXISTS array_get(.tag_array, 3)
ORDER BY .name;
''',
[
{'name': 'ball'},
{'name': 'chair'},
{'name': 'ectoplasm'},
{'name': 'floor lamp'},
{'name': 'mystery toy'},
{'name': 'table'},
{'name': 'teapot'},
{'name': 'tv'},
],
)
async def test_edgeql_links_array_09(self):
await self.assert_query_result(
r'''
# find an item with a unique quality
WITH
I2 := Item
SELECT Item {
name,
unique := (
SELECT _ := array_unpack(Item.tag_array)
FILTER _ NOT IN (
SELECT array_unpack(
(SELECT I2 FILTER I2 != Item).tag_array
)
)
)
}
ORDER BY .name;
''',
[
{
'name': 'ball',
'unique': []
}, {
'name': 'chair',
'unique': []
}, {
'name': 'ectoplasm',
'unique': []
}, {
'name': 'floor lamp',
'unique': {'metal'}
}, {
'name': 'mystery toy',
'unique': []
}, {
'name': 'table',
'unique': []
}, {
'name': 'teapot',
'unique': {'ceramic', 'round'}
}, {
'name': 'tv',
'unique': []
},
],
)
async def test_edgeql_links_array_10(self):
await self.assert_query_result(
r'''
# find an item with a unique quality
WITH
I2 := Item
SELECT Item {
name,
unique := (
SELECT _ := array_unpack(Item.tag_array)
FILTER _ NOT IN (
SELECT array_unpack(
(SELECT I2 FILTER I2 != Item).tag_array
)
)
)
}
FILTER count(.unique) > 0
ORDER BY .name;
''',
[
{
'name': 'floor lamp',
'unique': {'metal'}
}, {
'name': 'teapot',
'unique': {'ceramic', 'round'}
},
],
)
async def test_edgeql_links_array_11(self):
await self.assert_query_result(
r'''
# find an item with ALL unique qualities
WITH
I2 := Item
SELECT Item {
name,
tag_array,
}
FILTER
# such that has tag_array
EXISTS Item.tag_array AND
# and such that does not exist
NOT EXISTS (
# another item
SELECT I2
FILTER
# different from current one
I2 != Item
AND
# matching at least one tag
array_unpack(I2.tag_array) =
array_unpack(Item.tag_array)
)
ORDER BY .name;
''',
[
{
'name': 'teapot',
'tag_array': {'ceramic', 'round'}
},
],
)
async def test_edgeql_links_derived_tuple_01(self):
await self.assert_query_result(
r'''
SELECT Item {
n1 := (Item.name,),
n2 := (Item.name,).0,
t1 := (Item.tag_set1,),
t2 := (Item.tag_set1, Item.tag_set2),
t3 := (Item.tag_set1,).0,
t4 := (Item.tag_set1, Item.tag_set2).1,
}
FILTER .name IN {'chair', 'table'}
ORDER BY .name;
''',
[
{
'n1': ['chair'],
'n2': 'chair',
't1': [['rectangle'], ['wood']],
't2': [],
't3': ['rectangle', 'wood'],
't4': [],
},
{
'n1': ['table'],
'n2': 'table',
't1': [['rectangle'], ['wood']],
't2': [['rectangle', 'rectangle'], ['rectangle', 'wood'],
['wood', 'rectangle'], ['wood', 'wood']],
't3': ['rectangle', 'wood'],
't4': ['rectangle', 'rectangle', 'wood', 'wood'],
},
],
sort={
# sort the data
't1': lambda x: x[0],
't2': lambda x: (x[0], x[1]),
't3': lambda x: x,
't4': lambda x: x,
}
)
async def test_edgeql_links_derived_tuple_02(self):
await self.assert_query_result(
r'''
SELECT Item {
n1 := (Item.name, 'foo'),
}
FILTER
.n1.0 = 'chair'
ORDER BY
.name;
''',
[
{
'n1': ['chair', 'foo'],
},
],
)
async def test_edgeql_links_derived_array_01(self):
await self.assert_query_result(
r'''
SELECT Item {
n1 := [Item.name],
n2 := [Item.name][0],
t1 := [Item.tag_set1],
t2 := [Item.tag_set1, Item.tag_set2],
t3 := [Item.tag_set1][0],
t4 := [Item.tag_set1, Item.tag_set2][1],
a1 := Item.tag_array,
a2 := Item.tag_array[0],
}
FILTER .name IN {'chair', 'table'}
ORDER BY .name;
''',
[
{
'n1': ['chair'],
'n2': 'chair',
't1': [['rectangle'], ['wood']],
't2': [],
't3': ['rectangle', 'wood'],
't4': [],
'a1': ['wood', 'rectangle'],
'a2': 'wood',
},
{
'n1': ['table'],
'n2': 'table',
't1': [['rectangle'], ['wood']],
't2': [['rectangle', 'rectangle'], ['rectangle', 'wood'],
['wood', 'rectangle'], ['wood', 'wood']],
't3': ['rectangle', 'wood'],
't4': ['rectangle', 'rectangle', 'wood', 'wood'],
'a1': ['wood', 'rectangle'],
'a2': 'wood',
},
],
sort={
# sort the data
't1': lambda x: x[0],
't2': lambda x: (x[0], x[1]),
't3': lambda x: x,
't4': lambda x: x,
}
)
async def test_edgeql_links_derived_array_02(self):
await self.assert_query_result(
r'''
SELECT Item {
n1 := [Item.name],
n2 := array_get([Item.name], 0),
t1 := [Item.tag_set1],
t2 := [Item.tag_set1, Item.tag_set2],
t3 := array_get([Item.tag_set1], 0),
t4 := array_get([Item.tag_set1, Item.tag_set2], 1),
a1 := Item.tag_array,
a2 := array_get(Item.tag_array, 0),
}
FILTER .name IN {'chair', 'table'}
ORDER BY .name;
''',
[
{
'n1': ['chair'],
'n2': 'chair',
't1': [['rectangle'], ['wood']],
't2': [],
't3': ['rectangle', 'wood'],
't4': [],
'a1': ['wood', 'rectangle'],
'a2': 'wood',
},
{
'n1': ['table'],
'n2': 'table',
't1': [['rectangle'], ['wood']],
't2': [['rectangle', 'rectangle'], ['rectangle', 'wood'],
['wood', 'rectangle'], ['wood', 'wood']],
't3': ['rectangle', 'wood'],
't4': ['rectangle', 'rectangle', 'wood', 'wood'],
'a1': ['wood', 'rectangle'],
'a2': 'wood',
},
],
sort={
# sort the data
't1': lambda x: x[0],
't2': lambda x: (x[0], x[1]),
't3': lambda x: x,
't4': lambda x: x,
}
)
async def test_edgeql_links_derived_array_03(self):
await self.assert_query_result(
r'''
SELECT Item {
name,
a_a1 := Item.tag_array[{0, 1}],
a_t2 := [Item.tag_set1, Item.tag_set2][{0, 1}],
}
FILTER .name IN {'chair', 'table'}
ORDER BY .name;
''',
[
{
'name': 'chair',
'a_a1': ['rectangle', 'wood'],
'a_t2': [],
},
{
'name': 'table',
'a_a1': ['rectangle', 'wood'],
'a_t2': ['rectangle', 'rectangle', 'rectangle',
'rectangle', 'wood', 'wood', 'wood', 'wood'],
},
],
sort={
# sort the data
'a_a1': lambda x: x,
'a_t2': lambda x: x,
}
)
async def test_edgeql_links_derived_array_04(self):
await self.assert_query_result(
r'''
SELECT Item {
name,
a_a1 := array_get(Item.tag_array, {0, 1}),
a_t2 := array_get([Item.tag_set1, Item.tag_set2], {0, 1}),
}
FILTER .name IN {'chair', 'table'}
ORDER BY .name;
''',
[
{
'name': 'chair',
'a_a1': ['rectangle', 'wood'],
'a_t2': [],
},
{
'name': 'table',
'a_a1': ['rectangle', 'wood'],
'a_t2': ['rectangle', 'rectangle', 'rectangle',
'rectangle', 'wood', 'wood', 'wood', 'wood'],
},
],
sort={
# sort the data
'a_a1': lambda x: x,
'a_t2': lambda x: x,
}
)
async def test_edgeql_links_derived_array_05(self):
await self.assert_query_result(
r'''
SELECT Item {
name,
a_a1 := array_get(Item.tag_array, {0, 2}),
a_t2 := array_get([Item.tag_set1, Item.tag_set2], {0, 2}),
}
FILTER .name IN {'ball', 'chair', 'table'}
ORDER BY .name;
''',
[
{
'name': 'ball',
'a_a1': [],
'a_t2': ['plastic', 'plastic', 'round', 'round'],
},
{
'name': 'chair',
'a_a1': ['wood'],
'a_t2': [],
},
{
'name': 'table',
'a_a1': ['wood'],
'a_t2': ['rectangle', 'rectangle', 'wood', 'wood'],
},
],
sort={
# sort the data
'a_a1': lambda x: x,
'a_t2': lambda x: x,
}
)
async def test_edgeql_links_derived_array_06(self):
await self.assert_query_result(
r'''
SELECT Item {
name,
a_a1 := Item.tag_array[1:20],
a_t2 := [Item.tag_set1, Item.tag_set2][1:20],
}
FILTER .name IN {'ball', 'chair', 'table'}
ORDER BY .name;
''',
[
{
'name': 'ball',
'a_a1': None,
'a_t2': [['plastic'], ['plastic'], ['round'], ['round']],
},
{
'name': 'chair',
'a_a1': ['rectangle'],
'a_t2': [],
},
{
'name': 'table',
'a_a1': ['rectangle'],
'a_t2': [['rectangle'], ['rectangle'], ['wood'], ['wood']],
}
],
sort={
# sort the data
'a_a1': lambda x: x,
'a_t2': lambda x: x[0],
}
)
async def test_edgeql_links_derived_array_07(self):
await self.assert_query_result(
r'''
SELECT Item {
name,
a_a1 := Item.tag_array[{1, 2}:20],
a_t2 := [Item.tag_set1, Item.tag_set2][{1, 2}:20],
}
FILTER .name IN {'ball', 'chair', 'table'}
ORDER BY .name;
''',
[
{
'name': 'ball',
'a_a1': [], # empty set of arrays
'a_t2': [[], [], [], [],
['plastic'], ['plastic'], ['round'], ['round']],
},
{
'name': 'chair',
'a_a1': [[], ['rectangle']],
'a_t2': [], # empty set of arrays
},
{
'name': 'table',
'a_a1': [[], ['rectangle']],
'a_t2': [[], [], [], [],
['rectangle'], ['rectangle'], ['wood'], ['wood']],
}
],
sort={
# sort the data
'a_a1': lambda x: x[0] if x else '',
'a_t2': lambda x: x[0] if x else '',
}
)
async def test_edgeql_links_derived_array_08(self):
await self.assert_query_result(
r'''
SELECT Item {
name,
re := re_match(Item.tag_set1, Item.tag_set2),
}
FILTER .name IN {'chair', 'table'}
ORDER BY .name;
''',
[
{
'name': 'chair',
're': [],
},
{
'name': 'table',
're': [['rectangle'], ['wood']],
}
],
sort={
# sort the data
're': lambda x: x[0],
}
)
|
|
"""Authentication for HTTP component."""
from __future__ import annotations
from collections.abc import Awaitable, Callable
from datetime import timedelta
from ipaddress import ip_address
import logging
import secrets
from typing import Final
from urllib.parse import unquote
from aiohttp import hdrs
from aiohttp.web import Application, Request, StreamResponse, middleware
import jwt
from homeassistant.auth.const import GROUP_ID_READ_ONLY
from homeassistant.auth.models import User
from homeassistant.components import websocket_api
from homeassistant.core import HomeAssistant, callback
from homeassistant.util import dt as dt_util
from homeassistant.util.network import is_local
from .const import KEY_AUTHENTICATED, KEY_HASS_REFRESH_TOKEN_ID, KEY_HASS_USER
from .request_context import current_request
_LOGGER = logging.getLogger(__name__)
DATA_API_PASSWORD: Final = "api_password"
DATA_SIGN_SECRET: Final = "http.auth.sign_secret"
SIGN_QUERY_PARAM: Final = "authSig"
STORAGE_VERSION = 1
STORAGE_KEY = "http.auth"
CONTENT_USER_NAME = "Home Assistant Content"
@callback
def async_sign_path(
hass: HomeAssistant,
path: str,
expiration: timedelta,
*,
refresh_token_id: str | None = None,
) -> str:
"""Sign a path for temporary access without auth header."""
if (secret := hass.data.get(DATA_SIGN_SECRET)) is None:
secret = hass.data[DATA_SIGN_SECRET] = secrets.token_hex()
if refresh_token_id is None:
if connection := websocket_api.current_connection.get():
refresh_token_id = connection.refresh_token_id
elif (
request := current_request.get()
) and KEY_HASS_REFRESH_TOKEN_ID in request:
refresh_token_id = request[KEY_HASS_REFRESH_TOKEN_ID]
else:
refresh_token_id = hass.data[STORAGE_KEY]
now = dt_util.utcnow()
encoded = jwt.encode(
{
"iss": refresh_token_id,
"path": unquote(path),
"iat": now,
"exp": now + expiration,
},
secret,
algorithm="HS256",
)
return f"{path}?{SIGN_QUERY_PARAM}={encoded}"
@callback
def async_user_not_allowed_do_auth(
hass: HomeAssistant, user: User, request: Request | None = None
) -> str | None:
"""Validate that user is not allowed to do auth things."""
if not user.is_active:
return "User is not active"
if not user.local_only:
return None
# User is marked as local only, check if they are allowed to do auth
if request is None:
request = current_request.get()
if not request:
return "No request available to validate local access"
if "cloud" in hass.config.components:
# pylint: disable=import-outside-toplevel
from hass_nabucasa import remote
if remote.is_cloud_request.get():
return "User is local only"
try:
remote = ip_address(request.remote)
except ValueError:
return "Invalid remote IP"
if is_local(remote):
return None
return "User cannot authenticate remotely"
async def async_setup_auth(hass: HomeAssistant, app: Application) -> None:
"""Create auth middleware for the app."""
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
if (data := await store.async_load()) is None:
data = {}
refresh_token = None
if "content_user" in data:
user = await hass.auth.async_get_user(data["content_user"])
if user and user.refresh_tokens:
refresh_token = list(user.refresh_tokens.values())[0]
if refresh_token is None:
user = await hass.auth.async_create_system_user(
CONTENT_USER_NAME, group_ids=[GROUP_ID_READ_ONLY]
)
refresh_token = await hass.auth.async_create_refresh_token(user)
data["content_user"] = user.id
await store.async_save(data)
hass.data[STORAGE_KEY] = refresh_token.id
async def async_validate_auth_header(request: Request) -> bool:
"""
Test authorization header against access token.
Basic auth_type is legacy code, should be removed with api_password.
"""
try:
auth_type, auth_val = request.headers.get(hdrs.AUTHORIZATION, "").split(
" ", 1
)
except ValueError:
# If no space in authorization header
return False
if auth_type != "Bearer":
return False
refresh_token = await hass.auth.async_validate_access_token(auth_val)
if refresh_token is None:
return False
if async_user_not_allowed_do_auth(hass, refresh_token.user, request):
return False
request[KEY_HASS_USER] = refresh_token.user
request[KEY_HASS_REFRESH_TOKEN_ID] = refresh_token.id
return True
async def async_validate_signed_request(request: Request) -> bool:
"""Validate a signed request."""
if (secret := hass.data.get(DATA_SIGN_SECRET)) is None:
return False
if (signature := request.query.get(SIGN_QUERY_PARAM)) is None:
return False
try:
claims = jwt.decode(
signature, secret, algorithms=["HS256"], options={"verify_iss": False}
)
except jwt.InvalidTokenError:
return False
if claims["path"] != request.path:
return False
refresh_token = await hass.auth.async_get_refresh_token(claims["iss"])
if refresh_token is None:
return False
request[KEY_HASS_USER] = refresh_token.user
request[KEY_HASS_REFRESH_TOKEN_ID] = refresh_token.id
return True
@middleware
async def auth_middleware(
request: Request, handler: Callable[[Request], Awaitable[StreamResponse]]
) -> StreamResponse:
"""Authenticate as middleware."""
authenticated = False
if hdrs.AUTHORIZATION in request.headers and await async_validate_auth_header(
request
):
authenticated = True
auth_type = "bearer token"
# We first start with a string check to avoid parsing query params
# for every request.
elif (
request.method == "GET"
and SIGN_QUERY_PARAM in request.query
and await async_validate_signed_request(request)
):
authenticated = True
auth_type = "signed request"
if authenticated:
_LOGGER.debug(
"Authenticated %s for %s using %s",
request.remote,
request.path,
auth_type,
)
request[KEY_AUTHENTICATED] = authenticated
return await handler(request)
app.middlewares.append(auth_middleware)
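# A minimal sketch of the path-signing scheme implemented above, using PyJWT
# directly (illustrative only; the secret, path and issuer id below are made up
# and are not Home Assistant data).
if __name__ == "__main__":
    _demo_secret = secrets.token_hex()
    _demo_now = dt_util.utcnow()
    _demo_sig = jwt.encode(
        {
            "iss": "example-refresh-token-id",
            "path": "/api/camera_proxy/camera.demo",
            "iat": _demo_now,
            "exp": _demo_now + timedelta(minutes=5),
        },
        _demo_secret,
        algorithm="HS256",
    )
    _demo_claims = jwt.decode(
        _demo_sig, _demo_secret, algorithms=["HS256"], options={"verify_iss": False}
    )
    assert _demo_claims["path"] == "/api/camera_proxy/camera.demo"
    print(f"/api/camera_proxy/camera.demo?{SIGN_QUERY_PARAM}={_demo_sig}")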
|
|
#! /usr/bin/env python
"""
LLSG (Gomez Gonzalez et al. 2016)
"""
__author__ = 'Carlos Alberto Gomez Gonzalez'
__all__ = ['llsg']
import numpy as np
from scipy.linalg import qr
from multiprocessing import cpu_count
from astropy.stats import median_absolute_deviation
from ..conf import time_ini, timing
from ..preproc import cube_derotate, cube_collapse
from ..var import get_annulus_segments, cube_filter_highpass
from ..pca.svd import svd_wrapper, get_eigenvectors
from .thresholding import thresholding
from ..conf.utils_conf import pool_map, iterable
def llsg(cube, angle_list, fwhm, rank=10, thresh=1, max_iter=10,
low_rank_ref=False, low_rank_mode='svd', auto_rank_mode='noise',
residuals_tol=1e-1, cevr=0.9, thresh_mode='soft', nproc=1,
asize=None, n_segments=4, azimuth_overlap=None, radius_int=None,
random_seed=None, imlib='opencv', interpolation='lanczos4',
high_pass=None, collapse='median', full_output=False, verbose=True,
debug=False):
""" Local Low-rank plus Sparse plus Gaussian-noise decomposition (LLSG) as
described in Gomez Gonzalez et al. 2016. This first version of our algorithm
aims at decomposing ADI cubes into three terms L+S+G (low-rank, sparse and
Gaussian noise). Separating the noise from the S component (where the moving
planet should stay) allow us to increase the SNR of potential planets.
The three tunable parameters are the *rank* or expected rank of the L
component, the ``thresh`` or threshold for encouraging sparsity in the S
component and ``max_iter`` which sets the number of iterations. The rest of
parameters can be tuned at the users own risk (do it if you know what you're
doing).
Parameters
----------
cube : numpy ndarray, 3d
Input ADI cube.
angle_list : numpy ndarray, 1d
Corresponding parallactic angle for each frame.
fwhm : float
        Known size of the FWHM in pixels to be used.
rank : int, optional
Expected rank of the L component.
thresh : float, optional
Factor that scales the thresholding step in the algorithm.
max_iter : int, optional
Sets the number of iterations.
    low_rank_ref : bool, optional
If True the first estimation of the L component is obtained from the
remaining segments in the same annulus.
low_rank_mode : {'svd', 'brp'}, optional
Sets the method of solving the L update.
auto_rank_mode : {'noise', 'cevr'}, str optional
If ``rank`` is None, then ``auto_rank_mode`` sets the way that the
``rank`` is determined: the noise minimization or the cumulative
explained variance ratio (when 'svd' is used).
residuals_tol : float, optional
The value of the noise decay to be used when ``rank`` is None and
``auto_rank_mode`` is set to ``noise``.
cevr : float, optional
Float value in the range [0,1] for selecting the cumulative explained
variance ratio to choose the rank automatically (if ``rank`` is None).
thresh_mode : {'soft', 'hard'}, optional
Sets the type of thresholding.
nproc : None or int, optional
Number of processes for parallel computing. If None the number of
processes will be set to cpu_count()/2. By default the algorithm works
in single-process mode.
asize : int or None, optional
        If ``asize`` is None then each annulus will have a width of ``2*fwhm``
        pixels (rounded up).
If an integer then it is the width in pixels of each annulus.
n_segments : int or list of ints, optional
The number of segments for each annulus. When a single integer is given
it is used for all annuli.
azimuth_overlap : int or None, optional
Sets the amount of azimuthal averaging.
radius_int : int, optional
        The radius of the innermost annulus. By default it is 0; if >0 then the
central circular area is discarded.
random_seed : int or None, optional
Controls the seed for the Pseudo Random Number generator.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
high_pass : odd int or None, optional
If set to an odd integer <=7, a high-pass filter is applied to the
frames. The ``vip_hci.var.frame_filter_highpass`` is applied twice,
first with the mode ``median-subt`` and a large window, and then with
``laplacian-conv`` and a kernel size equal to ``high_pass``. 5 is an
optimal value when ``fwhm`` is ~4.
collapse : {'median', 'mean', 'sum', 'trimmean'}, str optional
Sets the way of collapsing the frames for producing a final image.
    full_output : bool, optional
Whether to return the final median combined image only or with other
intermediate arrays.
verbose : bool, optional
If True prints to stdout intermediate info.
debug : bool, optional
Whether to output some intermediate information.
Returns
-------
frame_s : numpy ndarray, 2d
Final frame (from the S component) after rotation and median-combination.
If ``full_output`` is True, the following intermediate arrays are returned:
list_l_array_der, list_s_array_der, list_g_array_der, frame_l, frame_s,
frame_g
"""
if cube.ndim != 3:
raise TypeError("Input array is not a cube (3d array)")
if not cube.shape[0] == angle_list.shape[0]:
msg = "Angle list vector has wrong length. It must equal the number"
msg += " frames in the cube"
raise TypeError(msg)
if low_rank_mode == 'brp':
if rank is None:
msg = "Auto rank only works with SVD low_rank_mode."
msg += " Set a value for the rank parameter"
raise ValueError(msg)
if low_rank_ref:
msg = "Low_rank_ref only works with SVD low_rank_mode"
raise ValueError(msg)
global cube_init
if high_pass is not None:
cube_init = cube_filter_highpass(cube, 'median-subt', median_size=19,
verbose=False)
cube_init = cube_filter_highpass(cube_init, 'laplacian-conv',
kernel_size=high_pass, verbose=False)
else:
cube_init = cube
if verbose:
start_time = time_ini()
n, y, x = cube.shape
if azimuth_overlap == 0:
azimuth_overlap = None
if radius_int is None:
radius_int = 0
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
# Same number of pixels per annulus
if asize is None:
annulus_width = int(np.ceil(2 * fwhm)) # as in the paper
elif isinstance(asize, int):
annulus_width = asize
n_annuli = int((y / 2 - radius_int) / annulus_width)
# TODO: asize in pxs to be consistent with other functions
if n_segments is None:
n_segments = [4 for _ in range(n_annuli)] # as in the paper
elif isinstance(n_segments, int):
n_segments = [n_segments]*n_annuli
elif n_segments == 'auto':
n_segments = []
n_segments.append(2) # for first annulus
n_segments.append(3) # for second annulus
ld = 2 * np.tan(360/4/2) * annulus_width
for i in range(2, n_annuli): # rest of annuli
radius = i * annulus_width
ang = np.rad2deg(2 * np.arctan(ld / (2 * radius)))
n_segments.append(int(np.ceil(360/ang)))
if verbose:
print('Annuli = {}'.format(n_annuli))
# Azimuthal averaging of residuals
if azimuth_overlap is None:
azimuth_overlap = 360 # no overlapping, single config of segments
n_rots = int(360 / azimuth_overlap)
matrix_s = np.zeros((n_rots, n, y, x))
if full_output:
matrix_l = np.zeros((n_rots, n, y, x))
matrix_g = np.zeros((n_rots, n, y, x))
    # Loop over the annuli
if verbose:
print('Processing annulus: ')
for ann in range(n_annuli):
inner_radius = radius_int + ann * annulus_width
n_segments_ann = n_segments[ann]
if verbose:
print('{} : in_rad={}, n_segm={}'.format(ann+1, inner_radius,
n_segments_ann))
# TODO: pool_map as in xloci function: build first a list
for i in range(n_rots):
theta_init = i * azimuth_overlap
indices = get_annulus_segments(cube[0], inner_radius,
annulus_width, n_segments_ann,
theta_init)
patches = pool_map(nproc, _decompose_patch, indices,
iterable(range(n_segments_ann)), n_segments_ann,
rank, low_rank_ref, low_rank_mode, thresh,
thresh_mode, max_iter, auto_rank_mode, cevr,
residuals_tol, random_seed, debug, full_output)
for j in range(n_segments_ann):
yy = indices[j][0]
xx = indices[j][1]
if full_output:
matrix_l[i, :, yy, xx] = patches[j][0]
matrix_s[i, :, yy, xx] = patches[j][1]
matrix_g[i, :, yy, xx] = patches[j][2]
else:
matrix_s[i, :, yy, xx] = patches[j]
if full_output:
list_s_array_der = [cube_derotate(matrix_s[k], angle_list, imlib=imlib,
interpolation=interpolation)
for k in range(n_rots)]
list_frame_s = [cube_collapse(list_s_array_der[k], mode=collapse)
for k in range(n_rots)]
frame_s = cube_collapse(np.array(list_frame_s), mode=collapse)
list_l_array_der = [cube_derotate(matrix_l[k], angle_list, imlib=imlib,
interpolation=interpolation)
for k in range(n_rots)]
list_frame_l = [cube_collapse(list_l_array_der[k], mode=collapse)
for k in range(n_rots)]
frame_l = cube_collapse(np.array(list_frame_l), mode=collapse)
list_g_array_der = [cube_derotate(matrix_g[k], angle_list, imlib=imlib,
interpolation=interpolation)
for k in range(n_rots)]
list_frame_g = [cube_collapse(list_g_array_der[k], mode=collapse)
for k in range(n_rots)]
frame_g = cube_collapse(np.array(list_frame_g), mode=collapse)
else:
list_s_array_der = [cube_derotate(matrix_s[k], angle_list, imlib=imlib,
interpolation=interpolation)
for k in range(n_rots)]
list_frame_s = [cube_collapse(list_s_array_der[k], mode=collapse)
for k in range(n_rots)]
frame_s = cube_collapse(np.array(list_frame_s), mode=collapse)
if verbose:
print('')
timing(start_time)
if full_output:
return(list_l_array_der, list_s_array_der, list_g_array_der,
frame_l, frame_s, frame_g)
else:
return frame_s
def _decompose_patch(indices, i_patch, n_segments_ann, rank, low_rank_ref,
low_rank_mode, thresh, thresh_mode, max_iter,
auto_rank_mode, cevr, residuals_tol, random_seed,
debug=False, full_output=False):
""" Patch decomposition.
"""
j = i_patch
yy = indices[j][0]
xx = indices[j][1]
data_segm = cube_init[:, yy, xx]
if low_rank_ref:
ref_segments = list(range(n_segments_ann))
ref_segments.pop(j)
for m, n in enumerate(ref_segments):
if m == 0:
yy_ref = indices[n][0]
xx_ref = indices[n][1]
else:
yy_ref = np.hstack((yy_ref, indices[n][0]))
xx_ref = np.hstack((xx_ref, indices[n][1]))
data_ref = cube_init[:, yy_ref, xx_ref]
else:
data_ref = data_segm
patch = _patch_rlrps(data_segm, data_ref, rank, low_rank_ref,
low_rank_mode, thresh, thresh_mode,
max_iter, auto_rank_mode, cevr,
residuals_tol, random_seed, debug=debug,
full_output=full_output)
return patch
def _patch_rlrps(array, array_ref, rank, low_rank_ref, low_rank_mode,
thresh, thresh_mode, max_iter, auto_rank_mode='noise',
cevr=0.9, residuals_tol=1e-2, random_seed=None, debug=False,
full_output=False):
""" Patch decomposition based on GoDec/SSGoDec (Zhou & Tao 2011)
"""
############################################################################
# Initializing L and S
############################################################################
L = array
if low_rank_ref:
L_ref = array_ref.T
else:
L_ref = None
S = np.zeros_like(L)
random_state = np.random.RandomState(random_seed)
itr = 0
power = 0
svdlib = 'lapack'
while itr <= max_iter:
########################################################################
# Updating L
########################################################################
if low_rank_mode == 'brp':
Y2 = random_state.randn(L.shape[1], rank)
for _ in range(power + 1):
Y1 = np.dot(L, Y2)
Y2 = np.dot(L.T, Y1)
Q, _ = qr(Y2, mode='economic')
Lnew = np.dot(np.dot(L, Q), Q.T)
elif low_rank_mode == 'svd':
if itr == 0:
PC = get_eigenvectors(rank, L, svdlib, mode=auto_rank_mode,
cevr=cevr, noise_error=residuals_tol,
data_ref=L_ref, debug=debug,
collapse=True)
rank = PC.shape[0] # so we can use the optimized rank
if low_rank_ref:
Lnew = np.dot(np.dot(PC, L).T, PC).T
else:
Lnew = np.dot(np.dot(L, PC.T), PC)
else:
rank_i = min(rank, min(L.shape[0], L.shape[1]))
PC = svd_wrapper(L, svdlib, rank_i, False,
random_state=random_state)
Lnew = np.dot(np.dot(L, PC.T), PC)
else:
raise RuntimeError('Low Rank estimation mode not recognized.')
########################################################################
# Updating S
########################################################################
T = L - Lnew + S
threshold = np.sqrt(median_absolute_deviation(T.ravel())) * thresh
# threshold = np.sqrt(median_absolute_deviation(T, axis=0)) * thresh
# threshmat = np.zeros_like(T)
# for i in range(threshmat.shape[0]):
# threshmat[i] = threshold
# threshold = threshmat
if debug:
print('threshold = {:.3f}'.format(threshold))
S = thresholding(T, threshold, thresh_mode)
T -= S
L = Lnew + T
itr += 1
G = array - L - S
L = L.T
S = S.T
G = G.T
if full_output:
return L, S, G
else:
return S
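# Illustrative usage sketch for ``llsg`` (assumes ``cube`` is a 3d ADI cube and
# ``angles`` the matching parallactic-angle vector, loaded elsewhere; the
# parameter values below are only placeholders):
#
#     frame_s = llsg(cube, angles, fwhm=4, rank=5, thresh=1, max_iter=10,
#                    verbose=True)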
|
|
from datetime import datetime
from nbgrader import api
from nbgrader.api import InvalidEntry, MissingEntry
from nose.tools import assert_equal, assert_not_equal, assert_raises
class TestGradebook(object):
def setup(self):
self.gb = api.Gradebook("sqlite:///:memory:")
def teardown(self):
self.gb.db.close()
def test_init(self):
assert_equal(self.gb.students, [], "students is not empty")
assert_equal(self.gb.assignments, [], "assignments is not empty")
#### Test students
def test_add_student(self):
s = self.gb.add_student('12345')
assert_equal(s.id, '12345', "incorrect id")
assert_equal(self.gb.students, [s], "student not in students")
# try adding a duplicate student
assert_raises(InvalidEntry, self.gb.add_student, '12345')
# try adding a student with arguments
s = self.gb.add_student('6789', last_name="Bar", first_name="Foo", email="[email protected]")
assert_equal(s.id, '6789', "incorrect id")
assert_equal(s.last_name, "Bar", "incorrect last name")
assert_equal(s.first_name, "Foo", "incorrect first name")
assert_equal(s.email, "[email protected]", "incorrect email")
def test_add_duplicate_student(self):
# we also need this test because this will cause an IntegrityError
# under the hood rather than a FlushError
self.gb.add_student('12345')
assert_raises(InvalidEntry, self.gb.add_student, '12345')
def test_find_student(self):
s1 = self.gb.add_student('12345')
assert_equal(self.gb.find_student('12345'), s1, "student 1 not found")
s2 = self.gb.add_student('abcd')
assert_equal(self.gb.find_student('12345'), s1, "student 1 not found after adding student 2")
assert_equal(self.gb.find_student('abcd'), s2, "student 2 not found")
def test_find_nonexistant_student(self):
assert_raises(MissingEntry, self.gb.find_student, '12345')
#### Test assignments
def test_add_assignment(self):
a = self.gb.add_assignment('foo')
assert_equal(a.name, 'foo', "incorrect name")
assert_equal(self.gb.assignments, [a], "assignment not in assignments")
# try adding a duplicate assignment
assert_raises(InvalidEntry, self.gb.add_assignment, 'foo')
# try adding an assignment with arguments
now = datetime.now()
a = self.gb.add_assignment('bar', duedate=now)
assert_equal(a.name, 'bar', "incorrect name")
assert_equal(a.duedate, now, "incorrect duedate")
# try adding with a string timestamp
a = self.gb.add_assignment('baz', duedate=now.isoformat())
assert_equal(a.name, 'baz', "incorrect name")
assert_equal(a.duedate, now, "incorrect duedate")
def test_add_duplicate_assignment(self):
self.gb.add_assignment('foo')
assert_raises(InvalidEntry, self.gb.add_assignment, 'foo')
def test_find_assignment(self):
a1 = self.gb.add_assignment('foo')
assert_equal(self.gb.find_assignment('foo'), a1, "assignment 1 not found")
a2 = self.gb.add_assignment('bar')
assert_equal(self.gb.find_assignment('foo'), a1, "assignment 1 not found after adding assignment 2")
assert_equal(self.gb.find_assignment('bar'), a2, "assignment 2 not found")
def test_find_nonexistant_assignment(self):
assert_raises(MissingEntry, self.gb.find_assignment, 'foo')
#### Test notebooks
def test_add_notebook(self):
a = self.gb.add_assignment('foo')
n = self.gb.add_notebook('p1', 'foo')
assert_equal(n.name, 'p1', "incorrect name")
assert_equal(n.assignment, a, "assignment set incorrectly")
assert_equal(a.notebooks, [n], "notebook set incorrectly")
        # try adding a duplicate notebook
assert_raises(InvalidEntry, self.gb.add_notebook, 'p1', 'foo')
def test_add_duplicate_notebook(self):
# it should be ok to add a notebook with the same name, as long as
# it's for different assignments
self.gb.add_assignment('foo')
self.gb.add_assignment('bar')
n1 = self.gb.add_notebook('p1', 'foo')
n2 = self.gb.add_notebook('p1', 'bar')
assert_not_equal(n1.id, n2.id, "notebooks have the same id")
# but not ok to add a notebook with the same name for the same assignment
assert_raises(InvalidEntry, self.gb.add_notebook, 'p1', 'foo')
def test_find_notebook(self):
self.gb.add_assignment('foo')
n1 = self.gb.add_notebook('p1', 'foo')
assert_equal(self.gb.find_notebook('p1', 'foo'), n1, "notebook 1 not found")
n2 = self.gb.add_notebook('p2', 'foo')
assert_equal(self.gb.find_notebook('p1', 'foo'), n1, "notebook 1 not found after adding notebook 2")
assert_equal(self.gb.find_notebook('p2', 'foo'), n2, "notebook 2 not found")
def test_find_nonexistant_notebook(self):
# check that it doesn't find it when there is nothing in the db
assert_raises(MissingEntry, self.gb.find_notebook, 'p1', 'foo')
# check that it doesn't find it even if the assignment exists
self.gb.add_assignment('foo')
assert_raises(MissingEntry, self.gb.find_notebook, 'p1', 'foo')
def test_update_or_create_notebook(self):
# first test creating it
self.gb.add_assignment('foo')
n1 = self.gb.update_or_create_notebook('p1', 'foo')
assert_equal(self.gb.find_notebook('p1', 'foo'), n1, "notebook not created")
# now test finding/updating it
n2 = self.gb.update_or_create_notebook('p1', 'foo')
assert_equal(n1, n2, "notebooks are not the same")
#### Test grade cells
def test_add_grade_cell(self):
self.gb.add_assignment('foo')
n = self.gb.add_notebook('p1', 'foo')
gc = self.gb.add_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='markdown')
assert_equal(gc.name, 'test1', "incorrect name")
assert_equal(gc.max_score, 2, "incorrect max score")
assert_equal(gc.cell_type, 'markdown', "incorrect cell type")
assert_equal(n.grade_cells, [gc], "grade cells set incorrectly")
assert_equal(gc.notebook, n, "notebook set incorrectly")
def test_add_grade_cell_with_args(self):
self.gb.add_assignment('foo')
self.gb.add_notebook('p1', 'foo')
gc = self.gb.add_grade_cell(
'test1', 'p1', 'foo',
max_score=3, source="blah blah blah",
cell_type="code", checksum="abcde")
assert_equal(gc.name, 'test1', "incorrect name")
assert_equal(gc.max_score, 3, "incorrect max score")
assert_equal(gc.source, "blah blah blah", "incorrect source")
assert_equal(gc.cell_type, "code", "incorrect cell type")
assert_equal(gc.checksum, "abcde", "incorrect checksum")
def test_create_invalid_grade_cell(self):
self.gb.add_assignment('foo')
self.gb.add_notebook('p1', 'foo')
assert_raises(
InvalidEntry, self.gb.add_grade_cell,
'test1', 'p1', 'foo',
max_score=3, source="blah blah blah",
cell_type="something", checksum="abcde")
def test_add_duplicate_grade_cell(self):
self.gb.add_assignment('foo')
self.gb.add_notebook('p1', 'foo')
self.gb.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code')
assert_raises(InvalidEntry, self.gb.add_grade_cell, 'test1', 'p1', 'foo', max_score=2, cell_type='markdown')
def test_find_grade_cell(self):
self.gb.add_assignment('foo')
self.gb.add_notebook('p1', 'foo')
gc1 = self.gb.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code')
assert_equal(self.gb.find_grade_cell('test1', 'p1', 'foo'), gc1, "grade cell 1 not found")
gc2 = self.gb.add_grade_cell('test2', 'p1', 'foo', max_score=2, cell_type='code')
assert_equal(self.gb.find_grade_cell('test1', 'p1', 'foo'), gc1, "grade cell 1 not found after adding grade cell 2")
assert_equal(self.gb.find_grade_cell('test2', 'p1', 'foo'), gc2, "grade cell 2 not found")
def test_find_nonexistant_grade_cell(self):
assert_raises(MissingEntry, self.gb.find_grade_cell, 'test1', 'p1', 'foo')
self.gb.add_assignment('foo')
assert_raises(MissingEntry, self.gb.find_grade_cell, 'test1', 'p1', 'foo')
self.gb.add_notebook('p1', 'foo')
assert_raises(MissingEntry, self.gb.find_grade_cell, 'test1', 'p1', 'foo')
def test_update_or_create_grade_cell(self):
# first test creating it
self.gb.add_assignment('foo')
self.gb.add_notebook('p1', 'foo')
gc1 = self.gb.update_or_create_grade_cell('test1', 'p1', 'foo', max_score=2, cell_type='code')
assert_equal(gc1.max_score, 2, "max score is incorrect")
assert_equal(gc1.cell_type, 'code', "cell type is incorrect")
assert_equal(self.gb.find_grade_cell('test1', 'p1', 'foo'), gc1, "grade cell not created")
# now test finding/updating it
assert_equal(gc1.checksum, None, "checksum is not empty")
gc2 = self.gb.update_or_create_grade_cell('test1', 'p1', 'foo', checksum="123456")
assert_equal(gc1, gc2, "grade cells are not the same")
assert_equal(gc1.max_score, 2, "max score is incorrect")
assert_equal(gc1.cell_type, 'code', "cell type is incorrect")
assert_equal(gc1.checksum, "123456", "checksum was not updated")
#### Test solution cells
def test_add_solution_cell(self):
self.gb.add_assignment('foo')
n = self.gb.add_notebook('p1', 'foo')
sc = self.gb.add_solution_cell('test1', 'p1', 'foo', cell_type="code")
assert_equal(sc.name, 'test1', "incorrect name")
assert_equal(sc.cell_type, 'code', "incorrect cell type")
assert_equal(n.solution_cells, [sc], "solution cells set incorrectly")
assert_equal(sc.notebook, n, "notebook set incorrectly")
def test_add_solution_cell_with_args(self):
self.gb.add_assignment('foo')
self.gb.add_notebook('p1', 'foo')
sc = self.gb.add_solution_cell(
'test1', 'p1', 'foo',
source="blah blah blah",
cell_type="code", checksum="abcde")
assert_equal(sc.name, 'test1', "incorrect name")
assert_equal(sc.source, "blah blah blah", "incorrect source")
assert_equal(sc.cell_type, "code", "incorrect cell type")
assert_equal(sc.checksum, "abcde", "incorrect checksum")
def test_create_invalid_solution_cell(self):
self.gb.add_assignment('foo')
self.gb.add_notebook('p1', 'foo')
assert_raises(
InvalidEntry, self.gb.add_solution_cell,
'test1', 'p1', 'foo',
source="blah blah blah",
cell_type="something", checksum="abcde")
def test_add_duplicate_solution_cell(self):
self.gb.add_assignment('foo')
self.gb.add_notebook('p1', 'foo')
self.gb.add_solution_cell('test1', 'p1', 'foo', cell_type="code")
assert_raises(InvalidEntry, self.gb.add_solution_cell, 'test1', 'p1', 'foo', cell_type="code")
def test_find_solution_cell(self):
self.gb.add_assignment('foo')
self.gb.add_notebook('p1', 'foo')
sc1 = self.gb.add_solution_cell('test1', 'p1', 'foo', cell_type="code")
assert_equal(self.gb.find_solution_cell('test1', 'p1', 'foo'), sc1, "solution cell 1 not found")
sc2 = self.gb.add_solution_cell('test2', 'p1', 'foo', cell_type="code")
assert_equal(self.gb.find_solution_cell('test1', 'p1', 'foo'), sc1, "solution cell 1 not found after adding solution cell 2")
assert_equal(self.gb.find_solution_cell('test2', 'p1', 'foo'), sc2, "solution cell 2 not found")
def test_find_nonexistant_solution_cell(self):
assert_raises(MissingEntry, self.gb.find_solution_cell, 'test1', 'p1', 'foo')
self.gb.add_assignment('foo')
assert_raises(MissingEntry, self.gb.find_solution_cell, 'test1', 'p1', 'foo')
self.gb.add_notebook('p1', 'foo')
assert_raises(MissingEntry, self.gb.find_solution_cell, 'test1', 'p1', 'foo')
def test_update_or_create_solution_cell(self):
# first test creating it
self.gb.add_assignment('foo')
self.gb.add_notebook('p1', 'foo')
sc1 = self.gb.update_or_create_solution_cell('test1', 'p1', 'foo', cell_type='code')
assert_equal(sc1.cell_type, 'code', "cell type is incorrect")
assert_equal(self.gb.find_solution_cell('test1', 'p1', 'foo'), sc1, "solution cell not created")
# now test finding/updating it
assert_equal(sc1.checksum, None, "checksum is not empty")
sc2 = self.gb.update_or_create_solution_cell('test1', 'p1', 'foo', checksum="123456")
assert_equal(sc1, sc2, "solution cells are not the same")
assert_equal(sc1.cell_type, 'code', "cell type is incorrect")
assert_equal(sc1.checksum, "123456", "checksum was not updated")
#### Test submissions
def _add_assignment(self):
self.gb.add_assignment('foo')
self.gb.add_notebook('p1', 'foo')
self.gb.add_grade_cell('test1', 'p1', 'foo', max_score=1, cell_type='code')
self.gb.add_grade_cell('test2', 'p1', 'foo', max_score=2, cell_type='markdown')
self.gb.add_solution_cell('solution1', 'p1', 'foo', cell_type='code')
self.gb.add_solution_cell('test2', 'p1', 'foo', cell_type='markdown')
def test_add_submission(self):
self._add_assignment()
self.gb.add_student('hacker123')
self.gb.add_student('bitdiddle')
s1 = self.gb.add_submission('foo', 'hacker123')
s2 = self.gb.add_submission('foo', 'bitdiddle')
assert_equal(self.gb.assignment_submissions('foo'), [s2, s1], "wrong list of submissions")
assert_equal(self.gb.student_submissions('hacker123'), [s1], "wrong submissions for hacker123")
assert_equal(self.gb.student_submissions('bitdiddle'), [s2], "wrong submissions for bitdiddle")
assert_equal(self.gb.find_submission('foo', 'hacker123'), s1, "couldn't find submission for hacker123")
assert_equal(self.gb.find_submission('foo', 'bitdiddle'), s2, "couldn't find submission for bitdiddle")
def test_add_duplicate_submission(self):
self._add_assignment()
self.gb.add_student('hacker123')
self.gb.add_submission('foo', 'hacker123')
assert_raises(InvalidEntry, self.gb.add_submission, 'foo', 'hacker123')
    #### Test average scores
def test_average_assignment_score(self):
self._add_assignment()
self.gb.add_student('hacker123')
self.gb.add_student('bitdiddle')
self.gb.add_submission('foo', 'hacker123')
self.gb.add_submission('foo', 'bitdiddle')
assert_equal(self.gb.average_assignment_score('foo'), 0.0)
assert_equal(self.gb.average_assignment_code_score('foo'), 0.0)
assert_equal(self.gb.average_assignment_written_score('foo'), 0.0)
g1 = self.gb.find_grade("test1", "p1", "foo", "hacker123")
g2 = self.gb.find_grade("test2", "p1", "foo", "hacker123")
g3 = self.gb.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = self.gb.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
self.gb.db.commit()
assert_equal(self.gb.average_assignment_score('foo'), 2.25)
assert_equal(self.gb.average_assignment_code_score('foo'), 0.75)
assert_equal(self.gb.average_assignment_written_score('foo'), 1.5)
def test_average_notebook_score(self):
self._add_assignment()
self.gb.add_student('hacker123')
self.gb.add_student('bitdiddle')
self.gb.add_submission('foo', 'hacker123')
self.gb.add_submission('foo', 'bitdiddle')
assert_equal(self.gb.average_notebook_score('p1', 'foo'), 0.0)
assert_equal(self.gb.average_notebook_code_score('p1', 'foo'), 0.0)
assert_equal(self.gb.average_notebook_written_score('p1', 'foo'), 0.0)
g1 = self.gb.find_grade("test1", "p1", "foo", "hacker123")
g2 = self.gb.find_grade("test2", "p1", "foo", "hacker123")
g3 = self.gb.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = self.gb.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
self.gb.db.commit()
assert_equal(self.gb.average_notebook_score('p1', 'foo'), 2.25)
assert_equal(self.gb.average_notebook_code_score('p1', 'foo'), 0.75)
assert_equal(self.gb.average_notebook_written_score('p1', 'foo'), 1.5)
    #### Test mass dictionary queries
def test_student_dicts(self):
self._add_assignment()
self.gb.add_student('hacker123')
self.gb.add_student('bitdiddle')
self.gb.add_submission('foo', 'hacker123')
self.gb.add_submission('foo', 'bitdiddle')
g1 = self.gb.find_grade("test1", "p1", "foo", "hacker123")
g2 = self.gb.find_grade("test2", "p1", "foo", "hacker123")
g3 = self.gb.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = self.gb.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
self.gb.db.commit()
students = self.gb.student_dicts()
assert_equal(
sorted(students, key=lambda x: x["id"]),
sorted([x.to_dict() for x in self.gb.students], key=lambda x: x["id"]))
def test_notebook_submission_dicts(self):
self._add_assignment()
self.gb.add_student('hacker123')
self.gb.add_student('bitdiddle')
self.gb.add_submission('foo', 'hacker123')
self.gb.add_submission('foo', 'bitdiddle')
g1 = self.gb.find_grade("test1", "p1", "foo", "hacker123")
g2 = self.gb.find_grade("test2", "p1", "foo", "hacker123")
g3 = self.gb.find_grade("test1", "p1", "foo", "bitdiddle")
g4 = self.gb.find_grade("test2", "p1", "foo", "bitdiddle")
g1.manual_score = 0.5
g2.manual_score = 2
g3.manual_score = 1
g4.manual_score = 1
self.gb.db.commit()
notebook = self.gb.find_notebook("p1", "foo")
submissions = self.gb.notebook_submission_dicts("p1", "foo")
assert_equal(
sorted(submissions, key=lambda x: x["id"]),
sorted([x.to_dict() for x in notebook.submissions], key=lambda x: x["id"]))
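# A quick illustrative sketch of the Gradebook API exercised above, run against
# an in-memory database (the names below are placeholders, not real course data):
if __name__ == "__main__":
    demo_gb = api.Gradebook("sqlite:///:memory:")
    demo_gb.add_assignment('ps1')
    demo_gb.add_notebook('problem1', 'ps1')
    demo_gb.add_grade_cell('q1', 'problem1', 'ps1', max_score=1, cell_type='code')
    demo_gb.add_student('s001')
    demo_gb.add_submission('ps1', 's001')
    print(len(demo_gb.assignment_submissions('ps1')))  # expected: 1
    demo_gb.db.close()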
|
|
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>
#
# License: Simplified BSD
import warnings
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from .mxne_debiasing import compute_bias
from ..utils import logger, verbose, sum_squared
from ..time_frequency.stft import stft_norm2, stft, istft
def groups_norm2(A, n_orient):
"""compute squared L2 norms of groups inplace"""
n_positions = A.shape[0] // n_orient
return np.sum(np.power(A, 2, A).reshape(n_positions, -1), axis=1)
def norm_l2inf(A, n_orient, copy=True):
"""L2-inf norm"""
if A.size == 0:
return 0.0
if copy:
A = A.copy()
return sqrt(np.max(groups_norm2(A, n_orient)))
def norm_l21(A, n_orient, copy=True):
"""L21 norm"""
if A.size == 0:
return 0.0
if copy:
A = A.copy()
return np.sum(np.sqrt(groups_norm2(A, n_orient)))
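# Quick numeric illustration of the two norms above for n_orient=1 and the
# 2x2 array [[3, 4], [0, 0]] (two positions, one orientation each):
#   norm_l21   -> sqrt(3**2 + 4**2) + sqrt(0) = 5.0
#   norm_l2inf -> sqrt(max(3**2 + 4**2, 0)) = 5.0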
def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
"""proximity operator for l21 norm
L2 over columns and L1 over rows => groups contain n_orient rows.
    It can optionally take the negative frequencies into account
    when complex values are passed and is_stft=True.
Example
-------
>>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float), (2, 1))
>>> Y = np.r_[Y, np.zeros_like(Y)]
>>> print(Y)
[[ 0. 4. 3. 0. 0.]
[ 0. 4. 3. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
>>> Yp, active_set = prox_l21(Y, 2, 2)
>>> print(Yp)
[[ 0. 2.86862915 2.15147186 0. 0. ]
[ 0. 2.86862915 2.15147186 0. 0. ]]
>>> print(active_set)
[ True True False False]
"""
if len(Y) == 0:
return np.zeros_like(Y), np.zeros((0,), dtype=np.bool)
if shape is not None:
shape_init = Y.shape
Y = Y.reshape(*shape)
n_positions = Y.shape[0] // n_orient
if is_stft:
rows_norm = np.sqrt(stft_norm2(Y).reshape(n_positions, -1).sum(axis=1))
else:
rows_norm = np.sqrt(np.sum((np.abs(Y) ** 2).reshape(n_positions, -1),
axis=1))
# Ensure shrink is >= 0 while avoiding any division by zero
shrink = np.maximum(1.0 - alpha / np.maximum(rows_norm, alpha), 0.0)
active_set = shrink > 0.0
if n_orient > 1:
active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
shrink = np.tile(shrink[:, None], [1, n_orient]).ravel()
Y = Y[active_set]
if shape is None:
Y *= shrink[active_set][:, np.newaxis]
else:
Y *= shrink[active_set][:, np.newaxis, np.newaxis]
Y = Y.reshape(-1, *shape_init[1:])
return Y, active_set
def prox_l1(Y, alpha, n_orient):
"""proximity operator for l1 norm with multiple orientation support
L2 over orientation and L1 over position (space + time)
Example
-------
>>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float), (2, 1))
>>> Y = np.r_[Y, np.zeros_like(Y)]
>>> print(Y)
[[ 1. 2. 3. 2. 0.]
[ 1. 2. 3. 2. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
>>> Yp, active_set = prox_l1(Y, 2, 2)
>>> print(Yp)
[[ 0. 0.58578644 1.58578644 0.58578644 0. ]
[ 0. 0.58578644 1.58578644 0.58578644 0. ]]
>>> print(active_set)
[ True True False False]
"""
n_positions = Y.shape[0] // n_orient
norms = np.sqrt(np.sum((np.abs(Y) ** 2).T.reshape(-1, n_orient), axis=1))
# Ensure shrink is >= 0 while avoiding any division by zero
shrink = np.maximum(1.0 - alpha / np.maximum(norms, alpha), 0.0)
shrink = shrink.reshape(-1, n_positions).T
active_set = np.any(shrink > 0.0, axis=1)
shrink = shrink[active_set]
if n_orient > 1:
active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
Y = Y[active_set]
if len(Y) > 0:
for o in range(n_orient):
Y[o::n_orient] *= shrink
return Y, active_set
def dgap_l21(M, G, X, active_set, alpha, n_orient):
"""Duality gaps for the mixed norm inverse problem
For details see:
Gramfort A., Kowalski M. and Hamalainen, M,
Mixed-norm estimates for the M/EEG inverse problem using accelerated
gradient methods, Physics in Medicine and Biology, 2012
http://dx.doi.org/10.1088/0031-9155/57/7/1937
Parameters
----------
M : array of shape [n_sensors, n_times]
data
G : array of shape [n_sensors, n_active]
Gain matrix a.k.a. lead field
X : array of shape [n_active, n_times]
Sources
active_set : array of bool
Mask of active sources
alpha : float
Regularization parameter
n_orient : int
        Number of dipoles per location (typically 1 or 3)
Returns
-------
gap : float
Dual gap
pobj : float
Primal cost
dobj : float
Dual cost. gap = pobj - dobj
R : array of shape [n_sensors, n_times]
Current residual of M - G * X
"""
GX = np.dot(G[:, active_set], X)
R = M - GX
penalty = norm_l21(X, n_orient, copy=True)
nR2 = sum_squared(R)
pobj = 0.5 * nR2 + alpha * penalty
dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False)
scaling = alpha / dual_norm
scaling = min(scaling, 1.0)
dobj = 0.5 * (scaling ** 2) * nR2 + scaling * np.sum(R * GX)
gap = pobj - dobj
return gap, pobj, dobj, R
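# Hedged usage sketch (added for illustration; not part of the original
# module). All shapes and values below are arbitrary assumptions.
#
#     >>> rng = np.random.RandomState(0)
#     >>> M = rng.randn(5, 10)                 # 5 sensors, 10 time points
#     >>> G = rng.randn(5, 8)                  # 8 candidate sources
#     >>> X = rng.randn(8, 10)                 # current source estimate
#     >>> active_set = np.ones(8, dtype=bool)
#     >>> gap, pobj, dobj, R = dgap_l21(M, G, X, active_set, 1.0, 1)
#     >>> gap >= 0                             # dobj lower-bounds the primal optimum
#     True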
@verbose
def _mixed_norm_solver_prox(M, G, alpha, maxit=200, tol=1e-8, verbose=None,
init=None, n_orient=1):
"""Solves L21 inverse problem with proximal iterations and FISTA"""
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
lipschitz_constant = 1.1 * linalg.norm(G, ord=2) ** 2
if n_sources < n_sensors:
gram = np.dot(G.T, G)
GTM = np.dot(G.T, M)
else:
gram = None
if init is None:
X = 0.0
R = M.copy()
if gram is not None:
R = np.dot(G.T, R)
else:
X = init
if gram is None:
R = M - np.dot(G, X)
else:
R = GTM - np.dot(gram, X)
t = 1.0
Y = np.zeros((n_sources, n_times)) # FISTA aux variable
E = [] # track cost function
    active_set = np.ones(n_sources, dtype=bool)  # start with full AS
for i in range(maxit):
X0, active_set_0 = X, active_set # store previous values
if gram is None:
Y += np.dot(G.T, R) / lipschitz_constant # ISTA step
else:
Y += R / lipschitz_constant # ISTA step
X, active_set = prox_l21(Y, alpha / lipschitz_constant, n_orient)
t0 = t
t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
Y.fill(0.0)
dt = ((t0 - 1.0) / t)
Y[active_set] = (1.0 + dt) * X
Y[active_set_0] -= dt * X0
Y_as = active_set_0 | active_set
if gram is None:
R = M - np.dot(G[:, Y_as], Y[Y_as])
else:
R = GTM - np.dot(gram[:, Y_as], Y[Y_as])
gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
E.append(pobj)
logger.debug("pobj : %s -- gap : %s" % (pobj, gap))
if gap < tol:
logger.debug('Convergence reached ! (gap: %s < %s)' % (gap, tol))
break
return X, active_set, E
@verbose
def _mixed_norm_solver_cd(M, G, alpha, maxit=10000, tol=1e-8,
verbose=None, init=None, n_orient=1):
"""Solves L21 inverse problem with coordinate descent"""
from sklearn.linear_model.coordinate_descent import MultiTaskLasso
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
if init is not None:
init = init.T
clf = MultiTaskLasso(alpha=alpha / len(M), tol=tol, normalize=False,
fit_intercept=False, max_iter=maxit,
warm_start=True)
clf.coef_ = init
clf.fit(G, M)
X = clf.coef_.T
active_set = np.any(X, axis=1)
X = X[active_set]
gap, pobj, dobj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
return X, active_set, pobj
@verbose
def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
active_set_size=50, debias=True, n_orient=1,
solver='auto'):
"""Solves L21 inverse solver with active set strategy
Algorithm is detailed in:
Gramfort A., Kowalski M. and Hamalainen, M,
Mixed-norm estimates for the M/EEG inverse problem using accelerated
gradient methods, Physics in Medicine and Biology, 2012
http://dx.doi.org/10.1088/0031-9155/57/7/1937
Parameters
----------
M : array
The data
G : array
The forward operator
alpha : float
The regularization parameter. It should be between 0 and 100.
A value of 100 will lead to an empty active set (no active source).
maxit : int
The number of iterations
tol : float
Tolerance on dual gap for convergence checking
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
active_set_size : int
Size of active set increase at each iteration.
debias : bool
Debias source estimates
n_orient : int
        The number of orientations (1: fixed, or 3: free or loose).
solver : 'prox' | 'cd' | 'auto'
The algorithm to use for the optimization.
Returns
-------
X : array
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function over the iterations.
"""
n_dipoles = G.shape[1]
n_positions = n_dipoles // n_orient
alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
logger.info("-- ALPHA MAX : %s" % alpha_max)
alpha = float(alpha)
has_sklearn = True
try:
from sklearn.linear_model.coordinate_descent import MultiTaskLasso
except ImportError:
has_sklearn = False
if solver == 'auto':
if has_sklearn and (n_orient == 1):
solver = 'cd'
else:
solver = 'prox'
if solver == 'cd':
if n_orient == 1 and not has_sklearn:
warnings.warn("Scikit-learn >= 0.12 cannot be found. "
"Using proximal iterations instead of coordinate "
"descent.")
solver = 'prox'
if n_orient > 1:
warnings.warn("Coordinate descent is only available for fixed "
"orientation. Using proximal iterations instead of "
"coordinate descent")
solver = 'prox'
if solver == 'cd':
logger.info("Using coordinate descent")
l21_solver = _mixed_norm_solver_cd
else:
logger.info("Using proximal iterations")
l21_solver = _mixed_norm_solver_prox
if active_set_size is not None:
X_init = None
n_sensors, n_times = M.shape
idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient))
        active_set = np.zeros(n_positions, dtype=bool)
active_set[idx_large_corr[-active_set_size:]] = True
if n_orient > 1:
active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
for k in range(maxit):
X, as_, E = l21_solver(M, G[:, active_set], alpha,
maxit=maxit, tol=tol, init=X_init,
n_orient=n_orient)
as_ = np.where(active_set)[0][as_]
gap, pobj, dobj, R = dgap_l21(M, G, X, as_, alpha, n_orient)
logger.info('gap = %s, pobj = %s' % (gap, pobj))
if gap < tol:
logger.info('Convergence reached ! (gap: %s < %s)'
% (gap, tol))
break
else: # add sources
idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R),
n_orient))
new_active_idx = idx_large_corr[-active_set_size:]
if n_orient > 1:
new_active_idx = (n_orient * new_active_idx[:, None] +
np.arange(n_orient)[None, :])
new_active_idx = new_active_idx.ravel()
idx_old_active_set = as_
active_set_old = active_set.copy()
active_set[new_active_idx] = True
as_size = np.sum(active_set)
logger.info('active set size %s' % as_size)
X_init = np.zeros((as_size, n_times), dtype=X.dtype)
idx_active_set = np.where(active_set)[0]
idx = np.searchsorted(idx_active_set, idx_old_active_set)
X_init[idx] = X
if np.all(active_set_old == active_set):
logger.info('Convergence stopped (AS did not change) !')
break
else:
logger.warning('Did NOT converge ! (gap: %s > %s)' % (gap, tol))
active_set = np.zeros_like(active_set)
active_set[as_] = True
else:
X, active_set, E = l21_solver(M, G, alpha, maxit=maxit,
tol=tol, n_orient=n_orient)
if (active_set.sum() > 0) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
return X, active_set, E
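# Hedged usage sketch (added; not in the original file): solving a small
# synthetic problem with the active set strategy. Sizes and alpha are
# illustrative assumptions only.
#
#     >>> rng = np.random.RandomState(42)
#     >>> G = rng.randn(20, 60)                        # 20 sensors, 60 dipoles
#     >>> X_true = np.zeros((60, 30))
#     >>> X_true[10] = np.sin(np.linspace(0, 3, 30))
#     >>> M = np.dot(G, X_true)
#     >>> X, active_set, E = mixed_norm_solver(M, G, alpha=10., n_orient=1,
#     ...                                      debias=False, verbose=False)
#     >>> X.shape == (active_set.sum(), M.shape[1])    # one row per active dipole
#     True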
###############################################################################
# TF-MxNE
@verbose
def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
"""Compute lipschitz constant for FISTA
It uses a power iteration method.
"""
n_times = M.shape[1]
n_points = G.shape[1]
    iv = np.ones((n_points, n_times), dtype=float)
v = phi(iv)
L = 1e100
for it in range(100):
L_old = L
logger.info('Lipschitz estimation: iteration = %d' % it)
iv = np.real(phiT(v))
Gv = np.dot(G, iv)
GtGv = np.dot(G.T, Gv)
w = phi(GtGv)
L = np.max(np.abs(w)) # l_inf norm
v = w / L
if abs((L - L_old) / L_old) < tol:
break
return L
def safe_max_abs(A, ia):
"""Compute np.max(np.abs(A[ia])) possible with empty A"""
if np.sum(ia): # ia is not empty
return np.max(np.abs(A[ia]))
else:
return 0.
def safe_max_abs_diff(A, ia, B, ib):
"""Compute np.max(np.abs(A)) possible with empty A"""
A = A[ia] if np.sum(ia) else 0.0
B = B[ib] if np.sum(ia) else 0.0
return np.max(np.abs(A - B))
class _Phi(object):
"""Util class to have phi stft as callable without using
a lambda that does not pickle"""
def __init__(self, wsize, tstep, n_coefs):
self.wsize = wsize
self.tstep = tstep
self.n_coefs = n_coefs
def __call__(self, x):
return stft(x, self.wsize, self.tstep,
verbose=False).reshape(-1, self.n_coefs)
class _PhiT(object):
"""Util class to have phi.T istft as callable without using
a lambda that does not pickle"""
def __init__(self, tstep, n_freq, n_step, n_times):
self.tstep = tstep
self.n_freq = n_freq
self.n_step = n_step
self.n_times = n_times
def __call__(self, z):
return istft(z.reshape(-1, self.n_freq, self.n_step), self.tstep,
self.n_times)
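# Illustrative sketch (added; an assumption, not original code): _Phi and
# _PhiT wrap the STFT analysis/synthesis pair so they can be pickled. For a
# tight frame the round trip should approximately reconstruct the signal.
#
#     >>> n_times, wsize, tstep = 256, 64, 4
#     >>> n_step = int(ceil(n_times / float(tstep)))
#     >>> n_freq = wsize // 2 + 1
#     >>> phi = _Phi(wsize, tstep, n_freq * n_step)
#     >>> phiT = _PhiT(tstep, n_freq, n_step, n_times)
#     >>> x = np.random.randn(3, n_times)
#     >>> err = np.max(np.abs(phiT(phi(x)) - x))   # expected to be small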
@verbose
def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
n_orient=1, maxit=200, tol=1e-8, log_objective=True,
lipschitz_constant=None, debias=True, verbose=None):
"""Solves TF L21+L1 inverse solver
Algorithm is detailed in:
A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
non-stationary source activations
Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
DOI: 10.1016/j.neuroimage.2012.12.051.
Functional Brain Imaging with M/EEG Using Structured Sparsity in
Time-Frequency Dictionaries
Gramfort A., Strohmeier D., Haueisen J., Hamalainen M. and Kowalski M.
INFORMATION PROCESSING IN MEDICAL IMAGING
Lecture Notes in Computer Science, 2011, Volume 6801/2011,
600-611, DOI: 10.1007/978-3-642-22092-0_49
http://dx.doi.org/10.1007/978-3-642-22092-0_49
Parameters
----------
M : array
The data.
G : array
The forward operator.
alpha_space : float
The spatial regularization parameter. It should be between 0 and 100.
alpha_time : float
The temporal regularization parameter. The higher it is the smoother
will be the estimated time series.
    wsize : int
        Length of the STFT window in samples (must be a multiple of 4).
    tstep : int
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
n_orient : int
        The number of orientations (1: fixed, or 3: free or loose).
maxit : int
The number of iterations.
tol : float
If absolute difference between estimates at 2 successive iterations
is lower than tol, the convergence is reached.
log_objective : bool
If True, the value of the minimized objective function is computed
and stored at every iteration.
lipschitz_constant : float | None
The lipschitz constant of the spatio temporal linear operator.
If None it is estimated.
debias : bool
Debias source estimates.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
X : array
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function at each iteration. If log_objective
is False, it will be empty.
"""
n_sensors, n_times = M.shape
n_dipoles = G.shape[1]
n_step = int(ceil(n_times / float(tstep)))
n_freq = wsize // 2 + 1
n_coefs = n_step * n_freq
phi = _Phi(wsize, tstep, n_coefs)
phiT = _PhiT(tstep, n_freq, n_step, n_times)
    Z = np.zeros((0, n_coefs), dtype=complex)
    active_set = np.zeros(n_dipoles, dtype=bool)
R = M.copy() # residual
if lipschitz_constant is None:
lipschitz_constant = 1.1 * tf_lipschitz_constant(M, G, phi, phiT)
logger.info("lipschitz_constant : %s" % lipschitz_constant)
t = 1.0
    Y = np.zeros((n_dipoles, n_coefs), dtype=complex)  # FISTA aux variable
Y[active_set] = Z
E = [] # track cost function
Y_time_as = None
Y_as = None
alpha_time_lc = alpha_time / lipschitz_constant
alpha_space_lc = alpha_space / lipschitz_constant
for i in range(maxit):
Z0, active_set_0 = Z, active_set # store previous values
if active_set.sum() < len(R) and Y_time_as is not None:
# trick when using tight frame to do a first screen based on
# L21 prox (L21 norms are not changed by phi)
GTR = np.dot(G.T, R) / lipschitz_constant
A = GTR.copy()
A[Y_as] += Y_time_as
_, active_set_l21 = prox_l21(A, alpha_space_lc, n_orient)
# just compute prox_l1 on rows that won't be zeroed by prox_l21
B = Y[active_set_l21] + phi(GTR[active_set_l21])
Z, active_set_l1 = prox_l1(B, alpha_time_lc, n_orient)
active_set_l21[active_set_l21] = active_set_l1
active_set_l1 = active_set_l21
else:
Y += np.dot(G.T, phi(R)) / lipschitz_constant # ISTA step
Z, active_set_l1 = prox_l1(Y, alpha_time_lc, n_orient)
Z, active_set_l21 = prox_l21(Z, alpha_space_lc, n_orient,
shape=(-1, n_freq, n_step), is_stft=True)
active_set = active_set_l1
active_set[active_set_l1] = active_set_l21
# Check convergence : max(abs(Z - Z0)) < tol
stop = (safe_max_abs(Z, ~active_set_0[active_set]) < tol and
safe_max_abs(Z0, ~active_set[active_set_0]) < tol and
safe_max_abs_diff(Z, active_set_0[active_set],
Z0, active_set[active_set_0]) < tol)
if stop:
            logger.info('Convergence reached !')
break
# FISTA 2 steps
# compute efficiently : Y = Z + ((t0 - 1.0) / t) * (Z - Z0)
t0 = t
t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
Y.fill(0.0)
dt = ((t0 - 1.0) / t)
Y[active_set] = (1.0 + dt) * Z
if len(Z0):
Y[active_set_0] -= dt * Z0
Y_as = active_set_0 | active_set
Y_time_as = phiT(Y[Y_as])
R = M - np.dot(G[:, Y_as], Y_time_as)
if log_objective: # log cost function value
Z2 = np.abs(Z)
Z2 **= 2
X = phiT(Z)
RZ = M - np.dot(G[:, active_set], X)
pobj = 0.5 * linalg.norm(RZ, ord='fro') ** 2 \
+ alpha_space * norm_l21(X, n_orient) \
+ alpha_time * np.sqrt(np.sum(Z2.T.reshape(-1, n_orient),
axis=1)).sum()
E.append(pobj)
logger.info("Iteration %d :: pobj %f :: n_active %d" % (i + 1,
pobj, np.sum(active_set)))
else:
logger.info("Iteration %d" % i + 1)
X = phiT(Z)
if (active_set.sum() > 0) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
return X, active_set, E
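# Hedged usage sketch (added for illustration only; all values below are
# assumptions):
#
#     >>> rng = np.random.RandomState(0)
#     >>> G = rng.randn(20, 60)
#     >>> M = rng.randn(20, 128)
#     >>> X, active_set, E = tf_mixed_norm_solver(M, G, alpha_space=30.,
#     ...                                         alpha_time=1., wsize=32,
#     ...                                         tstep=4, maxit=50,
#     ...                                         debias=False, verbose=False)
#     >>> # X has one row per entry of active_set that is True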
|
|
#! /usr/bin/python
import sys
import os
import json
import re
##############################
#
# Functionality plan:
# ------------------
#
# wrtr I am entering some text | wrtr -w | --write I am entering some text ~ this text after wrtr is appended to the last paragraph, or to the current paragraph set in the config
# wrtr -i | --insert 1 | 1.5 Some text ~ this text is inserted as the fifth paragraph of chapter one, if there is a 4th one of course
# wrtr -p | --prepend Some text ~ this text is prepended to the current chapter
# wrtr -r | --read empty | 1 | 1-3 | 1.2 | 1.2-1.4 | 1n (with note) | 1.2n ~ empty reads the whole notebook
# wrtr -c | --chapter empty | number of a chapter ~ empty shows the current chapter, and also shows stats (number of words and chars)
# wrtr -rm | --remove notebookname | 1 | 1-3 | 1.2 | 1.2-1.4| 1a (deletes the chapter contents)
# wrtr -lc | --list-chapters
# wrtr -ln | --list-notebooks
# wrtr -nc | --new-chapter empty | name of the chapter
# wrtr -t | --title empty | new name for the title
# wrtr -e | --edit 1 | 1.2 ~ edits a paragraph or a chapter
# wrtr -n | --note 1 | 1.2 | empty + the note of course
# wrtr -a | --author Name Surname | empty ~ stores the author in conf file too, empty shows the author
# wrtr --export /dir/ ~ exports to html
# wrtr -cp | --copy 0 1 | 1.2 2 | 1.2 2.3
# wrtr -mv | --move 0 1 | 1.2 2 | 1.2 2.3
# Format:
#
# First three lines describe the document and they are mandatory.
# [title] Untitled \n
# [author] Name Surname \n
# [date;last-modified="timestamp"] timestamp created\n
# \n
#
# There is a zero chapter, by default, used to add the
# [zero-chapter] \n
# \n
#
# [chapter;note="chapter note"] \n
# \n
# [p;note="paragraph note"] Paragraph text\n
# Ideas:
# - Document stats in the title line
# - Notes: list all notes in a chapter, read a whole chapter with notes, edit a note, etc... a note is bound to a chapter or a paragraph
# - --edit 1.2 (edit second paragraph from the first chapter)
# - export to html
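# Example (an illustrative sketch; the regex and names below are assumptions,
# not part of the implementation): parsing one tagged line of the format
# described above.
#
#   import re
#   line = '[p;note="first draft"] Call me Ishmael.'
#   m = re.match(r'^\[(?P<tag>[a-z-]+)(?:;note="(?P<note>[^"]*)")?\]\s?(?P<text>.*)$', line)
#   # m.group('tag') -> 'p', m.group('note') -> 'first draft',
#   # m.group('text') -> 'Call me Ishmael.'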
_conf = {}
_confname = '.wrtr.conf'
_HOME = os.path.expanduser('~');
_confpath = os.path.join(_HOME, _confname)
FILE = ''
fns = {
'-t': '_setTitle',
'--title': '_setTitle',
'-e': '_edit',
'--edit': '_edit',
'-w': '_write',
'--write': '_write',
'-i': '_write',
'--insert': '_write',
'-r': '_read',
'--read': '_read'
}
del sys.argv[0]
def fn(fname, *argv):
    globals()[fname](*argv)
def _setTitle(args):
print 'setTitle'
def _edit(args):
print 'edit'
def _write(args):
print 'write'
def _read(args):
print 'read'
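# Illustrative dispatch sketch (an assumption, not implemented below): after
# the script name is dropped from sys.argv, the first remaining word is the
# flag and the rest are its arguments, e.g.
#
#   if sys.argv and sys.argv[0] in fns:
#       fn(fns[sys.argv[0]], sys.argv[1:])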
def loadConfig():
global _confpath, _conf, FILE
f = open(_confpath, 'r')
_conf = json.loads(f.read())
f.close()
FILE = os.path.join(_conf['path'], _conf['filename']);
def storeConfig():
global _confpath, _conf, FILE
f = open(_confpath, 'w')
f.write(json.dumps(_conf))
f.close()
FILE = os.path.join(_conf['path'], _conf['filename']);
def countDefaultNotebooks(path):
count = []
reg = re.compile(r'^notebook([0-9]+)\.txt$')
for f in os.listdir(path):
fpath = os.path.join(path,f)
match = re.match( reg, f)
if fpath and match:
m = match.group(1);
if match.group(1) == '':
m = 0;
else:
m = int(m)
count.append(m)
if len(count) == 0:
return '';
else:
count = max(count)
return str(count+1);
def changeNotebook():
notebook_name = raw_input("Enter a name or a full path of your notebook file: ")
    #TODO: validate file, a file is valid if its first line is a valid title line
pts = os.path.split(notebook_name)
if not pts[0] == '':
_conf['path'] = os.path.expanduser(pts[0])
return True
def newNotebook():
global _conf, FILE
count = countDefaultNotebooks(_conf['path'])
nname = "notebook"+count+".txt"
notebook_name = raw_input("Enter a name of your new notebook file ( "+nname+" ): ")
if notebook_name == '':
notebook_name = nname
_conf['filename'] = notebook_name
_conf['chapter'] = 0
FILE = os.path.join(_conf['path'],_conf['filename'])
f = open(FILE, 'w')
f.write('') #TODO: Add empty title and chapter 1
f.close()
return True
def promptForPath():
global _conf, _HOME
_conf['path'] = os.path.expanduser(raw_input("Enter a path where you will store your notebooks ("+_HOME+"): "))
if _conf['path'] == '':
_conf['path'] = _HOME
else:
if not os.path.exists(_conf['path']):
os.makedirs(_conf['path'])
return True
def changePath():
promptForPath()
storeConfig()
return True;
def appendParagraph(chapter):
return True;
def newChapter(chapter_title, after_chapter):
return True;
def setChapter(chapter, chapter_title):
return True;
def readChapter(chapter):
return True;
def readParagraph(chapter, paragraph):
global _conf
return True;
#TODO: with readline
def editParagraph(chapter, paragraph):
return True;
def getNotebookStats():
#num of chapters, num of paragraphs, num of words
return True;
def _init():
promptForPath()
newNotebook()
storeConfig()
if not os.path.isfile(_confpath):
_init()
else:
loadConfig()
|
|
import os
import sys
import copy
import random
import logging
import mxnet as mx
import numpy as np
from mxnet import nd
from mxnet import io
from mxnet import recordio
from mxnet.base import numeric_types
from mxnet.io import DataBatch, DataIter
from mxnet.image import *
def add_data_args(parser):
data = parser.add_argument_group('Data', 'the input images')
data.add_argument('--data-dir', type=str, default='./data',
help='the data dir')
data.add_argument('--train-image-root', type=str)
data.add_argument('--val-image-root', type=str)
#data.add_argument('--data-train', type=str, help='the training data')
#data.add_argument('--data-val', type=str, help='the validation data')
data.add_argument('--rgb-mean', type=str, default='123.68,116.779,103.939',
help='a tuple of size 3 for the mean rgb')
data.add_argument('--pad-size', type=int, default=0,
help='padding the input image')
data.add_argument('--image-shape', type=str,
help='the image shape feed into the network, e.g. (3,224,224)')
data.add_argument('--num-classes', type=int, help='the number of classes')
#data.add_argument('--num-examples', type=int, help='the number of training examples')
data.add_argument('--data-nthreads', type=int, default=4,
help='number of threads for data decoding')
data.add_argument('--benchmark', type=int, default=0,
help='if 1, then feed the network with synthetic data')
data.add_argument('--dtype', type=str, default='float32',
help='data type: float32 or float16')
return data
def add_data_aug_args(parser):
aug = parser.add_argument_group(
'Image augmentations', 'implemented in src/io/image_aug_default.cc')
aug.add_argument('--random-crop', type=int, default=1,
help='if or not randomly crop the image')
aug.add_argument('--random-mirror', type=int, default=1,
help='if or not randomly flip horizontally')
aug.add_argument('--max-random-h', type=int, default=0,
help='max change of hue, whose range is [0, 180]')
aug.add_argument('--max-random-s', type=int, default=0,
help='max change of saturation, whose range is [0, 255]')
aug.add_argument('--max-random-l', type=int, default=0,
help='max change of intensity, whose range is [0, 255]')
aug.add_argument('--max-random-aspect-ratio', type=float, default=0,
help='max change of aspect ratio, whose range is [0, 1]')
aug.add_argument('--max-random-rotate-angle', type=int, default=0,
help='max angle to rotate, whose range is [0, 360]')
aug.add_argument('--max-random-shear-ratio', type=float, default=0,
help='max ratio to shear, whose range is [0, 1]')
aug.add_argument('--max-random-scale', type=float, default=1,
help='max ratio to scale')
aug.add_argument('--min-random-scale', type=float, default=1,
help='min ratio to scale, should >= img_size/input_shape. otherwise use --pad-size')
return aug
def set_data_aug_level(aug, level):
if level >= 1:
aug.set_defaults(random_crop=1, random_mirror=1)
if level >= 2:
aug.set_defaults(max_random_h=36, max_random_s=50, max_random_l=50)
if level >= 3:
aug.set_defaults(max_random_rotate_angle=10, max_random_shear_ratio=0.1, max_random_aspect_ratio=0.25)
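# Hedged usage sketch (added; not part of the original script): wiring the
# data and augmentation arguments into an argparse parser and enabling the
# level-2 augmentation defaults.
#
#   import argparse
#   parser = argparse.ArgumentParser(description='example')
#   add_data_args(parser)
#   add_data_aug_args(parser)
#   set_data_aug_level(parser, 2)   # random crop/mirror + hue/sat/light jitter
#   args = parser.parse_args(['--image-shape', '3,224,224', '--num-classes', '10'])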
class ImageIter2(io.DataIter):
def __init__(self, batch_size, data_shape,
path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
balance = 0, rgb_mean = None, mix_prob = 1.0,
data_name='data', label_name='softmax_label',
**kwargs):
super(ImageIter2, self).__init__()
assert path_imgrec or path_imglist or (isinstance(imglist, list))
num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)
logging.info('Using %s threads for decoding...', str(num_threads))
#logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a'
# ' larger number to use more threads.')
class_name = self.__class__.__name__
self.imgrec = None
if path_imglist:
logging.info('%s: loading image list %s...', class_name, path_imglist)
with open(path_imglist) as fin:
imglist = {}
imgkeys = []
for line in iter(fin.readline, ''):
line = line.strip().split('\t')
label = nd.array([float(i) for i in line[1:-1]])
key = int(line[0])
imglist[key] = (label, line[-1])
imgkeys.append(key)
self.imglist = imglist
elif isinstance(imglist, list):
logging.info('%s: loading image list...', class_name)
result = {}
imgkeys = []
index = 1
for img in imglist:
key = str(index) # pylint: disable=redefined-variable-type
index += 1
if len(img) > 2:
label = nd.array(img[:-1])
elif isinstance(img[0], numeric_types):
label = nd.array([img[0]])
else:
label = nd.array(img[0])
result[key] = (label, img[-1])
imgkeys.append(str(key))
self.imglist = result
else:
self.imglist = None
self.path_root = path_root
self.rgb_mean = rgb_mean
if self.rgb_mean:
self.rgb_mean = np.array(self.rgb_mean, dtype=np.float32).reshape(1,1,3)
self.rgb_mean = nd.array(self.rgb_mean)
self.label_width = 1
self.provide_label = [(label_name, (batch_size, ))]
self.check_data_shape(data_shape)
self.provide_data = [(data_name, (batch_size,) + data_shape)]
self.batch_size = batch_size
self.data_shape = data_shape
self.shuffle = shuffle
self.seq = imgkeys
self.oseq = copy.copy(self.seq)
self.mix_prob = mix_prob
assert self.mix_prob>=0.0
assert self.mix_prob<=1.0
self.balance = balance
if self.balance>0:
assert(self.shuffle)
#self.balance()
if num_parts > 1:
assert part_index < num_parts
N = len(self.seq)
C = N // num_parts
self.seq = self.seq[part_index * C:(part_index + 1) * C]
if aug_list is None:
self.auglist = CreateAugmenter(data_shape, **kwargs)
else:
self.auglist = aug_list
self.cur = 0
self.reset()
def do_balance(self):
label_dist = {}
for idx in self.oseq:
_label = int(self.imglist[idx][0].asnumpy()[0])
#print(idx, _label)
v = label_dist.get(_label, [])
v.append(idx)
label_dist[_label] = v
items = sorted(label_dist.items(), key = lambda x : len(x[1]), reverse=True)
self.seq = []
tcount = min(len(items[0][1]), self.balance)
print('tcount', tcount)
for item in items:
_label = item[0]
v = item[1]
random.shuffle(v)
_tcount = tcount
#_tcount = len(v)
            for i in range(_tcount):
ii = i%len(v)
idx = v[ii]
self.seq.append(idx)
print(len(self.seq))
        for i in range(self.batch_size):
if len(self.seq)%self.batch_size==0:
break
ii = i%len(items)
idx = items[ii][1][0]
self.seq.append(idx)
random.shuffle(self.seq)
print(len(self.seq))
def num_samples(self):
return len(self.seq)
def reset(self):
"""Resets the iterator to the beginning of the data."""
if self.shuffle:
if self.balance>0:
self.do_balance()
else:
random.shuffle(self.seq)
self.cur = 0
def next_sample(self):
"""Helper function for reading in next sample."""
if self.seq is not None:
if self.cur >= len(self.seq):
raise StopIteration
idx = self.seq[self.cur]
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
if self.imglist is None:
return header.label, img
else:
return self.imglist[idx][0], img
else:
label, fname = self.imglist[idx]
return label, self.read_image(fname)
else:
s = self.imgrec.read()
if s is None:
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img
def next_sample_random(self):
"""Helper function for reading in next sample."""
if self.seq is not None:
_cur = random.randint(0, len(self.seq)-1)
idx = self.seq[_cur]
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
if self.imglist is None:
return header.label, img
else:
return self.imglist[idx][0], img
else:
label, fname = self.imglist[idx]
return label, self.read_image(fname)
def _next(self, rand = False):
"""Returns the next batch of data."""
batch_size = self.batch_size
c, h, w = self.data_shape
batch_data = nd.empty((batch_size, c, h, w))
batch_label = nd.empty(self.provide_label[0][1])
i = 0
try:
while i < batch_size:
if not rand:
label, s = self.next_sample()
else:
label, s = self.next_sample_random()
data = self.imdecode(s)
if self.rgb_mean is not None:
data = nd.cast(data, dtype='float32')
#print('apply mean', self.rgb_mean)
data -= self.rgb_mean
data *= 0.0078125
#_npdata = data.asnumpy()
#_npdata = _npdata.astype(np.float32)
#_npdata -= self.mean
#_npdata *= 0.0078125
#data = mx.nd.array(_npdata)
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
data = self.augmentation_transform(data)
assert i < batch_size, 'Batch size must be multiples of augmenter output length'
batch_data[i] = self.postprocess_data(data)
batch_label[i] = label
i += 1
except StopIteration:
if i==0:
raise StopIteration
return io.DataBatch([batch_data], [batch_label], batch_size - i)
def next(self):
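        # Added note (not in the original code): with probability mix_prob this
        # method performs a simple mixup-style augmentation -- a second batch is
        # drawn at random and averaged element-wise into the current one, i.e.
        # data = (data + random_data) / 2, while the labels of the first batch
        # are kept unchanged.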
batch = self._next(rand = False)
if self.mix_prob==0.0:
return batch
dice = random.random()
if dice<self.mix_prob:
mbatch = self._next(rand = True)
item_count = self.batch_size - batch.pad
            for i in range(item_count):
batch.data[0][i]+=mbatch.data[0][i]
batch.data[0][i]/=2.0
return batch
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError('This iterator expects inputs to have 3 channels.')
def check_valid_image(self, data):
"""Checks if the input data is valid"""
if len(data[0].shape) == 0:
raise RuntimeError('Data shape is wrong')
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
return imdecode(s)
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Example usage:
----------
>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
def augmentation_transform(self, data):
"""Transforms input data with specified augmentation."""
for aug in self.auglist:
data = aug(data)
return data
def postprocess_data(self, datum):
"""Final postprocessing step before image is loaded into the batch."""
return nd.transpose(datum, axes=(2, 0, 1))
def read_lst(file):
ret = []
with open(file, 'r') as f:
for line in f:
vec = line.strip().split("\t")
label = int(vec[1])
img_path = vec[2]
ret.append( [label, img_path] )
return ret
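# Illustrative sketch (an assumption based on the parsing above, not part of
# the original file): read_lst() expects one tab-separated record per line,
# "index<TAB>label<TAB>relative/image/path", e.g.
#
#   0<TAB>3<TAB>cats/cat_0001.jpg
#   1<TAB>7<TAB>dogs/dog_0001.jpg
#
# and returns [[3, 'cats/cat_0001.jpg'], [7, 'dogs/dog_0001.jpg']].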
def get_rec_iter(args, kv=None):
image_shape = tuple([int(l) for l in args.image_shape.split(',')])
rgb_mean = None
if len(args.rgb_mean)>0:
rgb_mean = [float(x) for x in args.rgb_mean.split(',')]
dtype = np.float32;
if 'dtype' in args:
if args.dtype == 'float16':
dtype = np.float16
if kv:
(rank, nworker) = (kv.rank, kv.num_workers)
else:
(rank, nworker) = (0, 1)
#print(rank, nworker, args.batch_size)
#train_resize = int(image_shape[1]*1.5)
#train_resize = image_shape[1]+32
#if train_resize>640:
# train_resize = None
if not args.retrain:
train = ImageIter2(
path_root = args.train_image_root,
path_imglist = args.train_lst,
balance = 1000,
data_shape = image_shape,
batch_size = args.batch_size,
#resize = train_resize,
rand_crop = True,
rand_resize = True,
rand_mirror = True,
shuffle = True,
brightness = 0.4,
contrast = 0.4,
saturation = 0.4,
pca_noise = 0.1,
rgb_mean = rgb_mean,
#data_name = 'data_source',
#label_name = 'label_source',
num_parts = nworker,
part_index = rank)
if args.val_lst is not None:
val = ImageIter2(
path_root = args.val_image_root,
path_imglist = args.val_lst,
batch_size = args.batch_size,
data_shape = image_shape,
resize = int(image_shape[1]*1.125),
rand_crop = False,
rand_resize = False,
rand_mirror = False,
rgb_mean = rgb_mean,
num_parts = nworker,
part_index = rank)
else:
val = None
else:
#rnd = random.randint(0,1)
#if rnd==1:
# train_resize = None
train_resize = None
train = ImageIter2(
path_root = args.train_image_root,
path_imglist = args.train_lst,
balance = 9999999,
data_shape = image_shape,
batch_size = args.batch_size,
resize = train_resize,
rand_resize = True,
rand_crop = True,
rand_mirror = True,
shuffle = True,
brightness = 0.1,
contrast = 0.1,
saturation = 0.1,
pca_noise = 0.1,
#data_name = 'data_source',
#label_name = 'label_source',
num_parts = nworker,
part_index = rank)
val = ImageIter2(
path_root = args.val_image_root,
path_imglist = args.val_lst,
batch_size = args.batch_size,
data_shape = image_shape,
resize = int(image_shape[1]*1.125),
rand_crop = False,
rand_resize = False,
rand_mirror = False,
num_parts = nworker,
part_index = rank)
return (train, val)
def test_st():
pass
if __name__ == '__main__':
test_st()
|
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates an Android Studio project from a GN target."""
import argparse
import codecs
import logging
import os
import shutil
import subprocess
import sys
import zipfile
_BUILD_ANDROID = os.path.join(os.path.dirname(__file__), os.pardir)
sys.path.append(_BUILD_ANDROID)
import devil_chromium
from devil.utils import run_tests_helper
from pylib import constants
from pylib.constants import host_paths
sys.path.append(os.path.join(_BUILD_ANDROID, 'gyp'))
import jinja_template
from util import build_utils
_DEFAULT_ANDROID_MANIFEST_PATH = os.path.join(
host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'AndroidManifest.xml')
_JINJA_TEMPLATE_PATH = os.path.join(
os.path.dirname(__file__), 'build.gradle.jinja')
_JAVA_SUBDIR = 'symlinked-java'
_SRCJARS_SUBDIR = 'extracted-srcjars'
def _RebasePath(path_or_list, new_cwd=None, old_cwd=None):
"""Makes the given path(s) relative to new_cwd, or absolute if not specified.
If new_cwd is not specified, absolute paths are returned.
If old_cwd is not specified, constants.GetOutDirectory() is assumed.
"""
if not isinstance(path_or_list, basestring):
return [_RebasePath(p, new_cwd, old_cwd) for p in path_or_list]
if old_cwd is None:
old_cwd = constants.GetOutDirectory()
old_cwd = os.path.abspath(old_cwd)
if new_cwd:
new_cwd = os.path.abspath(new_cwd)
return os.path.relpath(os.path.join(old_cwd, path_or_list), new_cwd)
return os.path.abspath(os.path.join(old_cwd, path_or_list))
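# Hedged examples (added; assuming constants.GetOutDirectory() returns
# 'out/Debug' under the source root) of how _RebasePath behaves:
#
#   _RebasePath('gen/foo.srcjar')                 # -> absolute path of out/Debug/gen/foo.srcjar
#   _RebasePath('gen/foo.srcjar', new_cwd='out')  # -> 'Debug/gen/foo.srcjar'
#   _RebasePath(['a.java', 'b.java'], new_cwd='out/Debug')  # -> ['a.java', 'b.java']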
def _IsSubpathOf(child, parent):
"""Returns whether |child| is a subpath of |parent|."""
return not os.path.relpath(child, parent).startswith(os.pardir)
def _WriteFile(path, data):
"""Writes |data| to |path|, constucting parent directories if necessary."""
logging.info('Writing %s', path)
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
with codecs.open(path, 'w', 'utf-8') as output_file:
output_file.write(data)
def _RunNinja(output_dir, ninja_targets):
cmd = ['ninja', '-C', output_dir, '-j50']
cmd.extend(ninja_targets)
logging.info('Running: %r', cmd)
subprocess.check_call(cmd)
class _ProjectEntry(object):
"""Helper class for various path transformations."""
def __init__(self, gn_target):
assert gn_target.startswith('//'), gn_target
if ':' not in gn_target:
gn_target = '%s:%s' % (gn_target, os.path.basename(gn_target))
self._gn_target = gn_target
self._build_config = None
@classmethod
def FromBuildConfigPath(cls, path):
prefix = 'gen/'
suffix = '.build_config'
assert path.startswith(prefix) and path.endswith(suffix), path
subdir = path[len(prefix):-len(suffix)]
return cls('//%s:%s' % (os.path.split(subdir)))
def __hash__(self):
return hash(self._gn_target)
def __eq__(self, other):
return self._gn_target == other.GnTarget()
def GnTarget(self):
return self._gn_target
def NinjaTarget(self):
return self._gn_target[2:]
def GnBuildConfigTarget(self):
return '%s__build_config' % self._gn_target
def NinjaBuildConfigTarget(self):
return '%s__build_config' % self.NinjaTarget()
def GradleSubdir(self):
"""Returns the output subdirectory."""
return self.NinjaTarget().replace(':', os.path.sep)
def ProjectName(self):
"""Returns the Gradle project name."""
return self.GradleSubdir().replace(os.path.sep, '\\$')
def BuildConfig(self):
"""Reads and returns the project's .build_config JSON."""
if not self._build_config:
path = os.path.join('gen', self.GradleSubdir() + '.build_config')
self._build_config = build_utils.ReadJson(_RebasePath(path))
return self._build_config
def _ComputeJavaSourceDirs(java_files):
"""Returns the list of source directories for the given files."""
found_roots = set()
for path in java_files:
path_root = path
# Recognize these tokens as top-level.
while os.path.basename(path_root) not in ('javax', 'org', 'com', 'src'):
assert path_root, 'Failed to find source dir for ' + path
path_root = os.path.dirname(path_root)
    # Assume that if we've hit "src", then we're at the root.
if os.path.basename(path_root) != 'src':
path_root = os.path.dirname(path_root)
found_roots.add(path_root)
return list(found_roots)
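# Hedged example (added, not original code): for the files
#
#   java/src/org/chromium/foo/Foo.java
#   java/src/org/chromium/foo/Bar.java
#
# the loop walks up until it hits one of the recognized tokens ('org' here),
# then steps up one more level because the token is not 'src', so the single
# source root returned is 'java/src'.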
def _CreateSymlinkTree(entry_output_dir, symlink_dir, desired_files,
parent_dirs):
"""Creates a directory tree of symlinks to the given files.
The idea here is to replicate a directory tree while leaving out files within
it not listed by |desired_files|.
"""
assert _IsSubpathOf(symlink_dir, entry_output_dir)
if os.path.exists(symlink_dir):
shutil.rmtree(symlink_dir)
for target_path in desired_files:
prefix = next(d for d in parent_dirs if target_path.startswith(d))
subpath = os.path.relpath(target_path, prefix)
symlinked_path = os.path.join(symlink_dir, subpath)
symlinked_dir = os.path.dirname(symlinked_path)
if not os.path.exists(symlinked_dir):
os.makedirs(symlinked_dir)
relpath = os.path.relpath(target_path, symlinked_dir)
logging.debug('Creating symlink %s -> %s', symlinked_path, relpath)
os.symlink(relpath, symlinked_path)
def _CreateJavaSourceDir(entry_output_dir, java_sources_file):
"""Computes and constructs when necessary the list of java source directories.
1. Computes the root java source directories from the list of files.
2. Determines whether there are any .java files in them that are not included
in |java_sources_file|.
3. If not, returns the list of java source directories. If so, constructs a
tree of symlinks within |entry_output_dir| of all files in
|java_sources_file|.
"""
java_dirs = []
if java_sources_file:
java_files = _RebasePath(build_utils.ReadSourcesList(java_sources_file))
java_dirs = _ComputeJavaSourceDirs(java_files)
found_java_files = build_utils.FindInDirectories(java_dirs, '*.java')
unwanted_java_files = set(found_java_files) - set(java_files)
missing_java_files = set(java_files) - set(found_java_files)
if unwanted_java_files:
logging.debug('Target requires .java symlinks: %s', entry_output_dir)
symlink_dir = os.path.join(entry_output_dir, _JAVA_SUBDIR)
_CreateSymlinkTree(entry_output_dir, symlink_dir, java_files, java_dirs)
java_dirs = [symlink_dir]
if missing_java_files:
logging.warning('Some java files were not found: %s', missing_java_files)
return java_dirs
def _GenerateLocalProperties(sdk_dir):
"""Returns the data for project.properties as a string."""
return '\n'.join([
'# Generated by //build/android/gradle/generate_gradle.py',
'sdk.dir=%s' % sdk_dir,
''])
def _GenerateGradleFile(build_config, config_json, java_dirs, relativize,
use_gradle_process_resources):
"""Returns the data for a project's build.gradle."""
deps_info = build_config['deps_info']
gradle = build_config['gradle']
if deps_info['type'] == 'android_apk':
target_type = 'android_apk'
elif deps_info['type'] == 'java_library' and not deps_info['is_prebuilt']:
if deps_info['requires_android']:
target_type = 'android_library'
else:
target_type = 'java_library'
else:
return None
variables = {}
variables['template_type'] = target_type
variables['use_gradle_process_resources'] = use_gradle_process_resources
variables['build_tools_version'] = config_json['build_tools_version']
variables['compile_sdk_version'] = config_json['compile_sdk_version']
android_manifest = gradle.get('android_manifest',
_DEFAULT_ANDROID_MANIFEST_PATH)
variables['android_manifest'] = relativize(android_manifest)
variables['java_dirs'] = relativize(java_dirs)
variables['prebuilts'] = relativize(gradle['dependent_prebuilt_jars'])
deps = [_ProjectEntry.FromBuildConfigPath(p)
for p in gradle['dependent_android_projects']]
variables['android_project_deps'] = [d.ProjectName() for d in deps]
deps = [_ProjectEntry.FromBuildConfigPath(p)
for p in gradle['dependent_java_projects']]
variables['java_project_deps'] = [d.ProjectName() for d in deps]
processor = jinja_template.JinjaProcessor(host_paths.DIR_SOURCE_ROOT)
return processor.Render(_JINJA_TEMPLATE_PATH, variables)
def _GenerateRootGradle():
"""Returns the data for the root project's build.gradle."""
variables = {'template_type': 'root'}
processor = jinja_template.JinjaProcessor(host_paths.DIR_SOURCE_ROOT)
return processor.Render(_JINJA_TEMPLATE_PATH, variables)
def _GenerateSettingsGradle(project_entries):
"""Returns the data for settings.gradle."""
project_name = os.path.basename(os.path.dirname(host_paths.DIR_SOURCE_ROOT))
lines = []
lines.append('// Generated by //build/android/gradle/generate_gradle.py')
lines.append('rootProject.name = "%s"' % project_name)
lines.append('rootProject.projectDir = settingsDir')
lines.append('')
for entry in project_entries:
# Example target: android_webview:android_webview_java__build_config
lines.append('include ":%s"' % entry.ProjectName())
lines.append('project(":%s").projectDir = new File(settingsDir, "%s")' %
(entry.ProjectName(), entry.GradleSubdir()))
return '\n'.join(lines)
def _ExtractSrcjars(entry_output_dir, srcjar_tuples):
"""Extracts all srcjars to the directory given by the tuples."""
extracted_paths = set(s[1] for s in srcjar_tuples)
for extracted_path in extracted_paths:
assert _IsSubpathOf(extracted_path, entry_output_dir)
if os.path.exists(extracted_path):
shutil.rmtree(extracted_path)
for srcjar_path, extracted_path in srcjar_tuples:
logging.info('Extracting %s to %s', srcjar_path, extracted_path)
with zipfile.ZipFile(srcjar_path) as z:
z.extractall(extracted_path)
def _FindAllProjectEntries(main_entry):
"""Returns the list of all _ProjectEntry instances given the root project."""
found = set()
to_scan = [main_entry]
while to_scan:
cur_entry = to_scan.pop()
if cur_entry in found:
continue
found.add(cur_entry)
build_config = cur_entry.BuildConfig()
sub_config_paths = build_config['deps_info']['deps_configs']
to_scan.extend(
_ProjectEntry.FromBuildConfigPath(p) for p in sub_config_paths)
return list(found)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output-directory',
help='Path to the root build directory.')
parser.add_argument('-v',
'--verbose',
dest='verbose_count',
default=0,
action='count',
help='Verbose level')
parser.add_argument('--target',
help='GN target to generate project for.',
default='//chrome/android:chrome_public_apk')
parser.add_argument('--project-dir',
help='Root of the output project.',
default=os.path.join('$CHROMIUM_OUTPUT_DIR', 'gradle'))
parser.add_argument('--use-gradle-process-resources',
action='store_true',
help='Have gradle generate R.java rather than ninja')
args = parser.parse_args()
if args.output_directory:
constants.SetOutputDirectory(args.output_directory)
constants.CheckOutputDirectory()
output_dir = constants.GetOutDirectory()
devil_chromium.Initialize(output_directory=output_dir)
run_tests_helper.SetLogLevel(args.verbose_count)
gradle_output_dir = os.path.abspath(
args.project_dir.replace('$CHROMIUM_OUTPUT_DIR', output_dir))
logging.warning('Creating project at: %s', gradle_output_dir)
main_entry = _ProjectEntry(args.target)
logging.warning('Building .build_config files...')
_RunNinja(output_dir, [main_entry.NinjaBuildConfigTarget()])
all_entries = _FindAllProjectEntries(main_entry)
logging.info('Found %d dependent build_config targets.', len(all_entries))
config_json = build_utils.ReadJson(
os.path.join(output_dir, 'gradle', 'config.json'))
project_entries = []
srcjar_tuples = []
for entry in all_entries:
build_config = entry.BuildConfig()
if build_config['deps_info']['type'] not in ('android_apk', 'java_library'):
continue
entry_output_dir = os.path.join(gradle_output_dir, entry.GradleSubdir())
relativize = lambda x, d=entry_output_dir: _RebasePath(x, d)
srcjars = _RebasePath(build_config['gradle'].get('bundled_srcjars', []))
if not args.use_gradle_process_resources:
srcjars += _RebasePath(build_config['javac']['srcjars'])
java_sources_file = build_config['gradle'].get('java_sources_file')
if java_sources_file:
java_sources_file = _RebasePath(java_sources_file)
java_dirs = _CreateJavaSourceDir(entry_output_dir, java_sources_file)
if srcjars:
java_dirs.append(os.path.join(entry_output_dir, _SRCJARS_SUBDIR))
data = _GenerateGradleFile(build_config, config_json, java_dirs, relativize,
args.use_gradle_process_resources)
if data:
project_entries.append(entry)
srcjar_tuples.extend(
(s, os.path.join(entry_output_dir, _SRCJARS_SUBDIR)) for s in srcjars)
_WriteFile(os.path.join(entry_output_dir, 'build.gradle'), data)
_WriteFile(os.path.join(gradle_output_dir, 'build.gradle'),
_GenerateRootGradle())
_WriteFile(os.path.join(gradle_output_dir, 'settings.gradle'),
_GenerateSettingsGradle(project_entries))
sdk_path = _RebasePath(config_json['android_sdk_root'])
_WriteFile(os.path.join(gradle_output_dir, 'local.properties'),
_GenerateLocalProperties(sdk_path))
if srcjar_tuples:
logging.warning('Building all .srcjar files...')
targets = _RebasePath([s[0] for s in srcjar_tuples], output_dir)
_RunNinja(output_dir, targets)
_ExtractSrcjars(gradle_output_dir, srcjar_tuples)
logging.warning('Project created successfully!')
if __name__ == '__main__':
main()
|
|
# orm/interfaces.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines some key base classes prominent within the internals.
This module and the classes within are mostly private, though some attributes
are exposed when inspecting mappings.
"""
from __future__ import annotations
import collections
import typing
from typing import Any
from typing import cast
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from . import exc as orm_exc
from . import path_registry
from .base import _MappedAttribute as _MappedAttribute
from .base import EXT_CONTINUE as EXT_CONTINUE
from .base import EXT_SKIP as EXT_SKIP
from .base import EXT_STOP as EXT_STOP
from .base import InspectionAttr as InspectionAttr
from .base import InspectionAttrExtensionType as InspectionAttrExtensionType
from .base import InspectionAttrInfo as InspectionAttrInfo
from .base import MANYTOMANY as MANYTOMANY
from .base import MANYTOONE as MANYTOONE
from .base import NotExtension as NotExtension
from .base import ONETOMANY as ONETOMANY
from .base import SQLORMOperations
from .. import ColumnElement
from .. import inspect
from .. import inspection
from .. import util
from ..sql import operators
from ..sql import roles
from ..sql import visitors
from ..sql._typing import _ColumnsClauseElement
from ..sql.base import ExecutableOption
from ..sql.cache_key import HasCacheKey
from ..sql.elements import SQLCoreOperations
from ..sql.schema import Column
from ..sql.type_api import TypeEngine
from ..util.typing import TypedDict
if typing.TYPE_CHECKING:
from .decl_api import RegistryType
_T = TypeVar("_T", bound=Any)
class ORMStatementRole(roles.StatementRole):
__slots__ = ()
_role_name = (
"Executable SQL or text() construct, including ORM " "aware objects"
)
class ORMColumnsClauseRole(roles.ColumnsClauseRole):
__slots__ = ()
_role_name = "ORM mapped entity, aliased entity, or Column expression"
class ORMEntityColumnsClauseRole(ORMColumnsClauseRole):
__slots__ = ()
_role_name = "ORM mapped or aliased entity"
class ORMFromClauseRole(roles.StrictFromClauseRole):
__slots__ = ()
_role_name = "ORM mapped entity, aliased entity, or FROM expression"
class ORMColumnDescription(TypedDict):
name: str
type: Union[Type, TypeEngine]
aliased: bool
expr: _ColumnsClauseElement
entity: Optional[_ColumnsClauseElement]
class _IntrospectsAnnotations:
__slots__ = ()
def declarative_scan(
self,
registry: "RegistryType",
cls: type,
key: str,
annotation: Optional[type],
is_dataclass_field: Optional[bool],
) -> None:
"""Perform class-specific initializaton at early declarative scanning
time.
.. versionadded:: 2.0
"""
class _MapsColumns(_MappedAttribute[_T]):
"""interface for declarative-capable construct that delivers one or more
Column objects to the declarative process to be part of a Table.
"""
__slots__ = ()
@property
def mapper_property_to_assign(self) -> Optional["MapperProperty[_T]"]:
"""return a MapperProperty to be assigned to the declarative mapping"""
raise NotImplementedError()
@property
def columns_to_assign(self) -> List[Column]:
"""A list of Column objects that should be declaratively added to the
new Table object.
"""
raise NotImplementedError()
@inspection._self_inspects
class MapperProperty(
HasCacheKey, _MappedAttribute[_T], InspectionAttr, util.MemoizedSlots
):
"""Represent a particular class attribute mapped by :class:`_orm.Mapper`.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`_schema.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`_orm.relationship`,
represented in the mapping as an instance of
:class:`.Relationship`.
"""
__slots__ = (
"_configure_started",
"_configure_finished",
"parent",
"key",
"info",
)
_cache_key_traversal = [
("parent", visitors.ExtendedInternalTraversal.dp_has_cache_key),
("key", visitors.ExtendedInternalTraversal.dp_string),
]
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
The collection typically only applies to a Relationship.
"""
is_property = True
"""Part of the InspectionAttr interface; states this object is a
mapper property.
"""
comparator: PropComparator[_T]
"""The :class:`_orm.PropComparator` instance that implements SQL
expression construction on behalf of this mapped attribute."""
@property
def _links_to_entity(self):
"""True if this MapperProperty refers to a mapped entity.
Should only be True for Relationship, False for all others.
"""
raise NotImplementedError()
def _memoized_attr_info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.InspectionAttr`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`_orm.relationship`, or
:func:`.composite`
functions.
.. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
available on extension types via the
:attr:`.InspectionAttrInfo.info` attribute, so that it can apply
to a wider variety of ORM and extension constructs.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
def setup(self, context, query_entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
def create_row_processor(
self, context, query_entity, path, mapper, result, adapter, populators
):
"""Produce row processing functions and append to the given
set of populators lists.
"""
def cascade_iterator(
self, type_, state, dict_, visited_states, halt_on=None
):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
        Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
This method typically only applies to Relationship.
"""
return iter(())
def set_parent(self, parent, init):
"""Set the parent mapper that references this MapperProperty.
This method is overridden by some subclasses to perform extra
setup when the mapper is first known.
"""
self.parent = parent
def instrument_class(self, mapper):
"""Hook called by the Mapper to the property to initiate
instrumentation of the class attribute managed by this
MapperProperty.
The MapperProperty here will typically call out to the
attributes module to set up an InstrumentedAttribute.
This step is the first of two steps to set up an InstrumentedAttribute,
and is called early in the mapper setup process.
The second step is typically the init_class_attribute step,
called from StrategizedProperty via the post_instrument_class()
hook. This step assigns additional state to the InstrumentedAttribute
(specifically the "impl") which has been determined after the
MapperProperty has determined what kind of persistence
management it needs to do (e.g. scalar, object, collection, etc).
"""
def __init__(self):
self._configure_started = False
self._configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
:class:`.MapperProperty`.
This is basically a ``getattr()`` call::
return getattr(self.parent.class_, self.key)
I.e. if this :class:`.MapperProperty` were named ``addresses``,
and the class to which it is mapped is ``User``, this sequence
is possible::
>>> from sqlalchemy import inspect
>>> mapper = inspect(User)
>>> addresses_property = mapper.attrs.addresses
>>> addresses_property.class_attribute is User.addresses
True
>>> User.addresses.property is addresses_property
True
"""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
The given Mapper is the Mapper invoking the operation, which
may not be the same Mapper as self.parent in an inheritance
scenario; however, Mapper will always at least be a sub-mapper of
self.parent.
This method is typically used by StrategizedProperty, which delegates
it to LoaderStrategy.init_class_attribute() to perform final setup
on the class-bound InstrumentedAttribute.
"""
def merge(
self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load,
_recursive,
_resolve_conflict_map,
):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object.
"""
def __repr__(self):
return "<%s at 0x%x; %s>" % (
self.__class__.__name__,
id(self),
getattr(self, "key", "no key"),
)
@inspection._self_inspects
class PropComparator(
SQLORMOperations[_T], operators.ColumnOperators[SQLORMOperations]
):
r"""Defines SQL operations for ORM mapped attributes.
SQLAlchemy allows for operators to
be redefined at both the Core and ORM level. :class:`.PropComparator`
is the base class of operator redefinition for ORM-level operations,
including those of :class:`.ColumnProperty`,
:class:`.Relationship`, and :class:`.Composite`.
User-defined subclasses of :class:`.PropComparator` may be created. The
built-in Python comparison and math operator methods, such as
:meth:`.operators.ColumnOperators.__eq__`,
:meth:`.operators.ColumnOperators.__lt__`, and
:meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
new operator behavior. The custom :class:`.PropComparator` is passed to
the :class:`.MapperProperty` instance via the ``comparator_factory``
argument. In each case,
the appropriate subclass of :class:`.PropComparator` should be used::
# definition of custom PropComparator subclasses
from sqlalchemy.orm.properties import \
ColumnProperty,\
Composite,\
Relationship
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return self.__clause_element__() == other
class MyRelationshipComparator(Relationship.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(Composite.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
.. seealso::
:class:`.ColumnProperty.Comparator`
:class:`.Relationship.Comparator`
:class:`.Composite.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__slots__ = "prop", "property", "_parententity", "_adapt_to_entity"
__visit_name__ = "orm_prop_comparator"
def __init__(
self,
prop,
parentmapper,
adapt_to_entity=None,
):
self.prop = self.property = prop
self._parententity = adapt_to_entity or parentmapper
self._adapt_to_entity = adapt_to_entity
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def _bulk_update_tuples(self, value):
"""Receive a SQL expression that represents a value in the SET
clause of an UPDATE statement.
Return a list of ``(expression, value)`` tuples that can be passed to a
:class:`_expression.Update` construct.
"""
return [(self.__clause_element__(), value)]
def adapt_to_entity(self, adapt_to_entity):
"""Return a copy of this PropComparator which will use the given
:class:`.AliasedInsp` to produce corresponding expressions.
"""
return self.__class__(self.prop, self._parententity, adapt_to_entity)
@property
def _parentmapper(self):
"""legacy; this is renamed to _parententity to be
compatible with QueryableAttribute."""
return inspect(self._parententity).mapper
@property
def _propagate_attrs(self):
# this suits the case in coercions where we don't actually
# call ``__clause_element__()`` but still need to get
# resolved._propagate_attrs. See #6558.
return util.immutabledict(
{
"compile_state_plugin": "orm",
"plugin_subject": self._parentmapper,
}
)
def _criterion_exists(
self, criterion: Optional[SQLCoreOperations[Any]] = None, **kwargs: Any
) -> ColumnElement[Any]:
return self.prop.comparator._criterion_exists(criterion, **kwargs)
@property
def adapter(self):
"""Produce a callable that adapts column expressions
to suit an aliased version of this comparator.
"""
if self._adapt_to_entity is None:
return None
else:
return self._adapt_to_entity._adapt_element
@property
def info(self):
return self.property.info
@staticmethod
def _any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def _has_op(left, other, **kwargs):
return left.has(other, **kwargs)
@staticmethod
def _of_type_op(a, class_):
return a.of_type(class_)
any_op = cast(operators.OperatorType, _any_op)
has_op = cast(operators.OperatorType, _has_op)
of_type_op = cast(operators.OperatorType, _of_type_op)
if typing.TYPE_CHECKING:
def operate(
self, op: operators.OperatorType, *other: Any, **kwargs: Any
) -> "SQLCoreOperations[Any]":
...
def reverse_operate(
self, op: operators.OperatorType, other: Any, **kwargs: Any
) -> "SQLCoreOperations[Any]":
...
def of_type(self, class_) -> "SQLORMOperations[_T]":
r"""Redefine this object in terms of a polymorphic subclass,
:func:`_orm.with_polymorphic` construct, or :func:`_orm.aliased`
construct.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
.. seealso::
:ref:`queryguide_join_onclause` - in the :ref:`queryguide_toplevel`
:ref:`inheritance_of_type`
"""
return self.operate(PropComparator.of_type_op, class_)
def and_(self, *criteria) -> "SQLORMOperations[_T]":
"""Add additional criteria to the ON clause that's represented by this
relationship attribute.
E.g.::
stmt = select(User).join(
User.addresses.and_(Address.email_address != 'foo')
)
stmt = select(User).options(
joinedload(User.addresses.and_(Address.email_address != 'foo'))
)
.. versionadded:: 1.4
.. seealso::
:ref:`orm_queryguide_join_on_augmented`
:ref:`loader_option_criteria`
:func:`.with_loader_criteria`
"""
return self.operate(operators.and_, *criteria)
def any(
self, criterion: Optional[SQLCoreOperations[Any]] = None, **kwargs
) -> ColumnElement[bool]:
r"""Return a SQL expression representing true if this element
references a member which meets the given criterion.
The usual implementation of ``any()`` is
:meth:`.Relationship.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(
self, criterion: Optional[SQLCoreOperations[Any]] = None, **kwargs
) -> ColumnElement[bool]:
r"""Return a SQL expression representing true if this element
references a member which meets the given criterion.
The usual implementation of ``has()`` is
:meth:`.Relationship.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
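# Hedged usage sketch (editor's addition, not part of the original module):
# a minimal declarative model showing the ``any()`` and ``has()`` comparator
# operations defined above.  The ``User`` / ``Address`` names are
# illustrative only; assumes the SQLAlchemy 1.4+ declarative API.
def _example_any_has():
    from sqlalchemy import Column, ForeignKey, Integer, String, select
    from sqlalchemy.orm import declarative_base, relationship

    Base = declarative_base()

    class User(Base):
        __tablename__ = "user_account"
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        addresses = relationship("Address", back_populates="user")

    class Address(Base):
        __tablename__ = "address"
        id = Column(Integer, primary_key=True)
        user_id = Column(Integer, ForeignKey("user_account.id"))
        email_address = Column(String(100))
        user = relationship("User", back_populates="addresses")

    # any() renders an EXISTS correlated against the one-to-many side
    users_with_example_email = select(User).where(
        User.addresses.any(Address.email_address.like("%@example.com"))
    )
    # has() renders an EXISTS against the many-to-one side
    addresses_of_named_user = select(Address).where(
        Address.user.has(User.name == "some user")
    )
    return users_with_example_email, addresses_of_named_user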
class StrategizedProperty(MapperProperty[_T]):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
The mechanics of StrategizedProperty are used for every Query
invocation for every mapped attribute participating in that Query,
to determine first how the attribute will be rendered in SQL
and secondly how the attribute will retrieve a value from a result
row and apply it to a mapped object. The routines here are very
performance-critical.
"""
__slots__ = (
"_strategies",
"strategy",
"_wildcard_token",
"_default_path_loader_key",
"strategy_key",
)
inherit_cache = True
strategy_wildcard_key = None
strategy_key: Tuple[Any, ...]
def _memoized_attr__wildcard_token(self):
return (
f"{self.strategy_wildcard_key}:{path_registry._WILDCARD_TOKEN}",
)
def _memoized_attr__default_path_loader_key(self):
return (
"loader",
(f"{self.strategy_wildcard_key}:{path_registry._DEFAULT_TOKEN}",),
)
def _get_context_loader(self, context, path):
load = None
search_path = path[self]
# search among: exact match, "attr.*", "default" strategy
# if any.
for path_key in (
search_path._loader_key,
search_path._wildcard_path_loader_key,
search_path._default_path_loader_key,
):
if path_key in context.attributes:
load = context.attributes[path_key]
break
# note that if strategy_options.Load is placing non-actionable
# objects in the context like defaultload(), we would
# need to continue the loop here if we got such an
# option as below.
# if load.strategy or load.local_opts:
# break
return load
def _get_strategy(self, key):
try:
return self._strategies[key]
except KeyError:
pass
# run outside to prevent transfer of exception context
cls = self._strategy_lookup(self, *key)
# this previously was setting self._strategies[cls], that's
# a bad idea; should use strategy key at all times because every
# strategy has multiple keys at this point
self._strategies[key] = strategy = cls(self, key)
return strategy
def setup(self, context, query_entity, path, adapter, **kwargs):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.setup_query(
context, query_entity, path, loader, adapter, **kwargs
)
def create_row_processor(
self, context, query_entity, path, mapper, result, adapter, populators
):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.create_row_processor(
context,
query_entity,
path,
loader,
mapper,
result,
adapter,
populators,
)
def do_init(self):
self._strategies = {}
self.strategy = self._get_strategy(self.strategy_key)
def post_instrument_class(self, mapper):
if (
not self.parent.non_primary
and not mapper.class_manager._attr_has_impl(self.key)
):
self.strategy.init_class_attribute(mapper)
_all_strategies = collections.defaultdict(dict)
@classmethod
def strategy_for(cls, **kw):
def decorate(dec_cls):
# ensure each subclass of the strategy has its
# own _strategy_keys collection
if "_strategy_keys" not in dec_cls.__dict__:
dec_cls._strategy_keys = []
key = tuple(sorted(kw.items()))
cls._all_strategies[cls][key] = dec_cls
dec_cls._strategy_keys.append(key)
return dec_cls
return decorate
@classmethod
def _strategy_lookup(cls, requesting_property, *key):
requesting_property.parent._with_polymorphic_mappers
for prop_cls in cls.__mro__:
if prop_cls in cls._all_strategies:
strategies = cls._all_strategies[prop_cls]
try:
return strategies[key]
except KeyError:
pass
for property_type, strats in cls._all_strategies.items():
if key in strats:
intended_property_type = property_type
actual_strategy = strats[key]
break
else:
intended_property_type = None
actual_strategy = None
raise orm_exc.LoaderStrategyException(
cls,
requesting_property,
intended_property_type,
actual_strategy,
key,
)
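# Hedged illustration (editor's addition): a plain-Python analogue of how
# ``strategy_for()`` turns keyword arguments into the hashable keys stored
# in ``_all_strategies`` and later matched by ``_strategy_lookup``.  This is
# a sketch of the mechanics only, not SQLAlchemy API.
def _example_strategy_key_mechanics():
    registry = {}

    def strategy_for(**kw):
        def decorate(dec_cls):
            key = tuple(sorted(kw.items()))
            registry[key] = dec_cls
            return dec_cls
        return decorate

    @strategy_for(lazy="select")
    class LazyLoaderSketch:
        pass

    # at query time the lookup key is built the same way, and a
    # LoaderStrategy keeps both the tuple and its dict form
    key = (("lazy", "select"),)
    assert registry[key] is LazyLoaderSketch
    assert dict(key) == {"lazy": "select"}
    return registry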
class ORMOption(ExecutableOption):
"""Base class for option objects that are passed to ORM queries.
These options may be consumed by :meth:`.Query.options`,
:meth:`.Select.options`, or in a more general sense by any
:meth:`.Executable.options` method. They are interpreted at
statement compile time or execution time in modern use. The
deprecated :class:`.MapperOption` is consumed at ORM query construction
time.
.. versionadded:: 1.4
"""
__slots__ = ()
_is_legacy_option = False
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" SELECT statements that occur for relationship
lazy loaders as well as attribute load / refresh operations.
"""
_is_compile_state = False
_is_criteria_option = False
_is_strategy_option = False
class CompileStateOption(HasCacheKey, ORMOption):
"""base for :class:`.ORMOption` classes that affect the compilation of
a SQL query and therefore need to be part of the cache key.
.. note:: :class:`.CompileStateOption` is generally non-public and
should not be used as a base class for user-defined options; instead,
use :class:`.UserDefinedOption`, which is easier to use as it does not
interact with ORM compilation internals or caching.
:class:`.CompileStateOption` defines an internal attribute
``_is_compile_state=True`` which has the effect that the ORM compilation
routines for SELECT and other statements will call upon these options when
a SQL string is being compiled. As such, these classes implement
:class:`.HasCacheKey` and need to provide robust ``_cache_key_traversal``
structures.
The :class:`.CompileStateOption` class is used to implement the ORM
:class:`.LoaderOption` and :class:`.CriteriaOption` classes.
.. versionadded:: 1.4.28
"""
__slots__ = ()
_is_compile_state = True
def process_compile_state(self, compile_state):
"""Apply a modification to a given :class:`.CompileState`.
This method is part of the implementation of a particular
:class:`.CompileStateOption` and is only invoked internally
when an ORM query is compiled.
"""
def process_compile_state_replaced_entities(
self, compile_state, mapper_entities
):
"""Apply a modification to a given :class:`.CompileState`,
given entities that were replaced by with_only_columns() or
with_entities().
This method is part of the implementation of a particular
:class:`.CompileStateOption` and is only invoked internally
when an ORM query is compiled.
.. versionadded:: 1.4.19
"""
class LoaderOption(CompileStateOption):
"""Describe a loader modification to an ORM statement at compilation time.
.. versionadded:: 1.4
"""
__slots__ = ()
def process_compile_state_replaced_entities(
self, compile_state, mapper_entities
):
self.process_compile_state(compile_state)
class CriteriaOption(CompileStateOption):
"""Describe a WHERE criteria modification to an ORM statement at
compilation time.
.. versionadded:: 1.4
"""
__slots__ = ()
_is_criteria_option = True
def get_global_criteria(self, attributes):
"""update additional entity criteria options in the given
attributes dictionary.
"""
class UserDefinedOption(ORMOption):
"""Base class for a user-defined option that can be consumed from the
:meth:`.SessionEvents.do_orm_execute` event hook.
"""
__slots__ = ("payload",)
_is_legacy_option = False
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" Query objects produced during lazy loads
or refresh operations.
"""
def __init__(self, payload=None):
self.payload = payload
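# Hedged usage sketch (editor's addition): how a ``UserDefinedOption``
# subclass is typically consumed from the ``SessionEvents.do_orm_execute``
# hook.  ``ShardOption`` and the printed routing are illustrative only;
# assumes SQLAlchemy 1.4+.
def _example_user_defined_option():
    from sqlalchemy import event
    from sqlalchemy.orm import Session

    class ShardOption(UserDefinedOption):
        """Carries a shard name in ``payload``."""

    @event.listens_for(Session, "do_orm_execute")
    def _route_by_shard(orm_execute_state):
        for opt in orm_execute_state.user_defined_options:
            if isinstance(opt, ShardOption):
                # e.g. choose an engine or schema based on opt.payload
                print("shard requested:", opt.payload)

    # a statement then opts in per execution, e.g.:
    #     session.execute(select(SomeEntity).options(ShardOption("shard_a")))
    return ShardOption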
@util.deprecated_cls(
"1.4",
"The :class:`.MapperOption class is deprecated and will be removed "
"in a future release. For "
"modifications to queries on a per-execution basis, use the "
":class:`.UserDefinedOption` class to establish state within a "
":class:`.Query` or other Core statement, then use the "
":meth:`.SessionEvents.before_orm_execute` hook to consume them.",
constructor=None,
)
class MapperOption(ORMOption):
"""Describe a modification to a Query"""
__slots__ = ()
_is_legacy_option = True
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" Query objects produced during lazy loads
or refresh operations.
"""
def process_query(self, query):
"""Apply a modification to the given :class:`_query.Query`."""
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
This is typically applied during a lazy load or scalar refresh
operation to propagate options stated in the original Query to the
new Query being used for the load. It occurs for those options that
specify propagate_to_loaders=True.
"""
self.process_query(query)
class LoaderStrategy:
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. this
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
For example, simple column attributes will add their represented
column to the list of selected columns, a joined eager loader
may establish join clauses to add to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
__slots__ = (
"parent_property",
"is_class_level",
"parent",
"key",
"strategy_key",
"strategy_opts",
)
def __init__(self, parent, strategy_key):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
self.strategy_key = strategy_key
self.strategy_opts = dict(strategy_key)
def init_class_attribute(self, mapper):
pass
def setup_query(
self, compile_state, query_entity, path, loadopt, adapter, **kwargs
):
"""Establish column and other state for a given QueryContext.
This method fulfills the contract specified by MapperProperty.setup().
StrategizedProperty delegates its setup() method
directly to this method.
"""
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
"""Establish row processing functions for a given QueryContext.
This method fulfills the contract specified by
MapperProperty.create_row_processor().
StrategizedProperty delegates its create_row_processor() method
directly to this method.
"""
def __str__(self):
return str(self.parent_property)
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import netaddr
from oslo_log import log
from oslo_serialization import jsonutils as json
import six
from tempest.common import compute
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import misc as misc_utils
from tempest.lib import exceptions as lib_exc
from tempest.services.network import resources as net_resources
import tempest.test
CONF = config.CONF
LOG = log.getLogger(__name__)
class ScenarioTest(tempest.test.BaseTestCase):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary']
@classmethod
def setup_clients(cls):
super(ScenarioTest, cls).setup_clients()
# Clients (in alphabetical order)
cls.flavors_client = cls.manager.flavors_client
cls.compute_floating_ips_client = (
cls.manager.compute_floating_ips_client)
if CONF.service_available.glance:
# Glance image client v1
cls.image_client = cls.manager.image_client
# Compute image client
cls.compute_images_client = cls.manager.compute_images_client
cls.keypairs_client = cls.manager.keypairs_client
# Nova security groups client
cls.compute_security_groups_client = (
cls.manager.compute_security_groups_client)
cls.compute_security_group_rules_client = (
cls.manager.compute_security_group_rules_client)
cls.servers_client = cls.manager.servers_client
cls.interface_client = cls.manager.interfaces_client
# Neutron network client
cls.network_client = cls.manager.network_client
cls.networks_client = cls.manager.networks_client
cls.ports_client = cls.manager.ports_client
cls.routers_client = cls.manager.routers_client
cls.subnets_client = cls.manager.subnets_client
cls.floating_ips_client = cls.manager.floating_ips_client
cls.security_groups_client = cls.manager.security_groups_client
cls.security_group_rules_client = (
cls.manager.security_group_rules_client)
# Heat client
cls.orchestration_client = cls.manager.orchestration_client
if CONF.volume_feature_enabled.api_v1:
cls.volumes_client = cls.manager.volumes_client
cls.snapshots_client = cls.manager.snapshots_client
else:
cls.volumes_client = cls.manager.volumes_v2_client
cls.snapshots_client = cls.manager.snapshots_v2_client
# ## Methods to handle sync and async deletes
def setUp(self):
super(ScenarioTest, self).setUp()
self.cleanup_waits = []
# NOTE(mtreinish) This is safe to do in setUp instead of setUpClass
# because scenario tests in the same test class should not share
# resources. If resources were shared between test cases then it
# should be a single scenario test instead of multiples.
# NOTE(yfried): this list is cleaned at the end of test_methods and
# not at the end of the class
self.addCleanup(self._wait_for_cleanups)
def delete_wrapper(self, delete_thing, *args, **kwargs):
"""Ignores NotFound exceptions for delete operations.
@param delete_thing: delete method of a resource. method will be
executed as delete_thing(*args, **kwargs)
"""
try:
# Tempest clients return dicts, so there is no common delete
# method available. Using a callable instead
delete_thing(*args, **kwargs)
except lib_exc.NotFound:
# If the resource is already missing, mission accomplished.
pass
def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
cleanup_callable, cleanup_args=None,
cleanup_kwargs=None, waiter_client=None):
"""Adds wait for async resource deletion at the end of cleanups
@param waiter_callable: callable to wait for the resource to delete
with the following waiter_client if specified.
@param thing_id: the id of the resource to be cleaned-up
@param thing_id_param: the name of the id param in the waiter
@param cleanup_callable: method to pass to self.addCleanup with
the following *cleanup_args, **cleanup_kwargs;
usually a delete method.
"""
if cleanup_args is None:
cleanup_args = []
if cleanup_kwargs is None:
cleanup_kwargs = {}
self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
wait_dict = {
'waiter_callable': waiter_callable,
thing_id_param: thing_id
}
if waiter_client:
wait_dict['client'] = waiter_client
self.cleanup_waits.append(wait_dict)
def _wait_for_cleanups(self):
# To handle async delete actions, a list of waits is added
# which will be iterated over as the last step of clearing the
# cleanup queue. That way all the delete calls are made up front
# and the tests won't succeed unless the deletes are eventually
# successful. This is the same basic approach used in the api tests to
# limit cleanup execution time except here it is multi-resource,
# because of the nature of the scenario tests.
for wait in self.cleanup_waits:
waiter_callable = wait.pop('waiter_callable')
waiter_callable(**wait)
# ## Test functions library
#
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
def create_keypair(self, client=None):
if not client:
client = self.keypairs_client
name = data_utils.rand_name(self.__class__.__name__)
# We don't need to create a keypair by pubkey in scenario
body = client.create_keypair(name=name)
self.addCleanup(client.delete_keypair, name)
return body['keypair']
def create_server(self, name=None, image_id=None, flavor=None,
validatable=False, wait_until=None,
wait_on_delete=True, clients=None, **kwargs):
"""Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
returns a test server. The purpose of this wrapper is to minimize
the impact on the code of the tests already using this
function.
"""
# NOTE(jlanoux): As a first step, ssh checks in the scenario
# tests need to be run regardless of the run_validation and
# validatable parameters and thus until the ssh validation job
# becomes voting in CI. The test resources management and IP
# association are taken care of in the scenario tests.
# Therefore, the validatable parameter is set to false in all
# those tests. In this way create_server just returns a standard
# server and the scenario tests always perform ssh checks.
# Needed for the cross_tenant_traffic test:
if clients is None:
clients = self.manager
vnic_type = CONF.network.port_vnic_type
# If vnic_type is configured create port for
# every network
if vnic_type:
ports = []
networks = []
create_port_body = {'binding:vnic_type': vnic_type,
'namestart': 'port-smoke'}
if kwargs:
# Convert security group names to security group ids
# to pass to create_port
if 'security_groups' in kwargs:
security_groups =\
clients.security_groups_client.list_security_groups(
).get('security_groups')
sec_dict = dict([(s['name'], s['id'])
for s in security_groups])
sec_groups_names = [s['name'] for s in kwargs.pop(
'security_groups')]
security_groups_ids = [sec_dict[s]
for s in sec_groups_names]
if security_groups_ids:
create_port_body[
'security_groups'] = security_groups_ids
networks = kwargs.pop('networks')
# If no networks are passed to us, we look up the tenant's
# private networks and create a port if there is only one
# private network. This is the same behaviour we would expect
# when passing the call to the clients with no networks.
if not networks:
networks = clients.networks_client.list_networks(
filters={'router:external': False})
self.assertEqual(1, len(networks),
"There is more than one"
" network for the tenant")
for net in networks:
net_id = net['uuid']
port = self._create_port(network_id=net_id,
client=clients.ports_client,
**create_port_body)
ports.append({'port': port.id})
if ports:
kwargs['networks'] = ports
self.ports = ports
tenant_network = self.get_tenant_network()
body, servers = compute.create_test_server(
clients,
tenant_network=tenant_network,
wait_until=wait_until,
name=name, flavor=flavor,
image_id=image_id, **kwargs)
# TODO(jlanoux) Move wait_on_delete in compute.py
if wait_on_delete:
self.addCleanup(waiters.wait_for_server_termination,
clients.servers_client,
body['id'])
self.addCleanup_with_wait(
waiter_callable=waiters.wait_for_server_termination,
thing_id=body['id'], thing_id_param='server_id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[clients.servers_client.delete_server, body['id']],
waiter_client=clients.servers_client)
server = clients.servers_client.show_server(body['id'])['server']
return server
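# Hedged sketch (editor's addition, not part of tempest): how a scenario
# test typically combines the helpers above.  The keypair / security-group
# kwargs are forwarded to compute.create_test_server via **kwargs; names
# are illustrative only.
def _example_boot_and_ssh(test):
    keypair = test.create_keypair()
    secgroup = test._create_security_group()
    server = test.create_server(
        key_name=keypair['name'],
        security_groups=[{'name': secgroup['name']}],
        wait_until='ACTIVE')
    ip_address = test.get_server_ip(server)
    return test.get_remote_client(ip_address,
                                  private_key=keypair['private_key'])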
def create_volume(self, size=None, name=None, snapshot_id=None,
imageRef=None, volume_type=None, wait_on_delete=True):
if name is None:
name = data_utils.rand_name(self.__class__.__name__)
kwargs = {'display_name': name,
'snapshot_id': snapshot_id,
'imageRef': imageRef,
'volume_type': volume_type}
if size is not None:
kwargs.update({'size': size})
volume = self.volumes_client.create_volume(**kwargs)['volume']
if wait_on_delete:
self.addCleanup(self.volumes_client.wait_for_resource_deletion,
volume['id'])
self.addCleanup(self.delete_wrapper,
self.volumes_client.delete_volume, volume['id'])
else:
self.addCleanup_with_wait(
waiter_callable=self.volumes_client.wait_for_resource_deletion,
thing_id=volume['id'], thing_id_param='id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[self.volumes_client.delete_volume, volume['id']])
# NOTE(e0ne): Cinder API v2 uses name instead of display_name
if 'display_name' in volume:
self.assertEqual(name, volume['display_name'])
else:
self.assertEqual(name, volume['name'])
self.volumes_client.wait_for_volume_status(volume['id'], 'available')
# The volume retrieved on creation has a non-up-to-date status.
# Retrieval after it becomes active ensures correct details.
volume = self.volumes_client.show_volume(volume['id'])['volume']
return volume
def _create_loginable_secgroup_rule(self, secgroup_id=None):
_client = self.compute_security_groups_client
_client_rules = self.compute_security_group_rules_client
if secgroup_id is None:
sgs = _client.list_security_groups()['security_groups']
for sg in sgs:
if sg['name'] == 'default':
secgroup_id = sg['id']
# These rules are intended to permit inbound ssh and icmp
# traffic from all sources, so no group_id is provided.
# Setting a group_id would only permit traffic from ports
# belonging to the same security group.
rulesets = [
{
# ssh
'ip_protocol': 'tcp',
'from_port': 22,
'to_port': 22,
'cidr': '0.0.0.0/0',
},
{
# ping
'ip_protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '0.0.0.0/0',
}
]
rules = list()
for ruleset in rulesets:
sg_rule = _client_rules.create_security_group_rule(
parent_group_id=secgroup_id, **ruleset)['security_group_rule']
rules.append(sg_rule)
return rules
def _create_security_group(self):
# Create security group
sg_name = data_utils.rand_name(self.__class__.__name__)
sg_desc = sg_name + " description"
secgroup = self.compute_security_groups_client.create_security_group(
name=sg_name, description=sg_desc)['security_group']
self.assertEqual(secgroup['name'], sg_name)
self.assertEqual(secgroup['description'], sg_desc)
self.addCleanup(
self.delete_wrapper,
self.compute_security_groups_client.delete_security_group,
secgroup['id'])
# Add rules to the security group
self._create_loginable_secgroup_rule(secgroup['id'])
return secgroup
def get_remote_client(self, ip_address, username=None, private_key=None):
"""Get a SSH client to a remote server
@param ip_address the server floating or fixed IP address to use
for ssh validation
@param username name of the Linux account on the remote server
@param private_key the SSH private key to use
@return a RemoteClient object
"""
if username is None:
username = CONF.validation.image_ssh_user
# Set this with 'keypair' or others to log in with keypair or
# username/password.
if CONF.validation.auth_method == 'keypair':
password = None
if private_key is None:
private_key = self.keypair['private_key']
else:
password = CONF.validation.image_ssh_password
private_key = None
linux_client = remote_client.RemoteClient(ip_address, username,
pkey=private_key,
password=password)
try:
linux_client.validate_authentication()
except Exception as e:
message = ('Initializing SSH connection to %(ip)s failed. '
'Error: %(error)s' % {'ip': ip_address,
'error': e})
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
LOG.exception(message)
self._log_console_output()
raise
return linux_client
def _image_create(self, name, fmt, path,
disk_format=None, properties=None):
if properties is None:
properties = {}
name = data_utils.rand_name('%s-' % name)
image_file = open(path, 'rb')
self.addCleanup(image_file.close)
params = {
'name': name,
'container_format': fmt,
'disk_format': disk_format or fmt,
'is_public': 'False',
}
params['properties'] = properties
image = self.image_client.create_image(**params)['image']
self.addCleanup(self.image_client.delete_image, image['id'])
self.assertEqual("queued", image['status'])
self.image_client.update_image(image['id'], data=image_file)
return image['id']
def glance_image_create(self):
img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
img_container_format = CONF.scenario.img_container_format
img_disk_format = CONF.scenario.img_disk_format
img_properties = CONF.scenario.img_properties
LOG.debug("paths: img: %s, container_fomat: %s, disk_format: %s, "
"properties: %s, ami: %s, ari: %s, aki: %s" %
(img_path, img_container_format, img_disk_format,
img_properties, ami_img_path, ari_img_path, aki_img_path))
try:
image = self._image_create('scenario-img',
img_container_format,
img_path,
disk_format=img_disk_format,
properties=img_properties)
except IOError:
LOG.debug("A qcow2 image was not found. Try to get a uec image.")
kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
image = self._image_create('scenario-ami', 'ami',
path=ami_img_path,
properties=properties)
LOG.debug("image:%s" % image)
return image
def _log_console_output(self, servers=None):
if not CONF.compute_feature_enabled.console_output:
LOG.debug('Console output not supported, cannot log')
return
if not servers:
servers = self.servers_client.list_servers()
servers = servers['servers']
for server in servers:
console_output = self.servers_client.get_console_output(
server['id'])['output']
LOG.debug('Console output for %s\nbody=\n%s',
server['id'], console_output)
def _log_net_info(self, exc):
# network debug is called as part of ssh init
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug('Network information on a devstack host')
def create_server_snapshot(self, server, name=None):
# Glance client
_image_client = self.image_client
# Compute client
_images_client = self.compute_images_client
if name is None:
name = data_utils.rand_name('scenario-snapshot')
LOG.debug("Creating a snapshot image for server: %s", server['name'])
image = _images_client.create_image(server['id'], name=name)
image_id = image.response['location'].split('images/')[1]
_image_client.wait_for_image_status(image_id, 'active')
self.addCleanup_with_wait(
waiter_callable=_image_client.wait_for_resource_deletion,
thing_id=image_id, thing_id_param='id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[_image_client.delete_image, image_id])
snapshot_image = _image_client.get_image_meta(image_id)
bdm = snapshot_image.get('properties', {}).get('block_device_mapping')
if bdm:
bdm = json.loads(bdm)
if bdm and 'snapshot_id' in bdm[0]:
snapshot_id = bdm[0]['snapshot_id']
self.addCleanup(
self.snapshots_client.wait_for_resource_deletion,
snapshot_id)
self.addCleanup(
self.delete_wrapper, self.snapshots_client.delete_snapshot,
snapshot_id)
self.snapshots_client.wait_for_snapshot_status(snapshot_id,
'available')
image_name = snapshot_image['name']
self.assertEqual(name, image_name)
LOG.debug("Created snapshot image %s for server %s",
image_name, server['name'])
return snapshot_image
def nova_volume_attach(self, server, volume_to_attach):
volume = self.servers_client.attach_volume(
server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
% CONF.compute.volume_device_name)['volumeAttachment']
self.assertEqual(volume_to_attach['id'], volume['id'])
self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
# Return the updated volume after the attachment
return self.volumes_client.show_volume(volume['id'])['volume']
def nova_volume_detach(self, server, volume):
self.servers_client.detach_volume(server['id'], volume['id'])
self.volumes_client.wait_for_volume_status(volume['id'], 'available')
volume = self.volumes_client.show_volume(volume['id'])['volume']
self.assertEqual('available', volume['status'])
def rebuild_server(self, server_id, image=None,
preserve_ephemeral=False, wait=True,
rebuild_kwargs=None):
if image is None:
image = CONF.compute.image_ref
rebuild_kwargs = rebuild_kwargs or {}
LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
server_id, image, preserve_ephemeral)
self.servers_client.rebuild_server(
server_id=server_id, image_ref=image,
preserve_ephemeral=preserve_ephemeral,
**rebuild_kwargs)
if wait:
waiters.wait_for_server_status(self.servers_client,
server_id, 'ACTIVE')
def ping_ip_address(self, ip_address, should_succeed=True,
ping_timeout=None):
timeout = ping_timeout or CONF.validation.ping_timeout
cmd = ['ping', '-c1', '-w1', ip_address]
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return (proc.returncode == 0) == should_succeed
caller = misc_utils.find_test_caller()
LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
' expected result is %(should_succeed)s' % {
'caller': caller, 'ip': ip_address, 'timeout': timeout,
'should_succeed':
'reachable' if should_succeed else 'unreachable'
})
result = tempest.test.call_until_true(ping, timeout, 1)
LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
'ping result is %(result)s' % {
'caller': caller, 'ip': ip_address, 'timeout': timeout,
'result': 'expected' if result else 'unexpected'
})
return result
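# Hedged standalone sketch (editor's addition, not part of tempest): the
# retry pattern used by ping_ip_address above, written against the stdlib
# only so it can be read in isolation.  192.0.2.1 is a documentation address
# and is normally unreachable.
def _example_ping_retry(ip_address='192.0.2.1', timeout=10, interval=1):
    import time

    def ping_once():
        proc = subprocess.Popen(['ping', '-c1', '-w1', ip_address],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()
        return proc.returncode == 0

    start = time.time()
    while time.time() - start < timeout:
        if ping_once():
            return True
        time.sleep(interval)
    return False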
def check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
should_connect=True):
"""Check server connectivity
:param ip_address: server to test against
:param username: server's ssh username
:param private_key: server's ssh private key to be used
:param should_connect: True/False indicates positive/negative test
positive - attempt ping and ssh
negative - attempt ping and fail if it succeeds
:raises: AssertionError if the result of the connectivity check does
not match the value of the should_connect param
"""
if should_connect:
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
self.assertTrue(self.ping_ip_address(ip_address,
should_succeed=should_connect),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
self.get_remote_client(ip_address, username, private_key)
def check_public_network_connectivity(self, ip_address, username,
private_key, should_connect=True,
msg=None, servers=None):
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
LOG.debug('checking network connections to IP %s with user: %s' %
(ip_address, username))
try:
self.check_vm_connectivity(ip_address,
username,
private_key,
should_connect=should_connect)
except Exception:
ex_msg = 'Public network connectivity check failed'
if msg:
ex_msg += ": " + msg
LOG.exception(ex_msg)
self._log_console_output(servers)
raise
def create_floating_ip(self, thing, pool_name=None):
"""Create a floating IP and associates to a server on Nova"""
floating_ip = (self.compute_floating_ips_client.
create_floating_ip(pool=pool_name)['floating_ip'])
self.addCleanup(self.delete_wrapper,
self.compute_floating_ips_client.delete_floating_ip,
floating_ip['id'])
self.compute_floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], thing['id'])
return floating_ip
def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
private_key=None):
ssh_client = self.get_remote_client(ip_address,
private_key=private_key)
if dev_name is not None:
ssh_client.make_fs(dev_name)
ssh_client.mount(dev_name, mount_path)
cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
ssh_client.exec_command(cmd_timestamp)
timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
% mount_path)
if dev_name is not None:
ssh_client.umount(mount_path)
return timestamp
def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
private_key=None):
ssh_client = self.get_remote_client(ip_address,
private_key=private_key)
if dev_name is not None:
ssh_client.mount(dev_name, mount_path)
timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
% mount_path)
if dev_name is not None:
ssh_client.umount(mount_path)
return timestamp
def get_server_ip(self, server):
"""Get the server fixed or floating IP.
Based on the configuration we're in, return a correct ip
address for validating that a guest is up.
"""
if CONF.validation.connect_method == 'floating':
# The tests calling this method don't have a floating IP
# and can't make use of the validation resources. So the
# method creates the floating IP here.
return self.create_floating_ip(server)['ip']
elif CONF.validation.connect_method == 'fixed':
addresses = server['addresses'][CONF.validation.network_for_ssh]
for address in addresses:
if address['version'] == CONF.validation.ip_version_for_ssh:
return address['addr']
raise exceptions.ServerUnreachable()
else:
raise exceptions.InvalidConfiguration()
class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
This class provides helpers for network scenario tests, using the Neutron
API. Helpers from the ancestor class which use the Nova network API are
overridden with the Neutron API.
This class also enforces using Neutron instead of nova-network.
Subclassed tests will be skipped if Neutron is not enabled.
"""
credentials = ['primary', 'admin']
@classmethod
def skip_checks(cls):
super(NetworkScenarioTest, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException('Neutron not available')
@classmethod
def resource_setup(cls):
super(NetworkScenarioTest, cls).resource_setup()
cls.tenant_id = cls.manager.identity_client.tenant_id
def _create_network(self, client=None, networks_client=None,
routers_client=None, tenant_id=None,
namestart='network-smoke-'):
if not client:
client = self.network_client
if not networks_client:
networks_client = self.networks_client
if not routers_client:
routers_client = self.routers_client
if not tenant_id:
tenant_id = client.tenant_id
name = data_utils.rand_name(namestart)
result = networks_client.create_network(name=name, tenant_id=tenant_id)
network = net_resources.DeletableNetwork(
networks_client=networks_client, routers_client=routers_client,
**result['network'])
self.assertEqual(network.name, name)
self.addCleanup(self.delete_wrapper, network.delete)
return network
def _list_networks(self, *args, **kwargs):
"""List networks using admin creds """
networks_list = self.admin_manager.networks_client.list_networks(
*args, **kwargs)
return networks_list['networks']
def _list_subnets(self, *args, **kwargs):
"""List subnets using admin creds """
subnets_list = self.admin_manager.subnets_client.list_subnets(
*args, **kwargs)
return subnets_list['subnets']
def _list_routers(self, *args, **kwargs):
"""List routers using admin creds """
routers_list = self.admin_manager.routers_client.list_routers(
*args, **kwargs)
return routers_list['routers']
def _list_ports(self, *args, **kwargs):
"""List ports using admin creds """
ports_list = self.admin_manager.ports_client.list_ports(
*args, **kwargs)
return ports_list['ports']
def _list_agents(self, *args, **kwargs):
"""List agents using admin creds """
agents_list = self.admin_manager.network_agents_client.list_agents(
*args, **kwargs)
return agents_list['agents']
def _create_subnet(self, network, client=None, subnets_client=None,
routers_client=None, namestart='subnet-smoke',
**kwargs):
"""Create a subnet for the given network
within the cidr block configured for tenant networks.
"""
if not client:
client = self.network_client
if not subnets_client:
subnets_client = self.subnets_client
if not routers_client:
routers_client = self.routers_client
def cidr_in_use(cidr, tenant_id):
"""Check cidr existence
:returns: True if a subnet with the given cidr already exists in the
tenant, False otherwise
"""
cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
return len(cidr_in_use) != 0
ip_version = kwargs.pop('ip_version', 4)
if ip_version == 6:
tenant_cidr = netaddr.IPNetwork(
CONF.network.tenant_network_v6_cidr)
num_bits = CONF.network.tenant_network_v6_mask_bits
else:
tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
num_bits = CONF.network.tenant_network_mask_bits
result = None
str_cidr = None
# Repeatedly attempt subnet creation with sequential cidr
# blocks until an unallocated block is found.
for subnet_cidr in tenant_cidr.subnet(num_bits):
str_cidr = str(subnet_cidr)
if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
continue
subnet = dict(
name=data_utils.rand_name(namestart),
network_id=network.id,
tenant_id=network.tenant_id,
cidr=str_cidr,
ip_version=ip_version,
**kwargs
)
try:
result = subnets_client.create_subnet(**subnet)
break
except lib_exc.Conflict as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
self.assertIsNotNone(result, 'Unable to allocate tenant network')
subnet = net_resources.DeletableSubnet(
network_client=client, subnets_client=subnets_client,
routers_client=routers_client, **result['subnet'])
self.assertEqual(subnet.cidr, str_cidr)
self.addCleanup(self.delete_wrapper, subnet.delete)
return subnet
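# Hedged standalone sketch (editor's addition, not part of tempest): the
# CIDR-scanning loop used by _create_subnet above, run against hard-coded
# illustrative data instead of the Neutron API.
def _example_cidr_scan():
    tenant_cidr = netaddr.IPNetwork('10.100.0.0/16')
    cidrs_in_use = {'10.100.0.0/24'}
    for subnet_cidr in tenant_cidr.subnet(24):
        str_cidr = str(subnet_cidr)
        if str_cidr in cidrs_in_use:
            continue
        # the first unallocated block wins, mirroring the break above
        return str_cidr
    return None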
def _create_port(self, network_id, client=None, namestart='port-quotatest',
**kwargs):
if not client:
client = self.ports_client
name = data_utils.rand_name(namestart)
result = client.create_port(
name=name,
network_id=network_id,
**kwargs)
self.assertIsNotNone(result, 'Unable to allocate port')
port = net_resources.DeletablePort(ports_client=client,
**result['port'])
self.addCleanup(self.delete_wrapper, port.delete)
return port
def _get_server_port_id_and_ip4(self, server, ip_addr=None):
ports = self._list_ports(device_id=server['id'], fixed_ip=ip_addr)
# A port can have more than one IP address in some cases.
# If the network is dual-stack (IPv4 + IPv6), this port is associated
# with two subnets.
port_map = [(p["id"], fxip["ip_address"])
for p in ports
for fxip in p["fixed_ips"]
if netaddr.valid_ipv4(fxip["ip_address"])
and p['status'] == 'ACTIVE']
inactive = [p for p in ports if p['status'] != 'ACTIVE']
if inactive:
LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
self.assertNotEqual(0, len(port_map),
"No IPv4 addresses found in: %s" % ports)
self.assertEqual(len(port_map), 1,
"Found multiple IPv4 addresses: %s. "
"Unable to determine which port to target."
% port_map)
return port_map[0]
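# Hedged sketch (editor's addition, not part of tempest): the IPv4 filtering
# performed by _get_server_port_id_and_ip4 above, applied to a hand-built
# dual-stack ports payload (illustrative data only).
def _example_ipv4_port_filter():
    ports = [{'id': 'p1',
              'status': 'ACTIVE',
              'fixed_ips': [{'ip_address': '10.0.0.5'},
                            {'ip_address': 'fd00::5'}]}]
    port_map = [(p['id'], fxip['ip_address'])
                for p in ports
                for fxip in p['fixed_ips']
                if netaddr.valid_ipv4(fxip['ip_address'])
                and p['status'] == 'ACTIVE']
    return port_map  # [('p1', '10.0.0.5')]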
def _get_network_by_name(self, network_name):
net = self._list_networks(name=network_name)
self.assertNotEqual(len(net), 0,
"Unable to get network by name: %s" % network_name)
return net_resources.AttributeDict(net[0])
def create_floating_ip(self, thing, external_network_id=None,
port_id=None, client=None):
"""Create a floating IP and associates to a resource/port on Neutron"""
if not external_network_id:
external_network_id = CONF.network.public_network_id
if not client:
client = self.floating_ips_client
if not port_id:
port_id, ip4 = self._get_server_port_id_and_ip4(thing)
else:
ip4 = None
result = client.create_floatingip(
floating_network_id=external_network_id,
port_id=port_id,
tenant_id=thing['tenant_id'],
fixed_ip_address=ip4
)
floating_ip = net_resources.DeletableFloatingIp(
client=client,
**result['floatingip'])
self.addCleanup(self.delete_wrapper, floating_ip.delete)
return floating_ip
def _associate_floating_ip(self, floating_ip, server):
port_id, _ = self._get_server_port_id_and_ip4(server)
floating_ip.update(port_id=port_id)
self.assertEqual(port_id, floating_ip.port_id)
return floating_ip
def _disassociate_floating_ip(self, floating_ip):
""":param floating_ip: type DeletableFloatingIp"""
floating_ip.update(port_id=None)
self.assertIsNone(floating_ip.port_id)
return floating_ip
def check_floating_ip_status(self, floating_ip, status):
"""Verifies floatingip reaches the given status
:param floating_ip: net_resources.DeletableFloatingIp floating IP to
to check status
:param status: target status
:raises: AssertionError if status doesn't match
"""
def refresh():
floating_ip.refresh()
return status == floating_ip.status
tempest.test.call_until_true(refresh,
CONF.network.build_timeout,
CONF.network.build_interval)
self.assertEqual(status, floating_ip.status,
message="FloatingIP: {fp} is at status: {cst}. "
"failed to reach status: {st}"
.format(fp=floating_ip, cst=floating_ip.status,
st=status))
LOG.info("FloatingIP: {fp} is at status: {st}"
.format(fp=floating_ip, st=status))
def _check_tenant_network_connectivity(self, server,
username,
private_key,
should_connect=True,
servers_for_debug=None):
if not CONF.network.tenant_networks_reachable:
msg = 'Tenant networks not configured to be reachable.'
LOG.info(msg)
return
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
try:
for net_name, ip_addresses in six.iteritems(server['addresses']):
for ip_address in ip_addresses:
self.check_vm_connectivity(ip_address['addr'],
username,
private_key,
should_connect=should_connect)
except Exception as e:
LOG.exception('Tenant network connectivity check failed')
self._log_console_output(servers_for_debug)
self._log_net_info(e)
raise
def _check_remote_connectivity(self, source, dest, should_succeed=True,
nic=None):
"""check ping server via source ssh connection
:param source: RemoteClient: an ssh connection from which to ping
:param dest: and IP to ping against
:param should_succeed: boolean should ping succeed or not
:param nic: specific network interface to ping from
:returns: boolean -- should_succeed == ping
:returns: ping is false if ping failed
"""
def ping_remote():
try:
source.ping_host(dest, nic=nic)
except lib_exc.SSHExecCommandFailed:
LOG.warning('Failed to ping IP: %s via a ssh connection '
'from: %s.' % (dest, source.ssh_client.host))
return not should_succeed
return should_succeed
return tempest.test.call_until_true(ping_remote,
CONF.validation.ping_timeout,
1)
def _create_security_group(self, security_group_rules_client=None,
tenant_id=None,
namestart='secgroup-smoke',
security_groups_client=None):
if security_group_rules_client is None:
security_group_rules_client = self.security_group_rules_client
if security_groups_client is None:
security_groups_client = self.security_groups_client
if tenant_id is None:
tenant_id = security_groups_client.tenant_id
secgroup = self._create_empty_security_group(
namestart=namestart, client=security_groups_client,
tenant_id=tenant_id)
# Add rules to the security group
rules = self._create_loginable_secgroup_rule(
security_group_rules_client=security_group_rules_client,
secgroup=secgroup,
security_groups_client=security_groups_client)
for rule in rules:
self.assertEqual(tenant_id, rule.tenant_id)
self.assertEqual(secgroup.id, rule.security_group_id)
return secgroup
def _create_empty_security_group(self, client=None, tenant_id=None,
namestart='secgroup-smoke'):
"""Create a security group without rules.
Default rules will be created:
- IPv4 egress to any
- IPv6 egress to any
:param tenant_id: secgroup will be created in this tenant
:returns: DeletableSecurityGroup -- containing the secgroup created
"""
if client is None:
client = self.security_groups_client
if not tenant_id:
tenant_id = client.tenant_id
sg_name = data_utils.rand_name(namestart)
sg_desc = sg_name + " description"
sg_dict = dict(name=sg_name,
description=sg_desc)
sg_dict['tenant_id'] = tenant_id
result = client.create_security_group(**sg_dict)
secgroup = net_resources.DeletableSecurityGroup(
client=client, routers_client=self.routers_client,
**result['security_group']
)
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(tenant_id, secgroup.tenant_id)
self.assertEqual(secgroup.description, sg_desc)
self.addCleanup(self.delete_wrapper, secgroup.delete)
return secgroup
def _default_security_group(self, client=None, tenant_id=None):
"""Get default secgroup for given tenant_id.
:returns: DeletableSecurityGroup -- default secgroup for given tenant
"""
if client is None:
client = self.security_groups_client
if not tenant_id:
tenant_id = client.tenant_id
sgs = [
sg for sg in client.list_security_groups().values()[0]
if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
]
msg = "No default security group for tenant %s." % (tenant_id)
self.assertTrue(len(sgs) > 0, msg)
return net_resources.DeletableSecurityGroup(client=client,
**sgs[0])
def _create_security_group_rule(self, secgroup=None,
sec_group_rules_client=None,
tenant_id=None,
security_groups_client=None, **kwargs):
"""Create a rule from a dictionary of rule parameters.
Create a rule in a secgroup. If secgroup is not defined, the default
secgroup in tenant_id will be used.
:param secgroup: type DeletableSecurityGroup.
:param tenant_id: if secgroup not passed -- the tenant in which to
search for default secgroup
:param kwargs: a dictionary containing rule parameters:
for example, to allow incoming ssh:
rule = {
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 22,
'port_range_max': 22
}
"""
if sec_group_rules_client is None:
sec_group_rules_client = self.security_group_rules_client
if security_groups_client is None:
security_groups_client = self.security_groups_client
if not tenant_id:
tenant_id = security_groups_client.tenant_id
if secgroup is None:
secgroup = self._default_security_group(
client=security_groups_client, tenant_id=tenant_id)
ruleset = dict(security_group_id=secgroup.id,
tenant_id=secgroup.tenant_id)
ruleset.update(kwargs)
sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
sg_rule = net_resources.DeletableSecurityGroupRule(
client=sec_group_rules_client,
**sg_rule['security_group_rule']
)
self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
self.assertEqual(secgroup.id, sg_rule.security_group_id)
return sg_rule
def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
secgroup=None,
security_groups_client=None):
"""Create loginable security group rule
These rules are intended to permit inbound ssh and icmp
traffic from all sources, so no group_id is provided.
Setting a group_id would only permit traffic from ports
belonging to the same security group.
"""
if security_group_rules_client is None:
security_group_rules_client = self.security_group_rules_client
if security_groups_client is None:
security_groups_client = self.security_groups_client
rules = []
rulesets = [
dict(
# ssh
protocol='tcp',
port_range_min=22,
port_range_max=22,
),
dict(
# ping
protocol='icmp',
),
dict(
# ipv6-icmp for ping6
protocol='icmp',
ethertype='IPv6',
)
]
sec_group_rules_client = security_group_rules_client
for ruleset in rulesets:
for r_direction in ['ingress', 'egress']:
ruleset['direction'] = r_direction
try:
sg_rule = self._create_security_group_rule(
sec_group_rules_client=sec_group_rules_client,
secgroup=secgroup,
security_groups_client=security_groups_client,
**ruleset)
except lib_exc.Conflict as ex:
# if rule already exists - skip it and continue
msg = 'Security group rule already exists'
if msg not in ex._error_string:
raise ex
else:
self.assertEqual(r_direction, sg_rule.direction)
rules.append(sg_rule)
return rules
def _get_router(self, client=None, tenant_id=None):
"""Retrieve a router for the given tenant id.
If a public router has been configured, it will be returned.
If a public router has not been configured, but a public
network has, a tenant router will be created and returned that
routes traffic to the public network.
"""
if not client:
client = self.routers_client
if not tenant_id:
tenant_id = client.tenant_id
router_id = CONF.network.public_router_id
network_id = CONF.network.public_network_id
if router_id:
body = client.show_router(router_id)
return net_resources.AttributeDict(**body['router'])
elif network_id:
router = self._create_router(client, tenant_id)
router.set_gateway(network_id)
return router
else:
raise Exception("Neither of 'public_router_id' or "
"'public_network_id' has been defined.")
def _create_router(self, client=None, tenant_id=None,
namestart='router-smoke'):
if not client:
client = self.routers_client
if not tenant_id:
tenant_id = client.tenant_id
name = data_utils.rand_name(namestart)
result = client.create_router(name=name,
admin_state_up=True,
tenant_id=tenant_id)
router = net_resources.DeletableRouter(routers_client=client,
**result['router'])
self.assertEqual(router.name, name)
self.addCleanup(self.delete_wrapper, router.delete)
return router
def _update_router_admin_state(self, router, admin_state_up):
router.update(admin_state_up=admin_state_up)
self.assertEqual(admin_state_up, router.admin_state_up)
def create_networks(self, client=None, networks_client=None,
routers_client=None, subnets_client=None,
tenant_id=None, dns_nameservers=None):
"""Create a network with a subnet connected to a router.
The baremetal driver is a special case since all nodes are
on the same shared network.
:param client: network client to create resources with.
:param tenant_id: id of tenant to create resources in.
:param dns_nameservers: list of dns servers to send to subnet.
:returns: network, subnet, router
"""
if CONF.baremetal.driver_enabled:
# NOTE(Shrews): This exception is for environments where tenant
# credential isolation is available, but network separation is
# not (the current baremetal case). Likely can be removed when
# test account mgmt is reworked:
# https://blueprints.launchpad.net/tempest/+spec/test-accounts
if not CONF.compute.fixed_network_name:
m = 'fixed_network_name must be specified in config'
raise exceptions.InvalidConfiguration(m)
network = self._get_network_by_name(
CONF.compute.fixed_network_name)
router = None
subnet = None
else:
network = self._create_network(
client=client, networks_client=networks_client,
tenant_id=tenant_id)
router = self._get_router(client=routers_client,
tenant_id=tenant_id)
subnet_kwargs = dict(network=network, client=client,
subnets_client=subnets_client,
routers_client=routers_client)
# use explicit check because empty list is a valid option
if dns_nameservers is not None:
subnet_kwargs['dns_nameservers'] = dns_nameservers
subnet = self._create_subnet(**subnet_kwargs)
subnet.add_to_router(router.id)
return network, subnet, router
# power/provision states as of icehouse
class BaremetalPowerStates(object):
"""Possible power states of an Ironic node."""
POWER_ON = 'power on'
POWER_OFF = 'power off'
REBOOT = 'rebooting'
SUSPEND = 'suspended'
class BaremetalProvisionStates(object):
"""Possible provision states of an Ironic node."""
NOSTATE = None
INIT = 'initializing'
ACTIVE = 'active'
BUILDING = 'building'
DEPLOYWAIT = 'wait call-back'
DEPLOYING = 'deploying'
DEPLOYFAIL = 'deploy failed'
DEPLOYDONE = 'deploy complete'
DELETING = 'deleting'
DELETED = 'deleted'
ERROR = 'error'
class BaremetalScenarioTest(ScenarioTest):
credentials = ['primary', 'admin']
@classmethod
def skip_checks(cls):
super(BaremetalScenarioTest, cls).skip_checks()
if (not CONF.service_available.ironic or
not CONF.baremetal.driver_enabled):
msg = 'Ironic not available or Ironic compute driver not enabled'
raise cls.skipException(msg)
@classmethod
def setup_clients(cls):
super(BaremetalScenarioTest, cls).setup_clients()
cls.baremetal_client = cls.admin_manager.baremetal_client
@classmethod
def resource_setup(cls):
super(BaremetalScenarioTest, cls).resource_setup()
# allow any issues obtaining the node list to raise early
cls.baremetal_client.list_nodes()
def _node_state_timeout(self, node_id, state_attr,
target_states, timeout=10, interval=1):
if not isinstance(target_states, list):
target_states = [target_states]
def check_state():
node = self.get_node(node_id=node_id)
if node.get(state_attr) in target_states:
return True
return False
if not tempest.test.call_until_true(
check_state, timeout, interval):
msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
(node_id, state_attr, target_states))
raise exceptions.TimeoutException(msg)
def wait_provisioning_state(self, node_id, state, timeout):
self._node_state_timeout(
node_id=node_id, state_attr='provision_state',
target_states=state, timeout=timeout)
def wait_power_state(self, node_id, state):
self._node_state_timeout(
node_id=node_id, state_attr='power_state',
target_states=state, timeout=CONF.baremetal.power_timeout)
def wait_node(self, instance_id):
"""Waits for a node to be associated with instance_id."""
def _get_node():
node = None
try:
node = self.get_node(instance_id=instance_id)
except lib_exc.NotFound:
pass
return node is not None
if not tempest.test.call_until_true(
_get_node, CONF.baremetal.association_timeout, 1):
msg = ('Timed out waiting to get Ironic node by instance id %s'
% instance_id)
raise exceptions.TimeoutException(msg)
def get_node(self, node_id=None, instance_id=None):
if node_id:
_, body = self.baremetal_client.show_node(node_id)
return body
elif instance_id:
_, body = self.baremetal_client.show_node_by_instance_uuid(
instance_id)
if body['nodes']:
return body['nodes'][0]
def get_ports(self, node_uuid):
ports = []
_, body = self.baremetal_client.list_node_ports(node_uuid)
for port in body['ports']:
_, p = self.baremetal_client.show_port(port['uuid'])
ports.append(p)
return ports
def add_keypair(self):
self.keypair = self.create_keypair()
def boot_instance(self):
self.instance = self.create_server(
key_name=self.keypair['name'])
self.wait_node(self.instance['id'])
self.node = self.get_node(instance_id=self.instance['id'])
self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON)
self.wait_provisioning_state(
self.node['uuid'],
[BaremetalProvisionStates.DEPLOYWAIT,
BaremetalProvisionStates.ACTIVE],
timeout=15)
self.wait_provisioning_state(self.node['uuid'],
BaremetalProvisionStates.ACTIVE,
timeout=CONF.baremetal.active_timeout)
waiters.wait_for_server_status(self.servers_client,
self.instance['id'], 'ACTIVE')
self.node = self.get_node(instance_id=self.instance['id'])
self.instance = (self.servers_client.show_server(self.instance['id'])
['server'])
def terminate_instance(self):
self.servers_client.delete_server(self.instance['id'])
self.wait_power_state(self.node['uuid'],
BaremetalPowerStates.POWER_OFF)
self.wait_provisioning_state(
self.node['uuid'],
BaremetalProvisionStates.NOSTATE,
timeout=CONF.baremetal.unprovision_timeout)
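# Illustrative sketch of how a scenario subclass would drive the helpers above
# (hypothetical test, not part of this module):
#     class BaremetalBasicOps(BaremetalScenarioTest):
#         def test_baremetal_server_ops(self):
#             self.add_keypair()
#             self.boot_instance()
#             self.terminate_instance()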
class EncryptionScenarioTest(ScenarioTest):
"""Base class for encryption scenario tests"""
credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(EncryptionScenarioTest, cls).setup_clients()
if CONF.volume_feature_enabled.api_v1:
cls.admin_volume_types_client = cls.os_adm.volume_types_client
else:
cls.admin_volume_types_client = cls.os_adm.volume_types_v2_client
def create_volume_type(self, client=None, name=None):
if not client:
client = self.admin_volume_types_client
if not name:
name = 'generic'
randomized_name = data_utils.rand_name('scenario-type-' + name)
LOG.debug("Creating a volume type: %s", randomized_name)
body = client.create_volume_type(
name=randomized_name)['volume_type']
self.assertIn('id', body)
self.addCleanup(client.delete_volume_type, body['id'])
return body
def create_encryption_type(self, client=None, type_id=None, provider=None,
key_size=None, cipher=None,
control_location=None):
if not client:
client = self.admin_volume_types_client
if not type_id:
volume_type = self.create_volume_type()
type_id = volume_type['id']
LOG.debug("Creating an encryption type for volume type: %s", type_id)
client.create_encryption_type(
type_id, provider=provider, key_size=key_size, cipher=cipher,
control_location=control_location)['encryption']
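    # Example use of the helpers above (illustrative sketch; the provider and
    # cipher values are assumptions, not mandated by this class):
    #     volume_type = self.create_volume_type()
    #     self.create_encryption_type(
    #         type_id=volume_type['id'],
    #         provider='nova.volume.encryptors.luks.LuksEncryptor',
    #         key_size=256, cipher='aes-xts-plain64',
    #         control_location='front-end')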
class ObjectStorageScenarioTest(ScenarioTest):
"""Provide harness to do Object Storage scenario tests.
Subclasses implement the tests that use the methods provided by this
class.
"""
@classmethod
def skip_checks(cls):
super(ObjectStorageScenarioTest, cls).skip_checks()
if not CONF.service_available.swift:
skip_msg = ("%s skipped as swift is not available" %
cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
super(ObjectStorageScenarioTest, cls).setup_credentials()
operator_role = CONF.object_storage.operator_role
cls.os_operator = cls.get_client_manager(roles=[operator_role])
@classmethod
def setup_clients(cls):
super(ObjectStorageScenarioTest, cls).setup_clients()
# Clients for Swift
cls.account_client = cls.os_operator.account_client
cls.container_client = cls.os_operator.container_client
cls.object_client = cls.os_operator.object_client
def get_swift_stat(self):
"""get swift status for our user account."""
self.account_client.list_account_containers()
LOG.debug('Swift status information obtained successfully')
def create_container(self, container_name=None):
name = container_name or data_utils.rand_name(
'swift-scenario-container')
self.container_client.create_container(name)
        # look for the container to ensure it was created
self.list_and_check_container_objects(name)
LOG.debug('Container %s created' % (name))
self.addCleanup(self.delete_wrapper,
self.container_client.delete_container,
name)
return name
def delete_container(self, container_name):
self.container_client.delete_container(container_name)
LOG.debug('Container %s deleted' % (container_name))
def upload_object_to_container(self, container_name, obj_name=None):
obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
obj_data = data_utils.arbitrary_string()
self.object_client.create_object(container_name, obj_name, obj_data)
self.addCleanup(self.delete_wrapper,
self.object_client.delete_object,
container_name,
obj_name)
return obj_name, obj_data
def delete_object(self, container_name, filename):
self.object_client.delete_object(container_name, filename)
self.list_and_check_container_objects(container_name,
not_present_obj=[filename])
def list_and_check_container_objects(self, container_name,
present_obj=None,
not_present_obj=None):
# List objects for a given container and assert which are present and
# which are not.
if present_obj is None:
present_obj = []
if not_present_obj is None:
not_present_obj = []
_, object_list = self.container_client.list_container_contents(
container_name)
if present_obj:
for obj in present_obj:
self.assertIn(obj, object_list)
if not_present_obj:
for obj in not_present_obj:
self.assertNotIn(obj, object_list)
def change_container_acl(self, container_name, acl):
metadata_param = {'metadata_prefix': 'x-container-',
'metadata': {'read': acl}}
self.container_client.update_container_metadata(container_name,
**metadata_param)
resp, _ = self.container_client.list_container_metadata(container_name)
self.assertEqual(resp['x-container-read'], acl)
def download_and_verify(self, container_name, obj_name, expected_data):
_, obj = self.object_client.get_object(container_name, obj_name)
self.assertEqual(obj, expected_data)
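# Typical flow for a subclass using the Swift helpers above (illustrative
# sketch, not an actual test in this module):
#     class SwiftBasicOps(ObjectStorageScenarioTest):
#         def test_swift_basic_ops(self):
#             self.get_swift_stat()
#             container_name = self.create_container()
#             obj_name, obj_data = self.upload_object_to_container(container_name)
#             self.list_and_check_container_objects(container_name,
#                                                    present_obj=[obj_name])
#             self.download_and_verify(container_name, obj_name, obj_data)
#             self.delete_object(container_name, obj_name)
#             self.delete_container(container_name)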
|
|
import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class CompleteDisputedTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
def run_test(self):
alice = self.nodes[0]
bob = self.nodes[1]
charlie = self.nodes[2]
# generate some coins and send them to bob
generated_coins = 10
time.sleep(4)
api_url = bob["gateway_url"] + "wallet/address"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
address = resp["address"]
elif r.status_code == 404:
raise TestFailure("CompleteDisputedTest - FAIL: Address endpoint not found")
else:
raise TestFailure("CompleteDisputedTest - FAIL: Unknown response")
self.send_bitcoin_cmd("sendtoaddress", address, generated_coins)
time.sleep(20)
# create a profile for charlie
pro = {"name": "Charlie"}
api_url = charlie["gateway_url"] + "ob/profile"
r = requests.post(api_url, data=json.dumps(pro, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDisputedTest - FAIL: Profile post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDisputedTest - FAIL: Profile POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# make charlie a moderator
with open('testdata/moderation.json') as listing_file:
moderation_json = json.load(listing_file, object_pairs_hook=OrderedDict)
api_url = charlie["gateway_url"] + "ob/moderator"
r = requests.put(api_url, data=json.dumps(moderation_json, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDisputedTest - FAIL: Moderator post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDisputedTest - FAIL: Moderator POST failed. Reason: %s", resp["reason"])
moderatorId = charlie["peerId"]
time.sleep(4)
# post profile for alice
with open('testdata/profile.json') as profile_file:
profile_json = json.load(profile_file, object_pairs_hook=OrderedDict)
api_url = alice["gateway_url"] + "ob/profile"
requests.post(api_url, data=json.dumps(profile_json, indent=4))
# post listing to alice
with open('testdata/listing.json') as listing_file:
listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
if self.bitcoincash:
listing_json["metadata"]["pricingCurrency"] = "tbch"
listing_json["moderators"] = [moderatorId]
api_url = alice["gateway_url"] + "ob/listing"
r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDisputedTest - FAIL: Listing post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDisputedTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
slug = resp["slug"]
time.sleep(4)
# get listing hash
api_url = alice["gateway_url"] + "ob/listings/" + alice["peerId"]
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't get listing index")
resp = json.loads(r.text)
listingId = resp[0]["hash"]
# bob send order
with open('testdata/order_direct.json') as order_file:
order_json = json.load(order_file, object_pairs_hook=OrderedDict)
order_json["items"][0]["listingHash"] = listingId
order_json["moderator"] = moderatorId
api_url = bob["gateway_url"] + "ob/purchase"
r = requests.post(api_url, data=json.dumps(order_json, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDisputedTest - FAIL: Purchase post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
self.print_logs(alice, "ob.log")
raise TestFailure("CompleteDisputedTest - FAIL: Purchase POST failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
orderId = resp["orderId"]
payment_address = resp["paymentAddress"]
payment_amount = resp["amount"]
# check the purchase saved correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("CompleteDisputedTest - FAIL: Bob purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("CompleteDisputedTest - FAIL: Bob incorrectly saved as funded")
# check the sale saved correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("CompleteDisputedTest - FAIL: Alice purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("CompleteDisputedTest - FAIL: Alice incorrectly saved as funded")
# fund order
spend = {
"address": payment_address,
"amount": payment_amount,
"feeLevel": "NORMAL"
}
api_url = bob["gateway_url"] + "wallet/spend"
r = requests.post(api_url, data=json.dumps(spend, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDisputedTest - FAIL: Spend post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDisputedTest - FAIL: Spend POST failed. Reason: %s", resp["reason"])
time.sleep(20)
# check bob detected payment
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("CompleteDisputedTest - FAIL: Bob failed to detect his payment")
if resp["funded"] == False:
raise TestFailure("CompleteDisputedTest - FAIL: Bob incorrectly saved as unfunded")
# check alice detected payment
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("CompleteDisputedTest - FAIL: Alice failed to detect payment")
if resp["funded"] == False:
raise TestFailure("CompleteDisputedTest - FAIL: Alice incorrectly saved as unfunded")
# alice send order fulfillment
with open('testdata/fulfillment.json') as fulfillment_file:
fulfillment_json = json.load(fulfillment_file, object_pairs_hook=OrderedDict)
fulfillment_json["orderId"] = orderId
fulfillment_json["slug"] = slug
api_url = alice["gateway_url"] + "ob/orderfulfillment"
r = requests.post(api_url, data=json.dumps(fulfillment_json, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDisputedTest - FAIL: Fulfillment post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDisputedTest - FAIL: Fulfillment POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# Bob open dispute
dispute = {
"orderId": orderId,
"claim": "Bastard ripped me off"
}
api_url = bob["gateway_url"] + "ob/opendispute/"
r = requests.post(api_url, data=json.dumps(dispute, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDisputedTest - FAIL: OpenDispute post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDisputedTest - FAIL: OpenDispute POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# Bob check dispute opened correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "DISPUTED":
raise TestFailure("CompleteDisputedTest - FAIL: Bob failed to detect his dispute")
# Alice check dispute opened correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "DISPUTED":
raise TestFailure("CompleteDisputedTest - FAIL: Alice failed to detect the dispute")
# Charlie check dispute opened correctly
api_url = charlie["gateway_url"] + "ob/case/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load case from Clarlie")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if resp["state"] != "DISPUTED":
raise TestFailure("CompleteDisputedTest - FAIL: Charlie failed to detect the dispute")
# Charlie close dispute
dispute_resolution = {
"OrderID": orderId,
"Resolution": "I'm siding with Bob",
"BuyerPercentage": 100,
"VendorPercentage": 0
}
api_url = charlie["gateway_url"] + "ob/closedispute/"
r = requests.post(api_url, data=json.dumps(dispute_resolution, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDisputedTest - FAIL: CloseDispute post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDisputedTest - FAIL: CloseDispute POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# Alice check dispute closed correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "DECIDED":
self.print_logs(alice, "ob.log")
raise TestFailure("CompleteDisputedTest - FAIL: Alice failed to detect the dispute resolution")
# Bob check dispute closed correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if resp["state"] != "DECIDED":
raise TestFailure("CompleteDisputedTest - FAIL: Bob failed to detect the dispute resolution")
# Charlie check dispute closed correctly
api_url = charlie["gateway_url"] + "ob/case/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load case from Charlie")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if resp["state"] != "RESOLVED":
raise TestFailure("CompleteDisputedTest - FAIL: Charlie failed to detect the dispute resolution")
# Bob release funds
release = {
"OrderID": orderId,
}
api_url = bob["gateway_url"] + "ob/releasefunds/"
r = requests.post(api_url, data=json.dumps(release, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDisputedTest - FAIL: ReleaseFunds post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDisputedTest - FAIL: ReleaseFunds POST failed. Reason: %s", resp["reason"])
time.sleep(20)
self.send_bitcoin_cmd("generate", 1)
time.sleep(2)
# Check bob received payout
api_url = bob["gateway_url"] + "wallet/balance"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
confirmed = int(resp["confirmed"])
#unconfirmed = int(resp["unconfirmed"])
if confirmed <= (generated_coins*100000000) - payment_amount:
raise TestFailure("CompleteDisputedTest - FAIL: Bob failed to detect dispute payout")
elif r.status_code == 404:
raise TestFailure("CompleteDisputedTest - FAIL: Receive coins endpoint not found")
else:
raise TestFailure("CompleteDisputedTest - FAIL: Unknown response")
# Bob check payout transaction recorded
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if len(resp["paymentAddressTransactions"]) != 2:
raise TestFailure("CompleteDisputedTest - FAIL: Bob failed to record payout transaction")
if resp["state"] != "RESOLVED":
raise TestFailure("CompleteDisputedTest - FAIL: Bob failed to set state to RESOLVED")
# Alice check payout transaction recorded
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text, object_pairs_hook=OrderedDict)
if len(resp["paymentAddressTransactions"]) != 2:
raise TestFailure("CompleteDisputedTest - FAIL: Alice failed to record payout transaction")
if resp["state"] != "RESOLVED":
raise TestFailure("CompleteDisputedTest - FAIL: Alice failed to set state to RESOLVED")
# bob send order completion
oc = {
"orderId": orderId,
"ratings": [
{
"slug": slug,
"overall": 4,
"quality": 5,
"description": 5,
"customerService": 4,
"deliverySpeed": 3,
"Review": "I love it!"
}
]
}
api_url = bob["gateway_url"] + "ob/ordercompletion"
r = requests.post(api_url, data=json.dumps(oc, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDisputedTest - FAIL: Completion post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDisputedTest - FAIL: Completion POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# check alice received completion
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "COMPLETED":
raise TestFailure("CompleteDisputedTest - FAIL: Alice failed to detect order completion")
# check bob set completion correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDisputedTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "COMPLETED":
raise TestFailure("CompleteDisputedTest - FAIL: Bob failed to order completion")
print("CompleteDisputedTest - PASS")
if __name__ == '__main__':
print("Running CompleteDisputedTest")
CompleteDisputedTest().main(["--regtest", "--disableexchangerates"])
|
|
#!/usr/bin/env python
import os
import rlp
import utils
import copy
bin_to_nibbles_cache = {}
hti = {}
for i, c in enumerate('0123456789abcdef'):
hti[c] = i
def bin_to_nibbles(s):
"""convert string s to nibbles (half-bytes)
>>> bin_to_nibbles("")
[]
>>> bin_to_nibbles("h")
[6, 8]
>>> bin_to_nibbles("he")
[6, 8, 6, 5]
>>> bin_to_nibbles("hello")
[6, 8, 6, 5, 6, 12, 6, 12, 6, 15]
"""
return [hti[c] for c in s.encode('hex')]
def nibbles_to_bin(nibbles):
if any(x > 15 or x < 0 for x in nibbles):
raise Exception("nibbles can only be [0,..15]")
if len(nibbles) % 2:
raise Exception("nibbles must be of even numbers")
res = ''
for i in range(0, len(nibbles), 2):
res += chr(16 * nibbles[i] + nibbles[i + 1])
return res
NIBBLE_TERMINATOR = 16
RECORDING = 1
NONE = 0
VERIFYING = -1
proving = False
class ProofConstructor():
def __init__(self):
self.mode = []
self.nodes = []
self.exempt = []
def push(self, mode, nodes=[]):
global proving
proving = True
self.mode.append(mode)
self.exempt.append(set())
if mode == VERIFYING:
self.nodes.append(set([rlp.encode(x) for x in nodes]))
else:
self.nodes.append(set())
def pop(self):
global proving
self.mode.pop()
self.nodes.pop()
self.exempt.pop()
if not self.mode:
proving = False
def get_nodelist(self):
return map(rlp.decode, list(self.nodes[-1]))
def get_nodes(self):
return self.nodes[-1]
def add_node(self, node):
node = rlp.encode(node)
if node not in self.exempt[-1]:
self.nodes[-1].add(node)
def add_exempt(self, node):
self.exempt[-1].add(rlp.encode(node))
def get_mode(self):
return self.mode[-1]
proof = ProofConstructor()
class InvalidSPVProof(Exception):
pass
def with_terminator(nibbles):
nibbles = nibbles[:]
if not nibbles or nibbles[-1] != NIBBLE_TERMINATOR:
nibbles.append(NIBBLE_TERMINATOR)
return nibbles
def without_terminator(nibbles):
nibbles = nibbles[:]
if nibbles and nibbles[-1] == NIBBLE_TERMINATOR:
del nibbles[-1]
return nibbles
def adapt_terminator(nibbles, has_terminator):
if has_terminator:
return with_terminator(nibbles)
else:
return without_terminator(nibbles)
def pack_nibbles(nibbles):
"""pack nibbles to binary
:param nibbles: a nibbles sequence. may have a terminator
"""
if nibbles[-1:] == [NIBBLE_TERMINATOR]:
flags = 2
nibbles = nibbles[:-1]
else:
flags = 0
oddlen = len(nibbles) % 2
flags |= oddlen # set lowest bit if odd number of nibbles
if oddlen:
nibbles = [flags] + nibbles
else:
nibbles = [flags, 0] + nibbles
o = ''
for i in range(0, len(nibbles), 2):
o += chr(16 * nibbles[i] + nibbles[i + 1])
return o
def unpack_to_nibbles(bindata):
"""unpack packed binary data to nibbles
:param bindata: binary packed from nibbles
:return: nibbles sequence, may have a terminator
"""
o = bin_to_nibbles(bindata)
flags = o[0]
if flags & 2:
o.append(NIBBLE_TERMINATOR)
if flags & 1 == 1:
o = o[1:]
else:
o = o[2:]
return o
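# Illustrative round-trip of the hex-prefix encoding implemented by
# pack_nibbles/unpack_to_nibbles (a sketch assuming the Python 2 byte strings
# used throughout this module):
#     >>> pack_nibbles([6, 8])                      # even length, no terminator
#     '\x00h'
#     >>> pack_nibbles([6, 8, NIBBLE_TERMINATOR])   # terminator sets flag bit 2
#     ' h'
#     >>> unpack_to_nibbles('\x00h')
#     [6, 8]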
def starts_with(full, part):
    ''' test whether the items in part are
    the leading items of full
    '''
if len(full) < len(part):
return False
return full[:len(part)] == part
(
NODE_TYPE_BLANK,
NODE_TYPE_LEAF,
NODE_TYPE_EXTENSION,
NODE_TYPE_BRANCH
) = tuple(range(4))
def is_key_value_type(node_type):
return node_type in [NODE_TYPE_LEAF,
NODE_TYPE_EXTENSION]
BLANK_NODE = ''
BLANK_ROOT = utils.sha3rlp('')
class Trie(object):
def __init__(self, db, root_hash=BLANK_ROOT):
        '''it also presents a dictionary-like interface
        :param db: key value database
:root: blank or trie node in form of [key, value] or [v0,v1..v15,v]
'''
self.db = db # Pass in a database object directly
self.set_root_hash(root_hash)
# def __init__(self, dbfile, root_hash=BLANK_ROOT):
# '''it also present a dictionary like interface
# :param dbfile: key value database
# :root: blank or trie node in form of [key, value] or [v0,v1..v15,v]
# '''
# if isinstance(dbfile, str):
# dbfile = os.path.abspath(dbfile)
# self.db = DB(dbfile)
# else:
# self.db = dbfile # Pass in a database object directly
# self.set_root_hash(root_hash)
# For SPV proof production/verification purposes
def spv_grabbing(self, node):
global proving
if not proving:
pass
elif proof.get_mode() == RECORDING:
proof.add_node(copy.copy(node))
# print 'recording %s' % utils.sha3(rlp.encode(node)).encode('hex')
elif proof.get_mode() == VERIFYING:
# print 'verifying %s' % utils.sha3(rlp.encode(node)).encode('hex')
if rlp.encode(node) not in proof.get_nodes():
raise InvalidSPVProof("Proof invalid!")
def spv_storing(self, node):
global proving
if not proving:
pass
elif proof.get_mode() == RECORDING:
proof.add_exempt(copy.copy(node))
elif proof.get_mode() == VERIFYING:
proof.add_node(copy.copy(node))
@property
def root_hash(self):
        '''always empty or a 32-byte string
'''
return self.get_root_hash()
def get_root_hash(self):
if self.root_node == BLANK_NODE:
return BLANK_ROOT
assert isinstance(self.root_node, list)
val = rlp.encode(self.root_node)
key = utils.sha3(val)
self.db.put(key, val)
self.spv_grabbing(self.root_node)
return key
@root_hash.setter
def root_hash(self, value):
self.set_root_hash(value)
def set_root_hash(self, root_hash):
if root_hash == BLANK_ROOT:
self.root_node = BLANK_NODE
return
assert isinstance(root_hash, (str, unicode))
assert len(root_hash) in [0, 32]
self.root_node = self._decode_to_node(root_hash)
def clear(self):
''' clear all tree data
'''
self._delete_child_storage(self.root_node)
self._delete_node_storage(self.root_node)
self.root_node = BLANK_NODE
def _delete_child_storage(self, node):
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BRANCH:
for item in node[:16]:
self._delete_child_storage(self._decode_to_node(item))
elif is_key_value_type(node_type):
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_EXTENSION:
self._delete_child_storage(self._decode_to_node(node[1]))
def _encode_node(self, node):
if node == BLANK_NODE:
return BLANK_NODE
assert isinstance(node, list)
rlpnode = rlp.encode(node)
if len(rlpnode) < 32:
return node
hashkey = utils.sha3(rlpnode)
self.db.put(hashkey, rlpnode)
self.spv_storing(node)
return hashkey
def _decode_to_node(self, encoded):
if encoded == BLANK_NODE:
return BLANK_NODE
if isinstance(encoded, list):
return encoded
o = rlp.decode(self.db.get(encoded))
self.spv_grabbing(o)
return o
def _get_node_type(self, node):
        ''' get node type
:param node: node in form of list, or BLANK_NODE
:return: node type
'''
if node == BLANK_NODE:
return NODE_TYPE_BLANK
if len(node) == 2:
nibbles = unpack_to_nibbles(node[0])
has_terminator = (nibbles and nibbles[-1] == NIBBLE_TERMINATOR)
return NODE_TYPE_LEAF if has_terminator\
else NODE_TYPE_EXTENSION
if len(node) == 17:
return NODE_TYPE_BRANCH
def _get(self, node, key):
""" get value inside a node
:param node: node in form of list, or BLANK_NODE
:param key: nibble list without terminator
:return:
BLANK_NODE if does not exist, otherwise value or hash
"""
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return BLANK_NODE
if node_type == NODE_TYPE_BRANCH:
            # already reached the expected node
if not key:
return node[-1]
sub_node = self._decode_to_node(node[key[0]])
return self._get(sub_node, key[1:])
# key value node
curr_key = without_terminator(unpack_to_nibbles(node[0]))
if node_type == NODE_TYPE_LEAF:
return node[1] if key == curr_key else BLANK_NODE
if node_type == NODE_TYPE_EXTENSION:
# traverse child nodes
if starts_with(key, curr_key):
sub_node = self._decode_to_node(node[1])
return self._get(sub_node, key[len(curr_key):])
else:
return BLANK_NODE
def _update(self, node, key, value):
""" update item inside a node
:param node: node in form of list, or BLANK_NODE
:param key: nibble list without terminator
.. note:: key may be []
:param value: value string
:return: new node
        if this node is changed to a new node, its parent will take the
responsibility to *store* the new node storage, and delete the old
node storage
"""
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return [pack_nibbles(with_terminator(key)), value]
elif node_type == NODE_TYPE_BRANCH:
if not key:
node[-1] = value
else:
new_node = self._update_and_delete_storage(
self._decode_to_node(node[key[0]]),
key[1:], value)
node[key[0]] = self._encode_node(new_node)
return node
elif is_key_value_type(node_type):
return self._update_kv_node(node, key, value)
def _update_and_delete_storage(self, node, key, value):
old_node = node[:]
new_node = self._update(node, key, value)
if old_node != new_node:
self._delete_node_storage(old_node)
return new_node
def _update_kv_node(self, node, key, value):
node_type = self._get_node_type(node)
curr_key = without_terminator(unpack_to_nibbles(node[0]))
is_inner = node_type == NODE_TYPE_EXTENSION
# find longest common prefix
prefix_length = 0
for i in range(min(len(curr_key), len(key))):
if key[i] != curr_key[i]:
break
prefix_length = i + 1
remain_key = key[prefix_length:]
remain_curr_key = curr_key[prefix_length:]
if remain_key == [] == remain_curr_key:
if not is_inner:
return [node[0], value]
new_node = self._update_and_delete_storage(
self._decode_to_node(node[1]), remain_key, value)
elif remain_curr_key == []:
if is_inner:
new_node = self._update_and_delete_storage(
self._decode_to_node(node[1]), remain_key, value)
else:
new_node = [BLANK_NODE] * 17
new_node[-1] = node[1]
new_node[remain_key[0]] = self._encode_node([
pack_nibbles(with_terminator(remain_key[1:])),
value
])
else:
new_node = [BLANK_NODE] * 17
if len(remain_curr_key) == 1 and is_inner:
new_node[remain_curr_key[0]] = node[1]
else:
new_node[remain_curr_key[0]] = self._encode_node([
pack_nibbles(
adapt_terminator(remain_curr_key[1:], not is_inner)
),
node[1]
])
if remain_key == []:
new_node[-1] = value
else:
new_node[remain_key[0]] = self._encode_node([
pack_nibbles(with_terminator(remain_key[1:])), value
])
if prefix_length:
# create node for key prefix
return [pack_nibbles(curr_key[:prefix_length]),
self._encode_node(new_node)]
else:
return new_node
def _getany(self, node, reverse=False, path=[]):
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return None
if node_type == NODE_TYPE_BRANCH:
if node[16]:
return [16]
scan_range = range(16)
if reverse:
scan_range.reverse()
for i in scan_range:
o = self._getany(self._decode_to_node(node[i]), path=path + [i])
if o:
return [i] + o
return None
curr_key = without_terminator(unpack_to_nibbles(node[0]))
if node_type == NODE_TYPE_LEAF:
return curr_key
if node_type == NODE_TYPE_EXTENSION:
curr_key = without_terminator(unpack_to_nibbles(node[0]))
sub_node = self._decode_to_node(node[1])
return self._getany(sub_node, path=path + curr_key)
def _iter(self, node, key, reverse=False, path=[]):
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return None
elif node_type == NODE_TYPE_BRANCH:
if len(key):
sub_node = self._decode_to_node(node[key[0]])
o = self._iter(sub_node, key[1:], reverse, path + [key[0]])
if o:
return [key[0]] + o
if reverse:
scan_range = range(key[0] if len(key) else 0)
else:
scan_range = range(key[0] + 1 if len(key) else 0, 16)
for i in scan_range:
sub_node = self._decode_to_node(node[i])
o = self._getany(sub_node, reverse, path + [i])
if o:
return [i] + o
if reverse and node[16]:
return [16]
return None
descend_key = without_terminator(unpack_to_nibbles(node[0]))
if node_type == NODE_TYPE_LEAF:
if reverse:
return descend_key if descend_key < key else None
else:
return descend_key if descend_key > key else None
if node_type == NODE_TYPE_EXTENSION:
# traverse child nodes
sub_node = self._decode_to_node(node[1])
sub_key = key[len(descend_key):]
if starts_with(key, descend_key):
o = self._iter(sub_node, sub_key, reverse, path + descend_key)
            elif descend_key > key[:len(descend_key)] and not reverse:
                o = self._getany(sub_node, reverse=False, path=path + descend_key)
            elif descend_key < key[:len(descend_key)] and reverse:
                o = self._getany(sub_node, reverse=True, path=path + descend_key)
else:
o = None
return descend_key + o if o else None
def next(self, key):
key = bin_to_nibbles(key)
o = self._iter(self.root_node, key)
return nibbles_to_bin(o) if o else None
def prev(self, key):
key = bin_to_nibbles(key)
o = self._iter(self.root_node, key, reverse=True)
return nibbles_to_bin(o) if o else None
def _delete_node_storage(self, node):
'''delete storage
:param node: node in form of list, or BLANK_NODE
'''
if node == BLANK_NODE:
return
assert isinstance(node, list)
encoded = self._encode_node(node)
if len(encoded) < 32:
return
"""
===== FIXME ====
in the current trie implementation two nodes can share identical subtrees
thus we can not safely delete nodes for now
"""
# self.db.delete(encoded) # FIXME
def _delete(self, node, key):
""" update item inside a node
:param node: node in form of list, or BLANK_NODE
:param key: nibble list without terminator
.. note:: key may be []
:return: new node
        if this node is changed to a new node, its parent will take the
responsibility to *store* the new node storage, and delete the old
node storage
"""
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return BLANK_NODE
if node_type == NODE_TYPE_BRANCH:
return self._delete_branch_node(node, key)
if is_key_value_type(node_type):
return self._delete_kv_node(node, key)
def _normalize_branch_node(self, node):
'''node should have only one item changed
'''
not_blank_items_count = sum(1 for x in range(17) if node[x])
assert not_blank_items_count >= 1
if not_blank_items_count > 1:
return node
# now only one item is not blank
not_blank_index = [i for i, item in enumerate(node) if item][0]
# the value item is not blank
if not_blank_index == 16:
return [pack_nibbles(with_terminator([])), node[16]]
# normal item is not blank
sub_node = self._decode_to_node(node[not_blank_index])
sub_node_type = self._get_node_type(sub_node)
if is_key_value_type(sub_node_type):
            # collapse the sub node into this node; the new node will have the
            # same terminator as the sub node, and the value does not change
new_key = [not_blank_index] + \
unpack_to_nibbles(sub_node[0])
return [pack_nibbles(new_key), sub_node[1]]
if sub_node_type == NODE_TYPE_BRANCH:
return [pack_nibbles([not_blank_index]),
self._encode_node(sub_node)]
assert False
def _delete_and_delete_storage(self, node, key):
old_node = node[:]
new_node = self._delete(node, key)
if old_node != new_node:
self._delete_node_storage(old_node)
return new_node
def _delete_branch_node(self, node, key):
        # already reached the expected node
if not key:
node[-1] = BLANK_NODE
return self._normalize_branch_node(node)
encoded_new_sub_node = self._encode_node(
self._delete_and_delete_storage(
self._decode_to_node(node[key[0]]), key[1:])
)
if encoded_new_sub_node == node[key[0]]:
return node
node[key[0]] = encoded_new_sub_node
if encoded_new_sub_node == BLANK_NODE:
return self._normalize_branch_node(node)
return node
def _delete_kv_node(self, node, key):
node_type = self._get_node_type(node)
assert is_key_value_type(node_type)
curr_key = without_terminator(unpack_to_nibbles(node[0]))
if not starts_with(key, curr_key):
# key not found
return node
if node_type == NODE_TYPE_LEAF:
return BLANK_NODE if key == curr_key else node
# for inner key value type
new_sub_node = self._delete_and_delete_storage(
self._decode_to_node(node[1]), key[len(curr_key):])
if self._encode_node(new_sub_node) == node[1]:
return node
# new sub node is BLANK_NODE
if new_sub_node == BLANK_NODE:
return BLANK_NODE
assert isinstance(new_sub_node, list)
# new sub node not blank, not value and has changed
new_sub_node_type = self._get_node_type(new_sub_node)
if is_key_value_type(new_sub_node_type):
            # collapse the sub node into this node; the new node will have the
            # same terminator as the sub node, and the value does not change
new_key = curr_key + unpack_to_nibbles(new_sub_node[0])
return [pack_nibbles(new_key), new_sub_node[1]]
if new_sub_node_type == NODE_TYPE_BRANCH:
return [pack_nibbles(curr_key), self._encode_node(new_sub_node)]
# should be no more cases
assert False
def delete(self, key):
'''
:param key: a string with length of [0, 32]
'''
if not isinstance(key, (str, unicode)):
raise Exception("Key must be string")
if len(key) > 32:
raise Exception("Max key length is 32")
self.root_node = self._delete_and_delete_storage(
self.root_node,
bin_to_nibbles(str(key)))
self.get_root_hash()
def _get_size(self, node):
'''Get counts of (key, value) stored in this and the descendant nodes
:param node: node in form of list, or BLANK_NODE
'''
if node == BLANK_NODE:
return 0
node_type = self._get_node_type(node)
if is_key_value_type(node_type):
value_is_node = node_type == NODE_TYPE_EXTENSION
if value_is_node:
return self._get_size(self._decode_to_node(node[1]))
else:
return 1
elif node_type == NODE_TYPE_BRANCH:
sizes = [self._get_size(self._decode_to_node(node[x]))
for x in range(16)]
sizes = sizes + [1 if node[-1] else 0]
return sum(sizes)
def _to_dict(self, node):
'''convert (key, value) stored in this and the descendant nodes
to dict items.
:param node: node in form of list, or BLANK_NODE
.. note::
Here key is in full form, rather than key of the individual node
'''
if node == BLANK_NODE:
return {}
node_type = self._get_node_type(node)
if is_key_value_type(node_type):
nibbles = without_terminator(unpack_to_nibbles(node[0]))
key = '+'.join([str(x) for x in nibbles])
if node_type == NODE_TYPE_EXTENSION:
sub_dict = self._to_dict(self._decode_to_node(node[1]))
else:
sub_dict = {str(NIBBLE_TERMINATOR): node[1]}
# prepend key of this node to the keys of children
res = {}
for sub_key, sub_value in sub_dict.iteritems():
full_key = '{0}+{1}'.format(key, sub_key).strip('+')
res[full_key] = sub_value
return res
elif node_type == NODE_TYPE_BRANCH:
res = {}
for i in range(16):
sub_dict = self._to_dict(self._decode_to_node(node[i]))
for sub_key, sub_value in sub_dict.iteritems():
full_key = '{0}+{1}'.format(i, sub_key).strip('+')
res[full_key] = sub_value
if node[16]:
res[str(NIBBLE_TERMINATOR)] = node[-1]
return res
def to_dict(self):
d = self._to_dict(self.root_node)
res = {}
for key_str, value in d.iteritems():
if key_str:
nibbles = [int(x) for x in key_str.split('+')]
else:
nibbles = []
key = nibbles_to_bin(without_terminator(nibbles))
res[key] = value
return res
def get(self, key):
return self._get(self.root_node, bin_to_nibbles(str(key)))
def __len__(self):
return self._get_size(self.root_node)
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
return self.update(key, value)
def __delitem__(self, key):
return self.delete(key)
def __iter__(self):
return iter(self.to_dict())
def __contains__(self, key):
return self.get(key) != BLANK_NODE
def update(self, key, value):
'''
:param key: a string
:value: a string
'''
if not isinstance(key, (str, unicode)):
raise Exception("Key must be string")
# if len(key) > 32:
# raise Exception("Max key length is 32")
if not isinstance(value, (str, unicode)):
raise Exception("Value must be string")
# if value == '':
# return self.delete(key)
self.root_node = self._update_and_delete_storage(
self.root_node,
bin_to_nibbles(str(key)),
value)
self.get_root_hash()
def root_hash_valid(self):
if self.root_hash == BLANK_ROOT:
return True
return self.root_hash in self.db
def produce_spv_proof(self, key):
proof.push(RECORDING)
self.get(key)
o = proof.get_nodelist()
proof.pop()
return o
def verify_spv_proof(root, key, nodes):
    import db  # local import so the db module is only required when verifying
    proof.push(VERIFYING, nodes)
    t = Trie(db.EphemDB())
    for i, node in enumerate(nodes):
        R = rlp.encode(node)
        H = utils.sha3(R)
        t.db.put(H, R)
    try:
        t.root_hash = root
        t.get(key)
        proof.pop()
        return True
    except Exception, e:
        print e
        proof.pop()
        return False
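# Minimal usage sketch of the dictionary-like interface (illustrative only;
# ``_MemDB`` is a hypothetical stand-in for any backing store that provides
# get/put and membership tests, it is not part of this module):
#     class _MemDB(object):
#         def __init__(self):
#             self.kv = {}
#         def get(self, k):
#             return self.kv[k]
#         def put(self, k, v):
#             self.kv[k] = v
#         def __contains__(self, k):
#             return k in self.kv
#     t = Trie(_MemDB())
#     t.update('dog', 'puppy')
#     assert t.get('dog') == 'puppy'
#     assert len(t.root_hash) == 32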
if __name__ == "__main__":
import sys
import db
_db = db.DB(sys.argv[2])
def encode_node(nd):
if isinstance(nd, str):
return nd.encode('hex')
else:
return rlp.encode(nd).encode('hex')
if len(sys.argv) >= 2:
if sys.argv[1] == 'insert':
t = Trie(_db, sys.argv[3].decode('hex'))
t.update(sys.argv[4], sys.argv[5])
print encode_node(t.root_hash)
elif sys.argv[1] == 'get':
t = Trie(_db, sys.argv[3].decode('hex'))
print t.get(sys.argv[4])
|
|
""" discover and run doctests in modules and test files."""
import bdb
import inspect
import platform
import sys
import traceback
import types
import warnings
from contextlib import contextmanager
from typing import Any
from typing import Callable
from typing import Dict
from typing import Generator
from typing import Iterable
from typing import List
from typing import Optional
from typing import Pattern
from typing import Sequence
from typing import Tuple
from typing import Union
import py.path
import pytest
from _pytest import outcomes
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ReprFileLocation
from _pytest._code.code import TerminalRepr
from _pytest._io import TerminalWriter
from _pytest.compat import safe_getattr
from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config.argparsing import Parser
from _pytest.fixtures import FixtureRequest
from _pytest.outcomes import OutcomeException
from _pytest.pathlib import import_path
from _pytest.python_api import approx
from _pytest.warning_types import PytestWarning
if TYPE_CHECKING:
import doctest
from typing import Type
DOCTEST_REPORT_CHOICE_NONE = "none"
DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
DOCTEST_REPORT_CHOICE_NDIFF = "ndiff"
DOCTEST_REPORT_CHOICE_UDIFF = "udiff"
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure"
DOCTEST_REPORT_CHOICES = (
DOCTEST_REPORT_CHOICE_NONE,
DOCTEST_REPORT_CHOICE_CDIFF,
DOCTEST_REPORT_CHOICE_NDIFF,
DOCTEST_REPORT_CHOICE_UDIFF,
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
)
# Lazy definition of runner class
RUNNER_CLASS = None
# Lazy definition of output checker class
CHECKER_CLASS = None # type: Optional[Type[doctest.OutputChecker]]
def pytest_addoption(parser: Parser) -> None:
parser.addini(
"doctest_optionflags",
"option flags for doctests",
type="args",
default=["ELLIPSIS"],
)
parser.addini(
"doctest_encoding", "encoding used for doctest files", default="utf-8"
)
group = parser.getgroup("collect")
group.addoption(
"--doctest-modules",
action="store_true",
default=False,
help="run doctests in all .py modules",
dest="doctestmodules",
)
group.addoption(
"--doctest-report",
type=str.lower,
default="udiff",
help="choose another output format for diffs on doctest failure",
choices=DOCTEST_REPORT_CHOICES,
dest="doctestreport",
)
group.addoption(
"--doctest-glob",
action="append",
default=[],
metavar="pat",
help="doctests file matching pattern, default: test*.txt",
dest="doctestglob",
)
group.addoption(
"--doctest-ignore-import-errors",
action="store_true",
default=False,
help="ignore doctest ImportErrors",
dest="doctest_ignore_import_errors",
)
group.addoption(
"--doctest-continue-on-failure",
action="store_true",
default=False,
help="for a given doctest, continue to run after the first failure",
dest="doctest_continue_on_failure",
)
def pytest_unconfigure() -> None:
global RUNNER_CLASS
RUNNER_CLASS = None
def pytest_collect_file(
path: py.path.local, parent
) -> Optional[Union["DoctestModule", "DoctestTextfile"]]:
config = parent.config
if path.ext == ".py":
if config.option.doctestmodules and not _is_setup_py(path):
mod = DoctestModule.from_parent(parent, fspath=path) # type: DoctestModule
return mod
elif _is_doctest(config, path, parent):
txt = DoctestTextfile.from_parent(parent, fspath=path) # type: DoctestTextfile
return txt
return None
def _is_setup_py(path: py.path.local) -> bool:
if path.basename != "setup.py":
return False
contents = path.read_binary()
return b"setuptools" in contents or b"distutils" in contents
def _is_doctest(config: Config, path: py.path.local, parent) -> bool:
if path.ext in (".txt", ".rst") and parent.session.isinitpath(path):
return True
globs = config.getoption("doctestglob") or ["test*.txt"]
for glob in globs:
if path.check(fnmatch=glob):
return True
return False
class ReprFailDoctest(TerminalRepr):
def __init__(
self, reprlocation_lines: Sequence[Tuple[ReprFileLocation, Sequence[str]]]
) -> None:
self.reprlocation_lines = reprlocation_lines
def toterminal(self, tw: TerminalWriter) -> None:
for reprlocation, lines in self.reprlocation_lines:
for line in lines:
tw.line(line)
reprlocation.toterminal(tw)
class MultipleDoctestFailures(Exception):
def __init__(self, failures: "Sequence[doctest.DocTestFailure]") -> None:
super().__init__()
self.failures = failures
def _init_runner_class() -> "Type[doctest.DocTestRunner]":
import doctest
class PytestDoctestRunner(doctest.DebugRunner):
"""
Runner to collect failures. Note that the out variable in this case is
a list instead of a stdout-like object
"""
def __init__(
self,
checker: Optional[doctest.OutputChecker] = None,
verbose: Optional[bool] = None,
optionflags: int = 0,
continue_on_failure: bool = True,
) -> None:
doctest.DebugRunner.__init__(
self, checker=checker, verbose=verbose, optionflags=optionflags
)
self.continue_on_failure = continue_on_failure
def report_failure(
self, out, test: "doctest.DocTest", example: "doctest.Example", got: str,
) -> None:
failure = doctest.DocTestFailure(test, example, got)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
def report_unexpected_exception(
self,
out,
test: "doctest.DocTest",
example: "doctest.Example",
exc_info: "Tuple[Type[BaseException], BaseException, types.TracebackType]",
) -> None:
if isinstance(exc_info[1], OutcomeException):
raise exc_info[1]
if isinstance(exc_info[1], bdb.BdbQuit):
outcomes.exit("Quitting debugger")
failure = doctest.UnexpectedException(test, example, exc_info)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
return PytestDoctestRunner
def _get_runner(
checker: Optional["doctest.OutputChecker"] = None,
verbose: Optional[bool] = None,
optionflags: int = 0,
continue_on_failure: bool = True,
) -> "doctest.DocTestRunner":
# We need this in order to do a lazy import on doctest
global RUNNER_CLASS
if RUNNER_CLASS is None:
RUNNER_CLASS = _init_runner_class()
# Type ignored because the continue_on_failure argument is only defined on
# PytestDoctestRunner, which is lazily defined so can't be used as a type.
return RUNNER_CLASS( # type: ignore
checker=checker,
verbose=verbose,
optionflags=optionflags,
continue_on_failure=continue_on_failure,
)
class DoctestItem(pytest.Item):
def __init__(
self,
name: str,
parent: "Union[DoctestTextfile, DoctestModule]",
runner: Optional["doctest.DocTestRunner"] = None,
dtest: Optional["doctest.DocTest"] = None,
) -> None:
super().__init__(name, parent)
self.runner = runner
self.dtest = dtest
self.obj = None
self.fixture_request = None # type: Optional[FixtureRequest]
@classmethod
def from_parent( # type: ignore
cls,
parent: "Union[DoctestTextfile, DoctestModule]",
*,
name: str,
runner: "doctest.DocTestRunner",
dtest: "doctest.DocTest"
):
        # incompatible signature due to imposed limits on subclass
        """
        The public named constructor.
        """
return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest)
def setup(self) -> None:
if self.dtest is not None:
self.fixture_request = _setup_fixtures(self)
globs = dict(getfixture=self.fixture_request.getfixturevalue)
for name, value in self.fixture_request.getfixturevalue(
"doctest_namespace"
).items():
globs[name] = value
self.dtest.globs.update(globs)
def runtest(self) -> None:
assert self.dtest is not None
assert self.runner is not None
_check_all_skipped(self.dtest)
self._disable_output_capturing_for_darwin()
failures = [] # type: List[doctest.DocTestFailure]
# Type ignored because we change the type of `out` from what
# doctest expects.
self.runner.run(self.dtest, out=failures) # type: ignore[arg-type]
if failures:
raise MultipleDoctestFailures(failures)
def _disable_output_capturing_for_darwin(self) -> None:
"""
Disable output capturing. Otherwise, stdout is lost to doctest (#985)
"""
if platform.system() != "Darwin":
return
capman = self.config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
# TODO: Type ignored -- breaks Liskov Substitution.
def repr_failure( # type: ignore[override]
self, excinfo: ExceptionInfo[BaseException],
) -> Union[str, TerminalRepr]:
import doctest
failures = (
None
) # type: Optional[Sequence[Union[doctest.DocTestFailure, doctest.UnexpectedException]]]
if isinstance(
excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException)
):
failures = [excinfo.value]
elif isinstance(excinfo.value, MultipleDoctestFailures):
failures = excinfo.value.failures
if failures is not None:
reprlocation_lines = []
for failure in failures:
example = failure.example
test = failure.test
filename = test.filename
if test.lineno is None:
lineno = None
else:
lineno = test.lineno + example.lineno + 1
message = type(failure).__name__
# TODO: ReprFileLocation doesn't expect a None lineno.
reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type]
checker = _get_checker()
report_choice = _get_report_choice(
self.config.getoption("doctestreport")
)
if lineno is not None:
assert failure.test.docstring is not None
lines = failure.test.docstring.splitlines(False)
# add line numbers to the left of the error message
assert test.lineno is not None
lines = [
"%03d %s" % (i + test.lineno + 1, x)
for (i, x) in enumerate(lines)
]
# trim docstring error lines to 10
lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
else:
lines = [
"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
]
indent = ">>>"
for line in example.source.splitlines():
lines.append("??? {} {}".format(indent, line))
indent = "..."
if isinstance(failure, doctest.DocTestFailure):
lines += checker.output_difference(
example, failure.got, report_choice
).split("\n")
else:
inner_excinfo = ExceptionInfo(failure.exc_info)
lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
lines += [
x.strip("\n")
for x in traceback.format_exception(*failure.exc_info)
]
reprlocation_lines.append((reprlocation, lines))
return ReprFailDoctest(reprlocation_lines)
else:
return super().repr_failure(excinfo)
def reportinfo(self):
assert self.dtest is not None
return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name
def _get_flag_lookup() -> Dict[str, int]:
import doctest
return dict(
DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
ELLIPSIS=doctest.ELLIPSIS,
IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
ALLOW_UNICODE=_get_allow_unicode_flag(),
ALLOW_BYTES=_get_allow_bytes_flag(),
NUMBER=_get_number_flag(),
)
def get_optionflags(parent):
optionflags_str = parent.config.getini("doctest_optionflags")
flag_lookup_table = _get_flag_lookup()
flag_acc = 0
for flag in optionflags_str:
flag_acc |= flag_lookup_table[flag]
return flag_acc
def _get_continue_on_failure(config):
continue_on_failure = config.getvalue("doctest_continue_on_failure")
if continue_on_failure:
# We need to turn off this if we use pdb since we should stop at
# the first failure
if config.getvalue("usepdb"):
continue_on_failure = False
return continue_on_failure
class DoctestTextfile(pytest.Module):
obj = None
def collect(self) -> Iterable[DoctestItem]:
import doctest
# inspired by doctest.testfile; ideally we would use it directly,
# but it doesn't support passing a custom checker
encoding = self.config.getini("doctest_encoding")
text = self.fspath.read_text(encoding)
filename = str(self.fspath)
name = self.fspath.basename
globs = {"__name__": "__main__"}
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
parser = doctest.DocTestParser()
test = parser.get_doctest(text, globs, name, filename, 0)
if test.examples:
yield DoctestItem.from_parent(
self, name=test.name, runner=runner, dtest=test
)
def _check_all_skipped(test: "doctest.DocTest") -> None:
"""raises pytest.skip() if all examples in the given DocTest have the SKIP
option set.
"""
import doctest
all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
if all_skipped:
pytest.skip("all tests skipped by +SKIP option")
def _is_mocked(obj: object) -> bool:
"""
    Return whether an object is possibly a mock object by checking the existence of a highly improbable attribute.
"""
return (
safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None)
is not None
)
@contextmanager
def _patch_unwrap_mock_aware() -> Generator[None, None, None]:
"""
contextmanager which replaces ``inspect.unwrap`` with a version
that's aware of mock objects and doesn't recurse on them
"""
real_unwrap = inspect.unwrap
def _mock_aware_unwrap(
func: Callable[..., Any], *, stop: Optional[Callable[[Any], Any]] = None
) -> Any:
try:
if stop is None or stop is _is_mocked:
return real_unwrap(func, stop=_is_mocked)
_stop = stop
return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func))
except Exception as e:
warnings.warn(
"Got %r when unwrapping %r. This is usually caused "
"by a violation of Python's object protocol; see e.g. "
"https://github.com/pytest-dev/pytest/issues/5080" % (e, func),
PytestWarning,
)
raise
inspect.unwrap = _mock_aware_unwrap
try:
yield
finally:
inspect.unwrap = real_unwrap
class DoctestModule(pytest.Module):
def collect(self) -> Iterable[DoctestItem]:
import doctest
class MockAwareDocTestFinder(doctest.DocTestFinder):
"""
a hackish doctest finder that overrides stdlib internals to fix a stdlib bug
https://github.com/pytest-dev/pytest/issues/3456
https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
"""
                Doctest code does not take into account `@property`; this is a hackish way to fix it.
https://bugs.python.org/issue17446
"""
if isinstance(obj, property):
obj = getattr(obj, "fget", obj)
# Type ignored because this is a private function.
return doctest.DocTestFinder._find_lineno( # type: ignore
self, obj, source_lines,
)
def _find(
self, tests, obj, name, module, source_lines, globs, seen
) -> None:
if _is_mocked(obj):
return
with _patch_unwrap_mock_aware():
# Type ignored because this is a private function.
doctest.DocTestFinder._find( # type: ignore
self, tests, obj, name, module, source_lines, globs, seen
)
if self.fspath.basename == "conftest.py":
module = self.config.pluginmanager._importconftest(
self.fspath, self.config.getoption("importmode")
)
else:
try:
module = import_path(self.fspath)
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
pytest.skip("unable to import module %r" % self.fspath)
else:
raise
# uses internal doctest module parsing mechanism
finder = MockAwareDocTestFinder()
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=False,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests
yield DoctestItem.from_parent(
self, name=test.name, runner=runner, dtest=test
)
def _setup_fixtures(doctest_item: DoctestItem) -> FixtureRequest:
"""
    Used by DoctestTextfile and DoctestItem to set up fixture information.
"""
def func() -> None:
pass
doctest_item.funcargs = {} # type: ignore[attr-defined]
fm = doctest_item.session._fixturemanager
doctest_item._fixtureinfo = fm.getfixtureinfo( # type: ignore[attr-defined]
node=doctest_item, func=func, cls=None, funcargs=False
)
fixture_request = FixtureRequest(doctest_item)
fixture_request._fillfixtures()
return fixture_request
def _init_checker_class() -> "Type[doctest.OutputChecker]":
import doctest
import re
class LiteralsOutputChecker(doctest.OutputChecker):
"""
Based on doctest_nose_plugin.py from the nltk project
(https://github.com/nltk/nltk) and on the "numtest" doctest extension
by Sebastien Boisgerault (https://github.com/boisgera/numtest).
"""
_unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
_bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
_number_re = re.compile(
r"""
(?P<number>
(?P<mantissa>
(?P<integer1> [+-]?\d*)\.(?P<fraction>\d+)
|
(?P<integer2> [+-]?\d+)\.
)
(?:
[Ee]
(?P<exponent1> [+-]?\d+)
)?
|
(?P<integer3> [+-]?\d+)
(?:
[Ee]
(?P<exponent2> [+-]?\d+)
)
)
""",
re.VERBOSE,
)
def check_output(self, want: str, got: str, optionflags: int) -> bool:
if doctest.OutputChecker.check_output(self, want, got, optionflags):
return True
allow_unicode = optionflags & _get_allow_unicode_flag()
allow_bytes = optionflags & _get_allow_bytes_flag()
allow_number = optionflags & _get_number_flag()
if not allow_unicode and not allow_bytes and not allow_number:
return False
def remove_prefixes(regex: Pattern[str], txt: str) -> str:
return re.sub(regex, r"\1\2", txt)
if allow_unicode:
want = remove_prefixes(self._unicode_literal_re, want)
got = remove_prefixes(self._unicode_literal_re, got)
if allow_bytes:
want = remove_prefixes(self._bytes_literal_re, want)
got = remove_prefixes(self._bytes_literal_re, got)
if allow_number:
got = self._remove_unwanted_precision(want, got)
return doctest.OutputChecker.check_output(self, want, got, optionflags)
def _remove_unwanted_precision(self, want: str, got: str) -> str:
wants = list(self._number_re.finditer(want))
gots = list(self._number_re.finditer(got))
if len(wants) != len(gots):
return got
offset = 0
for w, g in zip(wants, gots):
fraction = w.group("fraction")
exponent = w.group("exponent1")
if exponent is None:
exponent = w.group("exponent2")
if fraction is None:
precision = 0
else:
precision = len(fraction)
if exponent is not None:
precision -= int(exponent)
if float(w.group()) == approx(float(g.group()), abs=10 ** -precision):
# They're close enough. Replace the text we actually
# got with the text we want, so that it will match when we
# check the string literally.
got = (
got[: g.start() + offset] + w.group() + got[g.end() + offset :]
)
offset += w.end() - w.start() - (g.end() - g.start())
return got
return LiteralsOutputChecker
def _get_checker() -> "doctest.OutputChecker":
"""
Return an instance of a doctest.OutputChecker subclass that supports some
additional options:
* ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
prefixes (respectively) in string literals. Useful when the same
doctest should run in Python 2 and Python 3.
* NUMBER to ignore floating-point differences smaller than the
precision of the literal number in the doctest.
An inner class is used to avoid importing "doctest" at the module
level.
"""
global CHECKER_CLASS
if CHECKER_CLASS is None:
CHECKER_CLASS = _init_checker_class()
return CHECKER_CLASS()
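# A minimal sketch of how these flags read in a doctest (assuming this plugin
# is active); with NUMBER, the output only has to match at the precision
# written in the expected value:
#
#     >>> import math
#     >>> math.pi  # doctest: +NUMBER
#     3.14
#
# ALLOW_UNICODE and ALLOW_BYTES likewise let a `u'...'` or `b'...'` prefix in
# the expected output match unprefixed output, which keeps doctests portable
# across Python 2 and 3.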
def _get_allow_unicode_flag() -> int:
"""
Registers and returns the ALLOW_UNICODE flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_UNICODE")
def _get_allow_bytes_flag() -> int:
"""
Registers and returns the ALLOW_BYTES flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_BYTES")
def _get_number_flag() -> int:
"""
Registers and returns the NUMBER flag.
"""
import doctest
return doctest.register_optionflag("NUMBER")
def _get_report_choice(key: str) -> int:
"""
This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid
importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests.
"""
import doctest
return {
DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
DOCTEST_REPORT_CHOICE_NONE: 0,
}[key]
@pytest.fixture(scope="session")
def doctest_namespace() -> Dict[str, Any]:
"""
Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.
"""
return dict()
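# A minimal usage sketch (hypothetical project code, not part of this plugin):
# names placed in `doctest_namespace` from a conftest.py become available in
# every collected doctest without an explicit import, e.g.
#
#     import numpy
#     import pytest
#
#     @pytest.fixture(autouse=True)
#     def add_np(doctest_namespace):
#         doctest_namespace["np"] = numpy
#
# after which a doctest can simply call `np.array([1, 2, 3])`.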
|
|
"""
:codeauthor: Pedro Algarvio ([email protected])
tests.unit.config.schemas.test_ssh
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import salt.utils.stringutils
from salt.config.schemas import ssh as ssh_schemas
from salt.config.schemas.minion import MinionConfiguration
from salt.utils.versions import LooseVersion as _LooseVersion
from tests.support.unit import TestCase, skipIf
try:
import jsonschema
import jsonschema.exceptions
HAS_JSONSCHEMA = True
JSONSCHEMA_VERSION = _LooseVersion(jsonschema.__version__)
except ImportError:
HAS_JSONSCHEMA = False
JSONSCHEMA_VERSION = _LooseVersion("0")
class RosterEntryConfigTest(TestCase):
def test_config(self):
config = ssh_schemas.RosterEntryConfig()
expected = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Roster Entry",
"description": "Salt SSH roster entry definition",
"type": "object",
"properties": {
"host": {
"title": "Host",
"description": "The IP address or DNS name of the remote host",
"type": "string",
"pattern": r"^((\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([A-Za-z0-9][A-Za-z0-9\.\-]{1,255}))$",
"minLength": 1,
},
"port": {
"description": "The target system's ssh port number",
"title": "Port",
"default": 22,
"maximum": 65535,
"minimum": 0,
"type": "integer",
},
"user": {
"default": "root",
"type": "string",
"description": "The user to log in as. Defaults to root",
"title": "User",
"minLength": 1,
},
"passwd": {
"title": "Password",
"type": "string",
"description": "The password to log in with",
"format": "secret",
"minLength": 1,
},
"priv": {
"type": "string",
"description": (
"File path to ssh private key, defaults to salt-ssh.rsa"
),
"title": "Private Key",
"minLength": 1,
},
"priv_passwd": {
"type": "string",
"description": "Passphrase for private key file",
"title": "Private Key passphrase",
"format": "secret",
"minLength": 1,
},
"sudo": {
"default": False,
"type": "boolean",
"description": "run command via sudo. Defaults to False",
"title": "Sudo",
},
"timeout": {
"type": "integer",
"description": (
"Number of seconds to wait for response when establishing an"
" SSH connection"
),
"title": "Timeout",
},
"thin_dir": {
"type": "string",
"description": (
"The target system's storage directory for Salt components."
" Defaults to /tmp/salt-<hash>."
),
"title": "Thin Directory",
},
# The actual representation of the minion options would make this HUGE!
"minion_opts": ssh_schemas.DictItem(
title="Minion Options",
description="Dictionary of minion options",
properties=MinionConfiguration(),
).serialize(),
},
"anyOf": [{"required": ["passwd"]}, {"required": ["priv"]}],
"required": ["host", "user"],
"x-ordering": [
"host",
"port",
"user",
"passwd",
"priv",
"priv_passwd",
"sudo",
"timeout",
"thin_dir",
"minion_opts",
],
"additionalProperties": False,
}
try:
self.assertDictContainsSubset(
expected["properties"], config.serialize()["properties"]
)
self.assertDictContainsSubset(expected, config.serialize())
except AssertionError:
import salt.utils.json
print(salt.utils.json.dumps(config.serialize(), indent=4))
raise
@skipIf(HAS_JSONSCHEMA is False, "The 'jsonschema' library is missing")
def test_config_validate(self):
try:
jsonschema.validate(
{"host": "localhost", "user": "root", "passwd": "foo"},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker(),
)
except jsonschema.exceptions.ValidationError as exc:
self.fail("ValidationError raised: {}".format(exc))
try:
jsonschema.validate(
{"host": "127.0.0.1", "user": "root", "passwd": "foo"},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker(),
)
except jsonschema.exceptions.ValidationError as exc:
self.fail("ValidationError raised: {}".format(exc))
try:
jsonschema.validate(
{"host": "127.1.0.1", "user": "root", "priv": "foo", "passwd": "foo"},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker(),
)
except jsonschema.exceptions.ValidationError as exc:
self.fail("ValidationError raised: {}".format(exc))
try:
jsonschema.validate(
{"host": "127.1.0.1", "user": "root", "passwd": "foo", "sudo": False},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker(),
)
except jsonschema.exceptions.ValidationError as exc:
self.fail("ValidationError raised: {}".format(exc))
try:
jsonschema.validate(
{
"host": "127.1.0.1",
"user": "root",
"priv": "foo",
"passwd": "foo",
"thin_dir": "/foo/bar",
},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker(),
)
except jsonschema.exceptions.ValidationError as exc:
self.fail("ValidationError raised: {}".format(exc))
try:
jsonschema.validate(
{
"host": "127.1.0.1",
"user": "root",
"passwd": "foo",
"minion_opts": {"interface": "0.0.0.0"},
},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker(),
)
except jsonschema.exceptions.ValidationError as exc:
self.fail("ValidationError raised: {}".format(exc))
with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo:
jsonschema.validate(
{"host": "127.1.0.1", "user": "", "passwd": "foo"},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker(),
)
self.assertIn("is too short", excinfo.exception.message)
with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo:
jsonschema.validate(
{
"host": "127.1.0.1",
"user": "root",
"passwd": "foo",
"minion_opts": {"interface": 0},
},
ssh_schemas.RosterEntryConfig.serialize(),
format_checker=jsonschema.FormatChecker(),
)
self.assertIn("is not of type", excinfo.exception.message)
class RosterItemTest(TestCase):
def test_roster_config(self):
try:
self.assertDictContainsSubset(
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Roster Configuration",
"description": "Roster entries definition",
"type": "object",
"patternProperties": {
r"^([^:]+)$": ssh_schemas.RosterEntryConfig.serialize()
},
"additionalProperties": False,
},
ssh_schemas.RosterItem.serialize(),
)
except AssertionError:
import salt.utils.json
print(salt.utils.json.dumps(ssh_schemas.RosterItem.serialize(), indent=4))
raise
@skipIf(HAS_JSONSCHEMA is False, "The 'jsonschema' library is missing")
def test_roster_config_validate(self):
try:
jsonschema.validate(
{"target-1": {"host": "localhost", "user": "root", "passwd": "foo"}},
ssh_schemas.RosterItem.serialize(),
format_checker=jsonschema.FormatChecker(),
)
except jsonschema.exceptions.ValidationError as exc:
self.fail("ValidationError raised: {}".format(exc))
with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo:
jsonschema.validate(
{
salt.utils.stringutils.to_str("target-1:1"): {
"host": "localhost",
"user": "root",
"passwd": "foo",
}
},
ssh_schemas.RosterItem.serialize(),
format_checker=jsonschema.FormatChecker(),
)
if JSONSCHEMA_VERSION < _LooseVersion("2.6.0"):
self.assertIn(
"Additional properties are not allowed ('target-1:1' was unexpected)",
excinfo.exception.message,
)
else:
self.assertIn(
"'target-1:1' does not match any of the regexes",
excinfo.exception.message,
)
|
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattercarpet"
_path_str = "scattercarpet.textfont"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Textfont object
Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattercarpet.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattercarpet.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("familysrc", None)
_v = familysrc if familysrc is not None else _v
if _v is not None:
self["familysrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
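# A minimal usage sketch (assumes plotly is installed; the a/b/text values are
# arbitrary illustration data). Running this module directly prints the
# Textfont object produced from the constructor arguments handled above.
if __name__ == "__main__":
    import plotly.graph_objects as go

    fig = go.Figure(
        go.Scattercarpet(
            a=[4.0, 4.5, 5.0],
            b=[1.5, 2.5, 3.5],
            mode="text",
            text=["p1", "p2", "p3"],
            # These keys map onto the properties defined in this class.
            textfont=dict(family="Arial", size=14, color="crimson"),
        )
    )
    print(fig.data[0].textfont)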
|
|
from builtins import zip
import os
import pytest
from threeML import *
from threeML.plugins.OGIPLike import OGIPLike
from threeML.utils.fitted_objects.fitted_point_sources import InvalidUnitError
from threeML.io.calculate_flux import _calculate_point_source_flux
import astropy.units as u
import matplotlib.pyplot as plt
from threeML.io.package_data import get_path_of_data_dir
# Init some globals
datadir = os.path.abspath(
os.path.join(get_path_of_data_dir(), "datasets", "bn090217206")
)
good_d_flux_units = ["1/(cm2 s keV)", "erg/(cm2 s keV)", "erg2/(cm2 s keV)"]
good_i_flux_units = ["1/(cm2 s )", "erg/(cm2 s )", "erg2/(cm2 s )"]
good_energy_units = ["keV", "Hz", "nm"]
bad_flux_units = ["g"]
def make_simple_model():
triggerName = "bn090217206"
ra = 204.9
dec = -8.4
powerlaw = Powerlaw()
GRB = PointSource(triggerName, ra, dec, spectral_shape=powerlaw)
model = Model(GRB)
powerlaw.index.prior = Uniform_prior(lower_bound=-5.0, upper_bound=5.0)
powerlaw.K.prior = Log_uniform_prior(lower_bound=1.0, upper_bound=10)
return model
def make_components_model():
triggerName = "bn090217206"
ra = 204.9
dec = -8.4
powerlaw = Powerlaw() + Blackbody()
GRB = PointSource(triggerName, ra, dec, spectral_shape=powerlaw)
model = Model(GRB)
powerlaw.index_1.prior = Uniform_prior(lower_bound=-5.0, upper_bound=5.0)
powerlaw.K_1.prior = Log_uniform_prior(lower_bound=1.0, upper_bound=10)
powerlaw.K_2.prior = Uniform_prior(lower_bound=-5.0, upper_bound=5.0)
powerlaw.kT_2.prior = Log_uniform_prior(lower_bound=1.0, upper_bound=10)
return model
def make_dless_components_model():
triggerName = "bn090217206"
ra = 204.9
dec = -8.4
powerlaw = Powerlaw() * Constant()
GRB = PointSource(triggerName, ra, dec, spectral_shape=powerlaw)
model = Model(GRB)
powerlaw.index_1.prior = Uniform_prior(lower_bound=-5.0, upper_bound=5.0)
powerlaw.K_1.prior = Log_uniform_prior(lower_bound=1.0, upper_bound=10)
powerlaw.k_2 = 1.0
powerlaw.k_2.fix = True
return model
@pytest.fixture
def analysis_to_test(data_list_bn090217206_nai6):
simple_model = make_simple_model()
complex_model = make_components_model()
# prepare mle
dless_model = make_dless_components_model()
jl_simple = JointLikelihood(simple_model, data_list_bn090217206_nai6)
jl_simple.fit()
jl_complex = JointLikelihood(complex_model, data_list_bn090217206_nai6)
jl_complex.fit()
jl_dless = JointLikelihood(dless_model, data_list_bn090217206_nai6)
jl_dless.fit()
bayes_simple = BayesianAnalysis(simple_model, data_list_bn090217206_nai6)
bayes_simple.set_sampler("emcee")
bayes_simple.sampler.setup(n_iterations=10, n_burn_in=10, n_walkers=20)
bayes_simple.sample()
bayes_complex = BayesianAnalysis(complex_model, data_list_bn090217206_nai6)
bayes_complex.set_sampler("emcee")
bayes_complex.sampler.setup(n_iterations=10, n_burn_in=10, n_walkers=20)
bayes_complex.sample()
bayes_dless = BayesianAnalysis(dless_model, data_list_bn090217206_nai6)
bayes_dless.set_sampler("emcee")
bayes_dless.sampler.setup(n_iterations=10, n_burn_in=10, n_walkers=20)
bayes_dless.sample()
analysis_to_test = [
jl_simple.results,
jl_complex.results,
jl_dless.results,
bayes_simple.results,
bayes_complex.results,
bayes_dless.results,
]
return analysis_to_test
def test_fitted_point_source_plotting(analysis_to_test):
plot_keywords = {
"use_components": True,
"components_to_use": ["Powerlaw", "total"],
"sources_to_use": ["bn090217206"],
"flux_unit": "erg/(cm2 s)",
"energy_unit": "keV",
"plot_style_kwargs": {},
"contour_style_kwargs": {},
"legend_kwargs": {},
"ene_min": 10,
"ene_max": 100,
"num_ene": 5,
"show_legend": False,
"fit_cmap": "jet",
"countor_cmap": "jet",
"sum_sources": True,
}
for u1, u2 in zip(good_d_flux_units, good_i_flux_units):
for e_unit in good_energy_units:
for x in analysis_to_test:
_ = plot_point_source_spectra(
x, flux_unit=u1, energy_unit=e_unit, num_ene=5
)
_ = plot_point_source_spectra(x, **plot_keywords)
with pytest.raises(InvalidUnitError):
_ = plot_point_source_spectra(x, flux_unit=bad_flux_units[0])
plt.close("all")
def test_fitted_point_source_flux_calculations(analysis_to_test):
flux_keywords = {
"use_components": True,
"components_to_use": ["total", "Powerlaw"],
"sources_to_use": ["bn090217206"],
"flux_unit": "erg/(cm2 s)",
"energy_unit": "keV",
"sum_sources": True,
}
_calculate_point_source_flux(
1, 10, analysis_to_test[0], flux_unit=good_i_flux_units[0], energy_unit="keV"
)
_calculate_point_source_flux(1, 10, analysis_to_test[-2], **flux_keywords)
def test_units_on_energy_range(analysis_to_test):
_ = plot_point_source_spectra(
analysis_to_test[0], ene_min=1.0 * u.keV, ene_max=1 * u.MeV
)
with pytest.raises(AssertionError):
plot_point_source_spectra(analysis_to_test[0], ene_min=1.0, ene_max=1 * u.MeV)
with pytest.raises(AssertionError):
plot_point_source_spectra(analysis_to_test[0], ene_min=1.0 * u.keV, ene_max=1.0)
|
|
import json
import re
from collections import OrderedDict
from random import choice
from discord import ChannelType, Embed
from discord.utils import find
from musicbot.bookmarks import bookmark
from musicbot.entry import Entry
from musicbot.logger import OnlineLogger
from musicbot.settings import Settings
from musicbot.utils import (Response, block_user, command_info, owner_only,
parse_timestamp, to_timestamp)
from musicbot.web_author import WebAuthor
from openpyxl import Workbook
class ToolCommands:
@command_info("2.0.3", 1485516420, {
"3.7.5": (1481827320, "The command finally works like it should"),
"3.9.9": (1499977057, "moving Giesela too"),
"4.1.8": (1500882643, "Updating to new player model")
})
async def cmd_moveus(self, server, channel, author, user_mentions, leftover_args):
"""
///|Usage
`{command_prefix}moveus <channel name>`
///|Explanation
Move everyone in your current channel to another one!
"""
target_channel = None
target = " ".join(leftover_args)
if user_mentions:
target_channel = user_mentions[0].voice.voice_channel
if not target_channel and target:
target_channel = find(
lambda vc: vc.type == ChannelType.voice and target.lower().strip() in vc.name.lower().strip(),
server.channels
)
if target_channel is None:
return Response("Can't resolve the target channel!")
author_channel = author.voice.voice_channel
voice_members = author_channel.voice_members
move_myself = False
if server.me in voice_members:
voice_members.remove(server.me)
move_myself = True
for voice_member in voice_members:
await self.move_member(voice_member, target_channel)
if move_myself:
await self.get_player(server, target_channel)
@command_info("1.0.0", 1477180800, {
"2.0.2": (1481827560, "Can now use @mentions to \"goto\" a user"),
"4.1.8": (1500881315, "Merging with old goto command")
})
async def cmd_summon(self, server, author, user_mentions, leftover_args):
"""
Usage:
{command_prefix}summon [@mention | name]
Call the bot to the summoner's voice channel.
"""
target_channel = None
target = " ".join(leftover_args)
if user_mentions:
target_channel = user_mentions[0].voice.voice_channel
if not target_channel and target:
target_channel = find(lambda vc: vc.type == ChannelType.voice and target.lower().strip() in vc.name.lower().strip(), server.channels)
if not target_channel:
target_channel = author.voice_channel
if not target_channel:
return Response("Couldn't find voic channel")
player = await self.get_player(server, target_channel)
if player.is_stopped:
player.play()
@owner_only
async def cmd_countmsgs(self, server, author, channel_id, number):
"""
///|Usage
`{command_prefix}countmsgs <channel> <number>`
///|Explanation
Count up to <number> messages in <channel> and return stats by user.
"""
alphabet = list("abcdefghijklmnopqrstuvwxyz")
def index_to_alphabet(ind):
    # Spreadsheet-style column naming: 0 -> 'A', 25 -> 'Z', 26 -> 'AA', ...
    if ind < len(alphabet):
        return alphabet[ind].upper()
    quotient, remainder = divmod(ind, len(alphabet))
    return index_to_alphabet(quotient - 1) + alphabet[remainder].upper()
msgs_by_member = {}
msgs_by_date = OrderedDict()
answers_by_date = OrderedDict()
channel = server.get_channel(channel_id)
last_msg = None
last_answer = None
spam = 0
async for msg in self.logs_from(channel, limit=int(number)):
increment = 1
if last_msg is not None and msg.author.id == last_msg.author.id and abs(
(last_msg.timestamp - msg.timestamp).total_seconds()) < 10:
spam += 1
last_msg = msg
increment = 0
if last_answer is None or last_answer.author != msg.author:
dt = answers_by_date.get(
"{0.day:0>2}/{0.month:0>2}/{0.year:0>4}".format(
msg.timestamp), {})
dt[msg.author.id] = dt.get(msg.author.id, 0) + increment
answers_by_date["{0.day:0>2}/{0.month:0>2}/{0.year:0>4}".
format(msg.timestamp)] = dt
last_answer = msg
existing_msgs = msgs_by_member.get(msg.author.id, [0, 0])
existing_msgs[0] += increment
existing_msgs[1] += len(re.sub(r"\W", r"", msg.content))
msgs_by_member[msg.author.id] = existing_msgs
dt = msgs_by_date.get(
"{0.day:0>2}/{0.month:0>2}/{0.year:0>4}".format(msg.timestamp),
{})
dt[msg.author.id] = dt.get(msg.author.id, 0) + increment
msgs_by_date["{0.day:0>2}/{0.month:0>2}/{0.year:0>4}".format(
msg.timestamp)] = dt
last_msg = msg
wb = Workbook()
ws = wb.active
ws.title = "Messages"
ws2 = wb.create_sheet("Answers")
ws["A2"] = "TOTAL"
sorted_user_index = {}
i = 1
for member in sorted(msgs_by_member):
data = msgs_by_member[member]
ws["{}{}".format("A", i)] = server.get_member(
member
).name if server.get_member(member) is not None else "Unknown"
ws["{}{}".format("B", i)] = data[0]
ws["{}{}".format("C", i)] = data[1]
sorted_user_index[member] = index_to_alphabet(i)
i += 1
i += 1
for date in reversed(msgs_by_date.keys()):
ws["A" + str(i)] = date
for mem in msgs_by_date[date]:
ws["{}{}".format(sorted_user_index.get(mem),
i)] = msgs_by_date[date][mem]
i += 1
i = 1
for date in reversed(answers_by_date.keys()):
ws2["A" + str(i)] = date
for mem in answers_by_date[date]:
ws2["{}{}".format(sorted_user_index.get(mem),
i)] = answers_by_date[date][mem]
i += 1
wb.save("cache/last_data.xlsx")
await self.send_file(
author,
open("cache/last_data.xlsx", "rb"),
filename="%s-msgs.xlsx" % (server.name.replace(" ", "_")))
async def cmd_archivechat(self, server, author, message, placeholder=None, number=1000000):
if message.channel_mentions is None or len(
message.channel_mentions) < 1:
return Response("Stupid duck")
channel = message.channel_mentions[0]
msgs = []
async for msg in self.logs_from(channel, limit=int(number)):
msg_data = {
"name": msg.author.name,
"timestamp": str(round(msg.timestamp.timestamp())),
"content": msg.content,
"attachments": msg.attachments
}
msgs.append(msg_data)
json.dump(msgs[::-1], open("cache/last_message_archive.json", "w+"))
await self.send_file(
author,
open("cache/last_message_archive.json", "rb"),
filename="%s-msg-archive.json" % (server.name.replace(" ", "_")))
@owner_only
async def cmd_surveyserver(self, server):
if self.online_loggers.get(server.id, None) is not None:
return Response("I'm already looking at this server")
else:
online_logger = OnlineLogger(self)
self.online_loggers[server.id] = online_logger
Settings["online_loggers"] = list(self.online_loggers.keys())
return Response("okay, okay!")
def load_online_loggers(self):
for server_id in Settings.get_setting("online_loggers", default=[]):
online_logger = OnlineLogger(self)
self.online_loggers[server_id] = online_logger
for listener in Settings.get_setting(
"online_logger_listeners_" + server_id, default=[]):
online_logger.add_listener(listener)
@owner_only
async def cmd_evalsurvey(self, server, author):
online_logger = self.online_loggers.get(server.id, None)
if online_logger is None:
return Response("I'm not even spying here")
online_logger.create_output()
await self.send_file(
author,
open("cache/last_survey_data.xlsx", "rb"),
filename="%s-survey.xlsx" % (server.name.replace(" ", "_")))
return Response("There you go, fam")
@owner_only
async def cmd_resetsurvey(self, server):
online_logger = self.online_loggers.get(server.id, None)
if online_logger is None:
return Response("I'm not even spying here")
online_logger.reset()
return Response("Well then")
async def cmd_notifyme(self, server, author):
"""
Usage:
{command_prefix}notifyme
Get notified when someone starts playing
"""
online_logger = self.online_loggers.get(server.id, None)
if online_logger is None:
return Response("I'm not even spying here")
if online_logger.add_listener(author.id):
Settings["online_logger_listeners_" + server.id] = [
*Settings.get_setting(
"online_logger_listeners_" + server.id, default=[]),
author.id
]
return Response("Got'cha!")
else:
try:
Settings["online_logger_listeners_" + server.id] = [
x
for x in Settings.get_setting(
"online_logger_listeners_" + server.id, default=[])
if x != author.id
]
except ValueError:
pass
return Response("Nevermore you shall be annoyed!")
@command_info("2.2.1", 1493757540, {
"3.7.8": (1499019245, "Fixed quoting by content.")
})
async def cmd_quote(self, author, channel, message, leftover_args):
"""
///|Usage
`{command_prefix}quote [#channel] <message id> [message id...]`
`{command_prefix}quote [#channel] [@mention] \"<message content>\"`
///|Explanation
Quote a message
"""
quote_to_channel = channel
target_author = None
if message.channel_mentions:
channel = message.channel_mentions[0]
leftover_args = leftover_args[1:]
if message.mentions:
target_author = message.mentions[0]
leftover_args = leftover_args[1:]
if len(leftover_args) < 1:
return Response("Please specify the message you want to quote")
message_content = " ".join(leftover_args)
if (message_content[0] == "\"" and message_content[-1] == "\"") or re.search(r"\D", message_content) is not None:
message_content = message_content.replace("\"", "")
async for msg in self.logs_from(channel, limit=3000):
if msg.id != message.id and message_content.lower().strip() in msg.content.lower().strip():
if target_author is None or target_author.id == msg.author.id:
leftover_args = [msg.id, ]
break
else:
if target_author is not None:
return Response("Didn't find a message with that content from {}".format(target_author.mention))
else:
return Response("Didn't find a message with that content")
await self.safe_delete_message(message)
for message_id in leftover_args:
try:
quote_message = await self.get_message(channel, message_id)
except:
return Response("Didn't find a message with the id `{}`".
format(message_id))
author_data = {
"name": quote_message.author.display_name,
"icon_url": quote_message.author.avatar_url
}
embed_data = {
"description": quote_message.content,
"timestamp": quote_message.timestamp,
"colour": quote_message.author.colour
}
em = Embed(**embed_data)
em.set_author(**author_data)
await self.send_message(quote_to_channel, embed=em)
return
@command_info("3.2.5", 1496428380, {
"3.3.9": (1497521393, "Added edit sub-command"),
"3.4.1": (1497550771, "Added the filter \"mine\" to the listing function"),
"3.4.6": (1497617827, "when listing bookmarks, they musn't be \"inline\"."),
"3.5.8": (1497827057, "Editing bookmarks now works as expected"),
"4.6.1": (1502582759, "Updated to new entry model"),
"4.7.8": (1504105817, "Using the new copy method for entries to make sure that they're \"clean\"")
})
async def cmd_bookmark(self, author, player, leftover_args):
"""
///|Creation
{command_prefix}bookmark [name] [timestamp]
///|Explanation
Create a new bookmark for the current entry. If no name is provided the entry's title will be used and if there's no timestamp provided the current timestamp will be used.
///|Using
{command_prefix}bookmark <id | name>
///|Editing
{command_prefix}bookmark edit <id> [new name] [new timestamp]
///|Listing
{command_prefix}bookmark list [mine]
///|Removal
{command_prefix}bookmark remove <id | name>
"""
if len(leftover_args) > 0:
arg = leftover_args[0].lower()
if arg in ["list", "showall"]:
em = Embed(title="Bookmarks")
bookmarks = bookmark.all_bookmarks
if "mine" in leftover_args:
bookmarks = filter(lambda x: bookmark.get_bookmark(x)["author"]["id"] == author.id, bookmarks)
for bm in bookmarks:
bm_name = bm["name"]
bm_author = WebAuthor.from_dict(bm["author"]).display_name
bm_timestamp = to_timestamp(bm["timestamp"])
bm_id = bm["id"]
t = "**{}**".format(bm_name)
v = "`{}` starting at `{}` *by* **{}**".format(bm_id, bm_timestamp, bm_author)
em.add_field(name=t, value=v, inline=False)
return Response(embed=em)
elif arg in ["remove", "delete"]:
if len(leftover_args) < 2:
return Response("Please provide an id or a name")
bm = bookmark.get_bookmark(" ".join(leftover_args[1:]))
if not bm:
return Response("Didn't find a bookmark with that query")
if bookmark.remove_bookmark(bm["id"]):
return Response("Removed bookmark `{}`".format(bm["name"]))
else:
return Response("Something went wrong")
elif arg in ["edit", "change"]:
if len(leftover_args) < 2:
return Response("Please provide an id")
bm_id = leftover_args[1]
if bm_id not in bookmark:
return Response("No bookmark with id `{}` found".format(bm_id))
if len(leftover_args) < 3:
return Response("Please also specify what you want to change")
new_timestamp = parse_timestamp(leftover_args[-1])
if new_timestamp is not None:  # explicit None check, since a timestamp of 0 is valid but falsy
new_name = " ".join(leftover_args[2:-1]) if len(leftover_args) > 3 else None
else:
new_name = " ".join(leftover_args[2:])
if bookmark.edit_bookmark(bm_id, new_name, new_timestamp):
return Response("Successfully edited bookmark `{}`".format(bm_id))
else:
return Response("Something went wrong while editing `{}`".format(bm_id))
else:
bm = bookmark.get_bookmark(" ".join(leftover_args))
if bm:
entry = Entry.from_dict(player.queue, bm["entry"])
entry.seek(bm["timestamp"])
player.queue._add_entry(entry)
author = WebAuthor.from_dict(bm["author"])
return Response("Loaded bookmark `{0}` by **{1}**".format(bm["name"], author.display_name))
elif player.current_entry:
bm_timestamp = player.progress
bm_name = None
if len(leftover_args) > 1:
timestamp = parse_timestamp(leftover_args[-1])
if timestamp:
bm_timestamp = timestamp
bm_name = " ".join(
leftover_args[:-1]) if timestamp else " ".join(leftover_args)
else:
timestamp = parse_timestamp(leftover_args[-1])
if timestamp:
bm_timestamp = timestamp
else:
bm_name = " ".join(leftover_args)
id = bookmark.add_bookmark(player.current_entry, bm_timestamp, author.id, bm_name)
return Response("Created a new bookmark with the id `{0}` (\"{2}\", `{3}`)\nUse `{1}bookmark {0}` to load it ".format(id, self.config.command_prefix, bm_name, to_timestamp(bm_timestamp)))
else:
return Response("There's no such bookmark and there's nothing playing either")
else:
if player.current_entry:
id = bookmark.add_bookmark(player.current_entry.copy(),
player.progress, author.id)
return Response(
"Created a new bookmark with the id `{0}`\nUse `{1}bookmark {0}` to load it ".
format(id, self.config.command_prefix))
else:
return await self.cmd_bookmark(author, player, [
"list",
])
@block_user
@command_info("2.0.3", 1486054560, {
"3.7.2": (1498252803, "no arguments provided crash Fixed")
})
async def cmd_random(self, channel, author, leftover_args):
"""
///|Basic
`{command_prefix}random <item1>, <item2>, [item3], [item4]`
///|Use an existing set
`{command_prefix}random <setname>`
///|List all the existing sets
`{command_prefix}random list`
///|Creation
`{command_prefix}random create <name>, <option1>, <option2>, [option3], [option4]`
///|Editing
`{command_prefix}random edit <name>, [add | remove | replace], <item> [, item2, item3]`
///|Removal
`{command_prefix}random remove <name>`
///|Explanation
Choose a random item out of a list or use a pre-defined list.
"""
if not leftover_args:
return Response("Why u gotta be stupid?")
items = [x.strip()
for x in " ".join(leftover_args).split(",") if x is not ""]
if items[0].split()[0].lower().strip() == "create":
if len(items) < 2:
return Response(
"Can't create a set with the given arguments",
delete_after=20)
set_name = "_".join(items[0].split()[1:]).lower().strip()
set_items = items[1:]
if self.random_sets.create_set(set_name, set_items):
return Response(
"Created set **{0}**\nUse `{1}random {0}` to use it!".format(
set_name, self.config.command_prefix),
delete_after=60)
else:
return Response(
"OMG, shit went bad quickly! Everything's burning!\nDUCK there he goes again, the dragon's coming. Eat HIM not me. PLEEEEEEEEEEEEEASE!"
)
elif items[0].split()[0].lower().strip() == "list":
return_string = ""
for s in self.random_sets.get_sets():
return_string += "**{}**\n```\n{}```\n\n".format(
s[0], ", ".join(s[1]))
return Response(return_string)
elif items[0].split()[0].lower().strip() == "edit":
if len(items[0].split()) < 2:
return Response(
"Please provide the name of the list you wish to edit!",
delete_after=20)
set_name = "_".join(items[0].split()[1:]).lower().strip()
existing_items = self.random_sets.get_set(set_name)
if existing_items is None:
return Response("This set does not exist!")
edit_mode = items[1].strip().lower() if len(items) > 1 else None
if edit_mode is None:
return Response(
"You need to provide the way you want to edit the list",
delete_after=20)
if len(items) < 3:
return Response(
"You have to specify the items you want to add/remove or set as the new items"
)
if edit_mode == "add":
for option in items[2:]:
self.random_sets.add_option(set_name, option)
elif edit_mode == "remove":
for option in items[2:]:
self.random_sets.remove_option(set_name, option)
elif edit_mode == "replace":
self.random_sets.replace_options(set_name, items[2:])
else:
return Response(
"This is not a valid edit mode!")
return Response("Edited your set!")
elif items[0].split()[0].lower().strip() == "remove":
set_name = "_".join(items[0].split()[1:]).lower().strip()
set_items = items[1:]
res = self.random_sets.remove_set(set_name, set_items)
if res:
return Response("Removed set!")
elif res is None:
return Response("No such set!")
else:
return Response(
"OMG, shit went bad quickly! Everything's burning!\nDUCK there he goes again, the dragon's coming. Eat HIM not me. PLEEEEEEEEEEEEEASE!"
)
if not items:
return Response(
"Is your name \"{0}\" by any chance?\n(This is not how this command works. Use `{1}help random` to find out how not to be a stupid **{0}** anymore)".
format(author.name, self.config.command_prefix),
delete_after=30)
if len(items) <= 1:
# return Response("Only you could use `{1}random` for one item...
# Well done, {0}!".format(author.name, self.config.command_prefix),
# delete_after=30)
query = "_".join(items[0].split())
items = self.random_sets.get_set(query.lower().strip())
if items is None:
return Response("Something went wrong")
await self.safe_send_message(channel,
"I choose **" + choice(items) + "**")
|
|
# -*- coding: utf-8 -*-
"""Useful module for commandline input parameters handling
.. module:: lib.console.commandlinetool
:platform: Unix
:synopsis: Useful module for commandline input parameters handling
.. moduleauthor:: Petr Czaderna <[email protected]>
"""
import sys
from hydratk.lib.number.conversion import int2bool
from hydratk.lib.translation.translator import Translator
from hydratk.lib.console import cmdoptparser
HS = chr(27) + chr(91) + "1m"
US = chr(27) + chr(91) + "4m"
EOS = chr(27) + chr(91) + "0m"
def rprint(data):
sys.stdout.write(data)
sys.stdout.flush()
class CommandlineTool():
"""Class CommandLineTool
"""
_title = ''
_cp_string = ''
_commands = []
_long_opt = {}
_short_opt = []
_cmd_text = {}
_opt_text = {}
_trn = None
_parser = None
@staticmethod
def set_translator(translator):
"""Method sets translator
Args:
translator (obj): Translator object
Returns:
void
Raises:
error: ValueError
"""
if isinstance(translator, Translator):
CommandlineTool._trn = translator
else:
raise ValueError(
'translator must be a valid instance of hydratk.lib.translation.translator.Translator class')
@staticmethod
def set_possible_commands(commands):
"""Commands setter method
Args:
commands (list): Possible commands to use
Returns:
void
"""
CommandlineTool._commands = commands
@staticmethod
def set_possible_options(short_opt, long_opt):
"""Options setter method
Args:
short_opt (list): Possible short options to use (getopt format)
long_opt (list): Possible long options to use (getopt format)
Returns:
void
"""
CommandlineTool._short_opt = short_opt
CommandlineTool._long_opt = long_opt
@staticmethod
def set_help(title, cp_string, cmd_text, opt_text):
"""Method creates and returns a formated help text
Args:
title (str): Title text
cp_string (str): Copyright string
cmd_text (dict): Text description for specified commands, format is ['command' : 'description']
opt_text (dict): Text description for specified options, format is ['short_opt', 'long_opt' : 'description']
Returns:
void
"""
CommandlineTool._title = title
CommandlineTool._cp_string = cp_string
CommandlineTool._cmd_text = cmd_text
CommandlineTool._opt_text = opt_text
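# A minimal wiring sketch (hypothetical values; the option-description shape is
# inferred from get_command_options_desc() below):
#
#     CommandlineTool.set_possible_commands(['run', 'help'])
#     CommandlineTool.set_possible_options(['c:', 'f'], ['config=', 'force'])
#     CommandlineTool.set_help(
#         'MyTool v1.0', '(c) 2024 Example',
#         {'run': 'Run the tool', 'help': 'Show this help'},
#         {'-c, --config <file>': {'description': 'configuration file',
#                                  'commands': ('run',)},
#          '-f, --force': 'force mode (global option)'})
#     CommandlineTool.print_help()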
@staticmethod
def print_short_help():
"""Method prints short help
Args:
none
Returns:
void
"""
print(CommandlineTool.create_short_help())
@staticmethod
def print_help():
"""Method prints long help
Args:
none
Returns:
void
"""
print(CommandlineTool.create_help())
@staticmethod
def get_command_options_desc(command):
"""Method creates and returns a formated help text
Args:
command (str): command text
Returns:
str: help text
"""
result = []
# str.replace() returns a new string, so reassign to actually strip the markers
command = command.replace(' ', '')
command = command.replace('{h}', '')
command = command.replace('{u}', '')
command = command.replace('{e}', '')
optlist = list(CommandlineTool._opt_text.keys())
optlist.sort()
for opt in optlist:
desc = CommandlineTool._opt_text[opt]
if type(desc) is dict:
if type(desc['commands']) is tuple:
for cmd in desc['commands']:
if (command == cmd):
result.append(opt + ' - ' + desc['description'])
else:
if (command == desc['commands']):
result.append(opt + ' - ' + desc['description'])
return result
@staticmethod
def get_input_command():
"""Method returns passed action command parameter
Args:
none
Returns:
str: string command
bool: false if no valid command was used
"""
result = False
for cmd in sys.argv:
if cmd in CommandlineTool._commands:
result = cmd
return result
@staticmethod
def get_input_options(opt_dict):
"""Method returns passed action command parameter
Args:
opt_dict (dict): options
Returns:
dict: result dictionary with short and long input options
Raises:
error: CmdOptParserError
"""
result = {}
try:
CommandlineTool._parser = cmdoptparser.CmdOptParser(add_help=False) #supress built-in argparse -h/--help option
for option, opt_set in opt_dict.items():
d_option = opt_set['d_opt'] if opt_set[
'd_opt'] is not None else option
CommandlineTool._parser.add_opt(
option, d_option, opt_set['has_value'], opt_set['allow_multiple'])
result['options'], result[
'remaining'] = CommandlineTool._parser.parse()
except cmdoptparser.CmdOptParserError as err:
raise err
return result
@staticmethod
def get_input_option(opt):
"""Method gets option value
Args:
opt (str): option
Returns:
bool: result
Raises:
error: CmdOptParserError
"""
if CommandlineTool._parser == None:
raise cmdoptparser.CmdOptParserError(
'Commandline needs to be parsed first')
opt_value = CommandlineTool._parser.get_opt(opt)
if opt_value is None:
opt_value = False
result = opt_value if opt_value != '' else True
return result
@staticmethod
def create_short_help():
"""Method creates short help text
Args:
none
Returns:
str: help text
"""
result = ''
result += CommandlineTool._title + "\n"
result += CommandlineTool._cp_string + "\n"
have_options = ' [options..]' if len(CommandlineTool._short_opt) > 0 or len(
CommandlineTool._long_opt) > 1 else ''
have_commands = ' <command>' if len(
CommandlineTool._commands) > 0 else ''
cmd = (sys.argv[0]).split('/')[-1]
result += "Syntax: " + sys.argv[0] + have_options + have_commands + \
"\n" if CommandlineTool._trn == None else CommandlineTool._trn.msg(
'htk_help_syntax', cmd) + "\n"
result += "For list of all available commands and options type {h}" + cmd + \
" help{e}" if CommandlineTool._trn == None else CommandlineTool._trn.msg(
'htk_help_on_help', cmd)
# apply decorations
result = CommandlineTool.parse_shell_text(result)
return result
@staticmethod
def create_help():
"""Method creates and returns a formated help text
Args:
none
Returns:
str: result help text
"""
import pprint
result = ''
result += CommandlineTool._title + "\n"
result += CommandlineTool._cp_string + "\n"
have_options = ' [options..]' if len(CommandlineTool._short_opt) > 0 or len(
CommandlineTool._long_opt) > 1 else ''
have_commands = ' <command>' if len(
CommandlineTool._commands) > 0 else ''
cmd = (sys.argv[0]).split('/')[-1]
result += "Syntax: " + cmd + have_options + have_commands + \
"\n\n" if CommandlineTool._trn == None else CommandlineTool._trn.msg(
'htk_help_syntax', cmd) + "\n\n"
if (have_commands):
result += "Commands:\n" if CommandlineTool._trn == None else CommandlineTool._trn.msg(
'htk_help_commands') + "\n"
if (len(CommandlineTool._cmd_text) > 0):
cmdlist = list(CommandlineTool._cmd_text.keys())
cmdlist.sort()
for cmd in cmdlist:
desc = CommandlineTool._cmd_text[cmd]
desc = desc if type(
desc).__name__ == 'str' else 'undefined'
result += " {h}" + cmd + "{e} - " + desc + "\n"
cmd_options = CommandlineTool.get_command_options_desc(cmd)
if len(cmd_options) > 0:
# pprint.pprint(cmd_options)
result += " Options:\n" if CommandlineTool._trn == None else " " + \
CommandlineTool._trn.msg('htk_help_options') + "\n"
for cmd_opt in cmd_options:
result += " " + cmd_opt + "\n"
result += "\n"
else: # no text description
for cmd in CommandlineTool._commands:
result += " " + cmd + "\n"
cmd_options = CommandlineTool.get_command_options_desc(cmd)
if len(cmd_options) > 0:
result = " Options:\n" if CommandlineTool._trn == None else " " + \
CommandlineTool._trn.msg('htk_help_options') + "\n"
for cmd_opt in cmd_options:
result += " " + cmd_opt + "\n"
result += "\n"
if (have_options):
if len(CommandlineTool._opt_text) > 0:
optlist = list(CommandlineTool._opt_text.keys())
optlist.sort()
glob_opt_result = ''
for opt in optlist:
desc = CommandlineTool._opt_text[opt]
if (type(desc) is not dict):
if desc != '':
glob_opt_result += " " + \
opt + " - " + desc + "\n"
else: # no text description
for opt in CommandlineTool._short_opt:
have_param = int2bool(opt.find(':'))
opt = opt.replace(':', '')
opt_param = '' if have_param == False else ' <param>'
glob_opt_result += " " + opt + "\n"
if glob_opt_result != '':
result += "\nGlobal Options:\n" if CommandlineTool._trn == None else "\n" + \
CommandlineTool._trn.msg(
'htk_help_glob_options') + "\n"
result += glob_opt_result
# apply decorations
result = CommandlineTool.parse_shell_text(result)
return result
@staticmethod
def parse_shell_text(result):
"""Method adds special characters for shell print
Args:
result (str): text
Returns:
str: shell text
"""
result = result.replace('{h}', HS)
result = result.replace('{u}', US)
result = result.replace('{e}', EOS)
return result
|
|
'''
Created on 15.10.2012
@author: mhoefli
'''
from __future__ import division
from openopt import GLP # , NLP
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy
setBackend = "TkAgg"
if mpl.rcParams["backend"] != setBackend:
print "Switching matplotlib backend from", mpl.rcParams["backend"], "to", setBackend
mpl.rcParams["backend"] = setBackend
plt.switch_backend(setBackend)
mpl.rcdefaults()
def GaussianRegularizationDistanceReconstruction(config, TM, effhist):
Rmin = config.get("Transfer Matrix", "from distance")
Rmax = config.get("Transfer Matrix", "to distance")
if config.get("Reverse Model Fit", "x0 min") < 0:
config.set("Reverse Model Fit", "x0 min", Rmin)
if config.get("Reverse Model Fit", "x0 max") < 0:
config.set("Reverse Model Fit", "x0 max", Rmax)
grmin = config.get("Reverse Model Fit", "x0 min")
grmax = config.get("Reverse Model Fit", "x0 max")
gsigmin = config.get("Reverse Model Fit", "sigma min")
gsigmax = config.get("Reverse Model Fit", "sigma max")
gamin = 0.
gamax = 1.
ngauss = config.get("Reverse Model Fit", "nr gaussian")
maxtime = config.get("Reverse Model Fit", "maxruntime")
pfact = config.get("Reverse Model Fit", "penaltyfact")
res = config.get("Reverse Model Fit", "residual")
print "Will fit %d gaussian(s) in the range of %f to %f with min and maxwidth %f and %f. Max-runtime is %f." % (ngauss, grmin, grmax, gsigmin, gsigmax, maxtime)
lbounds = [gamin] * (ngauss - 1) + [grmin] * ngauss + [gsigmin] * ngauss
ubounds = [gamax] * (ngauss - 1) + [grmax] * ngauss + [gsigmax] * ngauss
r_prdist, x_range, e_fitprdist, fitvals = fittingOpenopt(effhist, TM, Rmin, Rmax, lbounds, ubounds, maxtime, pfact, res)
return r_prdist, x_range, e_fitprdist, fitvals
def x2parms(argvec):
nrgauss = int((len(argvec) + 1) / 3)
a_vals = argvec[0:nrgauss - 1]
a_vals = numpy.append(a_vals, 1. - a_vals.sum())
r_vals = argvec[nrgauss - 1:(2 * nrgauss) - 1]
sig_vals = argvec[(2 * nrgauss) - 1:]
return nrgauss, a_vals, r_vals, sig_vals
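# Layout of the parameter vector unpacked by x2parms() (illustration for
# nrgauss = 2, i.e. len(argvec) == 3*2 - 1 == 5):
#
#     argvec   = [a1, r1, r2, s1, s2]
#     a_vals   = [a1, 1 - a1]   # amplitudes; the last one is fixed by normalization
#     r_vals   = [r1, r2]       # gaussian centers
#     sig_vals = [s1, s2]       # gaussian widths
#
# The lower/upper bound vectors built in
# GaussianRegularizationDistanceReconstruction() follow the same ordering.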
def gaussSQDiff(argvec, TM, targeteff, xxarr):
_nrgauss, a_vals, r_vals, sig_vals = x2parms(argvec)
gaussians = (a_vals * numpy.exp(-(xxarr.T - r_vals) ** 2 / (2.0 * sig_vals ** 2)))
r_prdist = gaussians.sum(axis = 1)
e_prdist = numpy.dot(r_prdist, TM)
e_prdist = e_prdist / e_prdist.mean()
devnew = ((targeteff - e_prdist) ** 2).mean()
return devnew
def penalizeCloseGauss(argvec, TM, targeteff, xxarr, penaltyfactor):
stddev = gaussSQDiff(argvec, TM, targeteff, xxarr)
_nrgauss, _a_vals, r_vals, sig_vals = x2parms(argvec)
dists = numpy.subtract.outer(r_vals, r_vals.T)
ssums = numpy.add.outer(sig_vals, sig_vals.T)
absdist = numpy.sqrt(dists * dists)
distsigdiff = absdist - penaltyfactor * ssums
distsigdiffnotr = distsigdiff - distsigdiff * numpy.eye(distsigdiff.shape[0])
diff = (distsigdiffnotr < 0).sum() / 2
return stddev * 10 ** diff
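# Note on the penalty above: every unordered pair of gaussians whose centers lie
# closer than penaltyfactor * (sigma_i + sigma_j) increments `diff` by one, so the
# squared-deviation score is inflated by a factor of 10 per overlapping pair.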
def plotCallback(p, lines_dist, lines_eff, lines_g, xxarr, TM, chsql, chisqs, chisqax):
argvec = p.xk
nrgauss, a_vals, r_vals, sig_vals = x2parms(argvec)
gaussians = (a_vals * numpy.exp(-(xxarr.T - r_vals) ** 2 / (2.0 * sig_vals ** 2)))
r_prdist = gaussians.sum(axis = 1)
e_prdist = numpy.dot(r_prdist, TM.getMatrix())
e_prdist = e_prdist / e_prdist.mean()
for gauss in range(nrgauss):
lines_g[gauss].set_ydata(gaussians[:, gauss])
lines_dist.set_ydata(r_prdist)
lines_eff.set_ydata(e_prdist)
chisqs.append(p.fk)
if len(chisqs) % 10 == 0:
chisqs.pop(0)
chisqs.append(p.fk)
chsql.set_data(range(len(chisqs)), chisqs)
chisqax.relim()
chisqax.autoscale_view(True, True, True)
plt.draw()
return False
def createLivePlot(nrgauss, pearr, tmatrix, xarr, lbounds, ubounds):
plt.figure(figsize = (10, 8))
plt.ion()
plt.subplot(221)
g_lines = []
for gauss in range(nrgauss):
ln, = plt.plot(xarr, numpy.ones(len(xarr)), label = "G%d" % gauss)
g_lines.append(ln)
maxgauss = numpy.array(ubounds)[nrgauss:2 * nrgauss].sum()
lines_distance, = plt.plot(xarr, numpy.ones(len(xarr)) * maxgauss, label = "sum", linewidth = 2, linestyle = ":")
plt.yticks(())
plt.legend(bbox_to_anchor = (0., 1.02, 1., .102), loc = 3, ncol = 2, mode = "expand", borderaxespad = 0.)
plt.xlabel("Reconstructed distance")
plt.ylim(0., 1.1)
plt.subplot(222)
xeff = numpy.linspace(0., 1., len(pearr), endpoint = False) + 1. / len(pearr) / 2
plt.plot(xeff, pearr, label = "reference")
lines_efficiency, = plt.plot(xeff, numpy.ones(len(pearr)), label = "fit")
plt.xlabel("FRET Efficiency")
plt.yticks(())
plt.legend(bbox_to_anchor = (0., 1.02, 1., .102), loc = 3, ncol = 2, mode = "expand", borderaxespad = 0.)
plt.subplot(223)
tmatrix.plot()
plt.subplot(224)
chisqs = []
chsql, = plt.plot(chisqs)
chisqax = plt.gca()
plt.show()
return lines_distance, lines_efficiency, g_lines, chsql, chisqs, chisqax
def fittingOpenopt(pearr, tmatrix, minR, maxR, lbounds, ubounds, gmaxtime, pfact, res):
nrgauss = int((len(lbounds) + 1) / 3)
rvecbins = tmatrix.getMatrix().shape[0]
myrange = maxR - minR
xarr = numpy.linspace(minR + myrange / rvecbins / 2, maxR - myrange / rvecbins / 2, rvecbins)
xxarr = numpy.array([xarr] * nrgauss)
if pfact == 0:
print "Will not apply a penalty for gaussian proximity."
minfuncwrap = lambda x: gaussSQDiff(x, tmatrix.getMatrix(), pearr, xxarr)
else:
print "Will penalize gaussians which are closer than %d times the sum of both sigmas."
minfuncwrap = lambda x: penalizeCloseGauss(x, tmatrix.getMatrix(), pearr, xxarr, pfact)
lines_distance, lines_efficiency, g_lines, chsql, chisqs, chisqax = createLivePlot(nrgauss, pearr, tmatrix, xarr, lbounds, ubounds)
mycallback = lambda p: plotCallback(p, lines_distance, lines_efficiency, g_lines, xxarr, tmatrix, chsql, chisqs, chisqax)
print "Starting openopt ##########################"
prob = GLP(minfuncwrap, lb = lbounds, ub = ubounds, callback = mycallback, maxFunEvals = 1e15, maxNonSuccess = 200, maxIter = 1e5, maxTime = gmaxtime, fEnough = res)
result = prob.solve('de', population = 1000 * len(lbounds))
# result=prob.solve('asa')
# result=prob.solve('galileo') # not good
# result=prob.solve('pswarm')
# prob = GLP(minfuncwrap,lb=lbounds,ub=ubounds,callback=mycallback,maxNonSuccess=200,maxIter=1e5,maxTime=gmaxtime)
# result=prob.solve('isres',population=100*len(lbounds))
# prob = NLP(minfuncwrap,lb=lbounds,ub=ubounds,callback=mycallback,maxNonSuccess=200,maxIter=1e5,maxTime=gmaxtime)
# result=prob.solve('scipy_lbfgsb')
# result=prob.solve('scipy_tnc')
# result=prob.solve('bobyqa')
# result=prob.solve('ptn')
# result=prob.solve('slmvm1')
# result=prob.solve('slmvm2')
# result=prob.solve('ralg')
# result=prob.solve('scipy_cobyla') #good!!
# result=prob.solve('mma')
# result=prob.solve('auglag')
# result=prob.solve('gsubg')
xopt = result.xf
# prob2 = prob = NLP(minfuncwrap, xopt , lb = lbounds, ub = ubounds, callback = mycallback, maxNonSuccess = 20, maxIter = 1e5, maxTime = gmaxtime)
# result = prob2.solve('scipy_cobyla')
# xopt = result.xf
print "Minimum function chisq", result.ff
nrgauss, a_final, r_final, sig_final = x2parms(xopt)
print "G\tA\t\tx0\t\tsigma"
nr = 1
for a, r, sig in zip(a_final, r_final, sig_final):
print "%d\t%f\t%f\t%f\t" % (nr, a, r, sig)
nr += 1
gaussians = (a_final * numpy.exp(-(xxarr.T - r_final) ** 2 / (2.0 * sig_final ** 2)))
r_prdist = gaussians.sum(axis = 1)
e_fitprdist = numpy.dot(r_prdist, tmatrix.getMatrix())
r_prdist /= r_prdist.sum()
e_fitprdist /= e_fitprdist.sum()
return r_prdist, xarr, e_fitprdist, (a_final, r_final, sig_final)
|
|
import json
import requests
LEG_WITH_SOCKET = [
132369, 132410, 137044, 132444, 132449, 132452, 132460, 133973, 133974, 137037, 137038, 137039, 137040,
137041, 137042, 137043, 132378, 137045, 137046, 137047, 137048, 137049, 137050, 137051, 137052, 137054, 137055,
137220, 137223, 137276, 137382, 138854
]
ENCHANTABLE_SLOTS = ["neck", "back", "finger1", "finger2"]
RAIDS = [('The Emerald Nightmare', 'EN'), ('Trial of Valor', 'TOV'), ('The Nighthold', 'NH')]
region_locale = {
'us': ['us', 'en_US', 'en'],
# 'kr': ['kr', 'ko_KR', 'ko'],
# 'tw': ['tw', 'zh_TW', 'zh'],
'eu': ['eu', 'en_GB', 'en']
}
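# Each entry: [API subdomain, API locale, armory language code]
# (layout inferred from how region_locale is indexed in get_char() below).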
def get_sockets(player_dictionary):
"""
Return dict with the total number of sockets and the number of equipped gems
:param player_dictionary: Retrieved player dict from API
:return: dict()
"""
sockets = 0
equipped_gems = 0
for item in player_dictionary["items"]:
if item in "averageItemLevel" or item in "averageItemLevelEquipped":
continue
if int(player_dictionary["items"][item]["id"]) in LEG_WITH_SOCKET:
sockets += 1
else:
for bonus in player_dictionary["items"][item]["bonusLists"]:
if bonus == 1808: # 1808 is Legion prismatic socket bonus
sockets += 1
if item in ["neck", "finger1", "finger2"]:
if player_dictionary["items"][item]["context"] == "trade-skill":
sockets += 1
for ttip in player_dictionary["items"][item]["tooltipParams"]:
if item in "mainHand" or item in "offHand": # Ignore Relic
continue
if "gem" in ttip: # Equipped gems are listed as gem0, gem1, etc...
equipped_gems += 1
return {"total_sockets": sockets,
"equipped_gems": equipped_gems}
def get_enchants(player_dictionary):
"""
    Get the count of enchantable slots and which of those slots are missing an enchant
    :param player_dictionary: Retrieved player dict from API
:return: dict()
"""
missing_enchant_slots = []
for slot in ENCHANTABLE_SLOTS:
if "enchant" not in player_dictionary["items"][slot]["tooltipParams"]:
missing_enchant_slots.append(slot)
return {
"enchantable_slots": len(ENCHANTABLE_SLOTS),
"missing_slots": missing_enchant_slots,
"total_missing": len(missing_enchant_slots)
}
def get_raid_progression(player_dictionary, raid):
r = [x for x in player_dictionary["progression"]
["raids"] if x["name"] in raid][0]
normal = 0
heroic = 0
mythic = 0
for boss in r["bosses"]:
if boss["normalKills"] > 0:
normal += 1
if boss["heroicKills"] > 0:
heroic += 1
if boss["mythicKills"] > 0:
mythic += 1
return {"normal": normal,
"heroic": heroic,
"mythic": mythic,
"total_bosses": len(r["bosses"])}
def get_mythic_progression(player_dictionary):
achievements = player_dictionary["achievements"]
plus_two = 0
plus_five = 0
plus_ten = 0
if 33096 in achievements["criteria"]:
index = achievements["criteria"].index(33096)
plus_two = achievements["criteriaQuantity"][index]
if 33097 in achievements["criteria"]:
index = achievements["criteria"].index(33097)
plus_five = achievements["criteriaQuantity"][index]
if 33098 in achievements["criteria"]:
index = achievements["criteria"].index(33098)
plus_ten = achievements["criteriaQuantity"][index]
return {
"plus_two": plus_two,
"plus_five": plus_five,
"plus_ten": plus_ten
}
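# Hedged illustration of the structure get_mythic_progression expects. The achievement
# criterion ids (33096/33097/33098) come from the code above; the quantities are
# invented purely for this example:
#
#   example = {"achievements": {"criteria": [33096, 33097, 33098],
#                               "criteriaQuantity": [12, 7, 2]}}
#   get_mythic_progression(example)
#   # -> {"plus_two": 12, "plus_five": 7, "plus_ten": 2}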
def get_char(name, server, target_region, api_key):
r = requests.get("https://%s.api.battle.net/wow/character/%s/%s?fields=items+progression+achievements&locale=%s&apikey=%s" % (
region_locale[target_region][0], server, name, region_locale[target_region][1], api_key))
if r.status_code != 200:
raise Exception("Could Not Find Character (No 200 from API)")
player_dict = json.loads(r.text)
r = requests.get(
"https://%s.api.battle.net/wow/data/character/classes?locale=%s&apikey=%s" % (
region_locale[target_region][0], region_locale[target_region][1], api_key))
if r.status_code != 200:
raise Exception("Could Not Find Character Classes (No 200 From API)")
class_dict = json.loads(r.text)
class_dict = {c['id']: c['name'] for c in class_dict["classes"]}
equipped_ivl = player_dict["items"]["averageItemLevelEquipped"]
sockets = get_sockets(player_dict)
enchants = get_enchants(player_dict)
mythic_progress = get_mythic_progression(player_dict)
# Build raid progression
raid_progress = {}
for raid in RAIDS:
raid_name = raid[0]
raid_abrv = raid[1]
raid_progress[raid_name] = {
'abrv': raid_abrv,
'progress': get_raid_progression(player_dict, raid_name)
}
armory_url = 'http://{}.battle.net/wow/{}/character/{}/{}/advanced'.format(
region_locale[target_region][0], region_locale[target_region][2], server, name)
return_string = ''
return_string += "**%s** - **%s** - **%s %s**\n" % (
name.title(), server.title(), player_dict['level'], class_dict[player_dict['class']])
return_string += '<{}>\n'.format(armory_url)
return_string += '```CSS\n' # start Markdown
# iLvL
return_string += "Equipped Item Level: %s\n" % equipped_ivl
# Mythic Progression
return_string += "Mythics: +2: %s, +5: %s, +10: %s\n" % (mythic_progress["plus_two"],
mythic_progress["plus_five"],
mythic_progress["plus_ten"])
# Raid Progression
for raid, data in raid_progress.items():
progress = data['progress']
return_string += '{abrv}: {normal}/{total} (N), {heroic}/{total} (H), {mythic}/{total} (M)\n'.format(
abrv=data['abrv'],
normal=progress['normal'],
heroic=progress['heroic'],
mythic=progress['mythic'],
total=progress['total_bosses']
)
# Gems
return_string += "Gems Equipped: %s/%s\n" % (
sockets["equipped_gems"], sockets["total_sockets"])
# Enchants
return_string += "Enchants: %s/%s\n" % (enchants["enchantable_slots"] - enchants["total_missing"],
enchants["enchantable_slots"])
if enchants["total_missing"] > 0:
return_string += "Missing Enchants: {0}".format(
", ".join(enchants["missing_slots"]))
return_string += '```' # end Markdown
return return_string
async def pug(client, region, api_key, message):
target_region = region
try:
i = str(message.content).split(' ')
name = i[1]
server = i[2]
if len(i) == 4 and i[3].lower() in region_locale.keys():
target_region = i[3].lower()
character_info = get_char(name, server, target_region, api_key)
await client.send_message(message.channel, character_info)
except Exception as e:
print(e)
await client.send_message(message.channel, "Error With Name or Server\n"
"Use: !pug <name> <server> <region>\n"
"Hyphenate Two Word Servers (Ex: Twisting-Nether)")
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IgnoreArg
from mox3.mox import IsA
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.identity.groups import constants
GROUPS_INDEX_URL = reverse(constants.GROUPS_INDEX_URL)
GROUP_CREATE_URL = reverse(constants.GROUPS_CREATE_URL)
GROUP_UPDATE_URL = reverse(constants.GROUPS_UPDATE_URL, args=[1])
GROUP_MANAGE_URL = reverse(constants.GROUPS_MANAGE_URL, args=[1])
GROUP_ADD_MEMBER_URL = reverse(constants.GROUPS_ADD_MEMBER_URL, args=[1])
class GroupsViewTests(test.BaseAdminViewTests):
def _get_domain_id(self):
return self.request.session.get('domain_context', None)
def _get_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
@test.create_stubs({api.keystone: ('domain_get',
'group_list',)})
def test_index(self):
domain_id = self._get_domain_id()
groups = self._get_groups(domain_id)
filters = {}
api.keystone.group_list(IgnoreArg(),
domain=domain_id,
filters=filters) \
.AndReturn(groups)
self.mox.ReplayAll()
res = self.client.get(GROUPS_INDEX_URL)
self.assertTemplateUsed(res, constants.GROUPS_INDEX_VIEW_TEMPLATE)
self.assertItemsEqual(res.context['table'].data, groups)
if domain_id:
for group in res.context['table'].data:
self.assertItemsEqual(group.domain_id, domain_id)
self.assertContains(res, 'Create Group')
self.assertContains(res, 'Edit')
self.assertContains(res, 'Delete Group')
@test.create_stubs({api.keystone: ('group_list',
'get_effective_domain_id')})
def test_index_with_domain(self):
domain = self.domains.get(id="1")
filters = {}
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
groups = self._get_groups(domain.id)
api.keystone.group_list(IsA(http.HttpRequest),
domain=domain.id,
filters=filters).AndReturn(groups)
self.mox.ReplayAll()
res = self.client.get(GROUPS_INDEX_URL)
self.assertTemplateUsed(res, constants.GROUPS_INDEX_VIEW_TEMPLATE)
self.assertItemsEqual(res.context['table'].data, groups)
if domain.id:
for group in res.context['table'].data:
self.assertItemsEqual(group.domain_id, domain.id)
self.assertContains(res, 'Create Group')
self.assertContains(res, 'Edit')
self.assertContains(res, 'Delete Group')
@test.create_stubs({api.keystone: ('domain_get',
'group_list',
'keystone_can_edit_group')})
def test_index_with_keystone_can_edit_group_false(self):
domain_id = self._get_domain_id()
groups = self._get_groups(domain_id)
filters = {}
api.keystone.group_list(IgnoreArg(),
domain=domain_id,
filters=filters) \
.AndReturn(groups)
api.keystone.keystone_can_edit_group() \
.MultipleTimes().AndReturn(False)
self.mox.ReplayAll()
res = self.client.get(GROUPS_INDEX_URL)
self.assertTemplateUsed(res, constants.GROUPS_INDEX_VIEW_TEMPLATE)
self.assertItemsEqual(res.context['table'].data, groups)
self.assertNotContains(res, 'Create Group')
self.assertNotContains(res, 'Edit')
self.assertNotContains(res, 'Delete Group')
@test.create_stubs({api.keystone: ('group_create',
'domain_get')})
def test_create(self):
domain_id = self._get_domain_id()
domain = self.domains.get(id="1")
group = self.groups.get(id="1")
api.keystone.domain_get(IsA(http.HttpRequest), '1') \
.AndReturn(domain)
api.keystone.group_create(IsA(http.HttpRequest),
description=group.description,
domain_id=domain_id,
name=group.name).AndReturn(group)
self.mox.ReplayAll()
formData = {'method': 'CreateGroupForm',
'name': group.name,
'description': group.description}
res = self.client.post(GROUP_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
@test.create_stubs({api.keystone: ('group_create',)})
def test_create_with_domain(self):
domain = self.domains.get(id="3")
group = self.groups.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
api.keystone.group_create(IsA(http.HttpRequest),
description=group.description,
domain_id=domain.id,
name=group.name).AndReturn(group)
self.mox.ReplayAll()
formData = {'method': 'CreateGroupForm',
'name': group.name,
'description': group.description}
res = self.client.post(GROUP_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
@test.create_stubs({api.keystone: ('group_get',
'group_update')})
def test_update(self):
group = self.groups.get(id="1")
test_description = 'updated description'
api.keystone.group_get(IsA(http.HttpRequest), '1').AndReturn(group)
api.keystone.group_update(IsA(http.HttpRequest),
description=test_description,
group_id=group.id,
name=group.name).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'UpdateGroupForm',
'group_id': group.id,
'name': group.name,
'description': test_description}
res = self.client.post(GROUP_UPDATE_URL, formData)
self.assertNoFormErrors(res)
@test.create_stubs({api.keystone: ('domain_get',
'group_list',
'group_delete')})
def test_delete_group(self):
domain_id = self._get_domain_id()
filters = {}
group = self.groups.get(id="2")
api.keystone.group_list(IgnoreArg(),
domain=domain_id,
filters=filters) \
.AndReturn(self.groups.list())
api.keystone.group_delete(IgnoreArg(), group.id)
self.mox.ReplayAll()
formData = {'action': 'groups__delete__%s' % group.id}
res = self.client.post(GROUPS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, GROUPS_INDEX_URL)
@test.create_stubs({api.keystone: ('get_effective_domain_id',
'group_get',
'user_list',)})
def test_manage(self):
group = self.groups.get(id="1")
group_members = self.users.list()
domain_id = self._get_domain_id()
api.keystone.group_get(IsA(http.HttpRequest), group.id).\
AndReturn(group)
if api.keystone.VERSIONS.active >= 3:
api.keystone.get_effective_domain_id(
IgnoreArg()).AndReturn(domain_id)
api.keystone.user_list(
IgnoreArg(), group=group.id, domain=domain_id).AndReturn(
group_members)
else:
api.keystone.user_list(
IgnoreArg(), group=group.id).AndReturn(group_members)
self.mox.ReplayAll()
res = self.client.get(GROUP_MANAGE_URL)
self.assertTemplateUsed(res, constants.GROUPS_MANAGE_VIEW_TEMPLATE)
self.assertItemsEqual(res.context['table'].data, group_members)
@test.create_stubs({api.keystone: ('get_effective_domain_id',
'user_list',
'remove_group_user')})
def test_remove_user(self):
group = self.groups.get(id="1")
user = self.users.get(id="2")
domain_id = self._get_domain_id()
if api.keystone.VERSIONS.active >= 3:
api.keystone.get_effective_domain_id(
IgnoreArg()).AndReturn(domain_id)
api.keystone.user_list(
IgnoreArg(), group=group.id, domain=domain_id).AndReturn(
self.users.list())
else:
api.keystone.user_list(
IgnoreArg(), group=group.id).AndReturn(self.users.list())
api.keystone.remove_group_user(IgnoreArg(),
group_id=group.id,
user_id=user.id)
self.mox.ReplayAll()
formData = {'action': 'group_members__removeGroupMember__%s' % user.id}
res = self.client.post(GROUP_MANAGE_URL, formData)
self.assertRedirectsNoFollow(res, GROUP_MANAGE_URL)
self.assertMessageCount(success=1)
@test.create_stubs({api.keystone: ('get_effective_domain_id',
'group_get',
'user_list',
'add_group_user')})
def test_add_user(self):
group = self.groups.get(id="1")
user = self.users.get(id="2")
domain_id = group.domain_id
api.keystone.get_effective_domain_id(IgnoreArg()).AndReturn(domain_id)
api.keystone.group_get(IsA(http.HttpRequest), group.id).\
AndReturn(group)
api.keystone.user_list(IgnoreArg(), domain=domain_id).\
AndReturn(self.users.list())
api.keystone.user_list(IgnoreArg(), domain=domain_id, group=group.id).\
AndReturn(self.users.list()[2:])
api.keystone.add_group_user(IgnoreArg(),
group_id=group.id,
user_id=user.id)
self.mox.ReplayAll()
formData = {'action': 'group_non_members__add__%s' % user.id}
res = self.client.post(GROUP_ADD_MEMBER_URL, formData)
self.assertRedirectsNoFollow(res, GROUP_MANAGE_URL)
self.assertMessageCount(success=1)
@test.update_settings(FILTER_DATA_FIRST={'identity.groups': True})
def test_index_with_filter_first(self):
res = self.client.get(GROUPS_INDEX_URL)
self.assertTemplateUsed(res, constants.GROUPS_INDEX_VIEW_TEMPLATE)
groups = res.context['table'].data
self.assertItemsEqual(groups, [])
|
|
# -*- mode: python; tab-width: 2; coding: utf8 -*-
#
# Copyright (C) 2015 Niklas Rosenstein
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import argparse
import errno
import shutil
import subprocess
import textwrap
from . import auth, command, util, ssh
from .auth import ACCESS_READ, ACCESS_WRITE, ACCESS_MANAGE
from .auth import LEVEL_USER, LEVEL_SHELLUSER, LEVEL_ADMIN, LEVEL_ROOT
from .hooks import parse_webhooks, write_webhooks, invoke_webhook
def printerr(*args, **kwargs):
kwargs.setdefault('file', sys.stderr)
print(*args, **kwargs)
def check_repo(session, repo_name, access_mask, check='exists'):
''' Helper function that converts the repository name to the full
path, makes sure it exists and checks if the user has access to
the repository with the specified access mask. Returns the path
to the repository or raises `SystemExit` with the appropriate
exit code. '''
path = session.repo2path(repo_name)
if not session.get_access_info(path) & access_mask:
if access_mask & ACCESS_MANAGE:
mode = 'manage'
elif access_mask & ACCESS_WRITE:
mode = 'write'
elif access_mask & ACCESS_READ:
mode = 'read'
else:
mode = '<invalid access mask>'
printerr("error: {} permission to {!r} denied".format(mode, repo_name))
raise SystemExit(errno.EPERM)
if check == 'exists':
if not os.path.exists(path):
printerr("error: repository {!r} does not exist".format(repo_name))
raise SystemExit(errno.ENOENT)
if not os.path.isdir(path):
printerr("fatal error: repository {!r} is not a directory".format(args.repo))
raise SystemExit(errno.ENOENT) # XXX: better exit code?
elif check == 'not-exists':
if os.path.exists(path):
printerr("error: repository {!r} already exists".format(repo_name))
raise SystemExit(errno.EEXIST)
elif check:
raise ValueError("invalid check value: {!r}".format(check))
return path
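# Hedged usage sketch: the command handlers below resolve the repository path and
# enforce access in a single check_repo() call, relying on SystemExit for the error
# paths. The handler name here is illustrative only:
#
#   def _example_handler(session, args):
#     path = check_repo(session, args.repo, ACCESS_READ, 'exists')
#     # only reached when the repo exists and the user has read access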
@command('repo')
def _command_repo(session, args):
''' Manage repositories. '''
parser = argparse.ArgumentParser(prog='repo')
subparser = parser.add_subparsers(dest='cmd')
create_p = subparser.add_parser('create')
create_p.add_argument('name')
rename_p = subparser.add_parser('rename')
rename_p.add_argument('old')
rename_p.add_argument('new')
delete_p = subparser.add_parser('delete')
delete_p.add_argument('repo')
delete_p.add_argument('-f', '--force', action='store_true')
describe_p = subparser.add_parser('describe')
describe_p.add_argument('repo')
describe_p.add_argument('description', nargs='?')
list_p = subparser.add_parser('list')
list_p.add_argument('-d', '--describe', action='store_true')
hook_install_p = subparser.add_parser('install-hook')
hook_install_p.add_argument('repo')
hook_install_p.add_argument('name')
hook_install_p.add_argument('url')
hook_list_p = subparser.add_parser('list-hooks')
hook_list_p.add_argument('repo')
hook_remove_p = subparser.add_parser('remove-hook')
hook_remove_p.add_argument('repo')
hook_remove_p.add_argument('name')
args = parser.parse_args(args)
if not args.cmd:
parser.print_usage()
return 0
if args.cmd == 'create':
path = check_repo(session, args.name, ACCESS_MANAGE, 'not-exists')
# Make sure that none of the parent directories is a repository.
if any(x.endswith('.git') for x in path.split(os.sep)[:-1]):
printerr("error: can not create repository inside repository")
return errno.EPERM
res = subprocess.call(['git', 'init', '--bare', path])
if res != 0:
printerr("error: repository could not be created.")
return res
elif args.cmd == 'rename':
old_path = check_repo(session, args.old, ACCESS_MANAGE, 'exists')
new_path = check_repo(session, args.new, ACCESS_MANAGE, 'not-exists')
# Make sure that none of the parent directories is a repository.
if any(x.endswith('.git') for x in new_path.split(os.sep)[:-1]):
printerr("error: can not create repository inside repository")
return errno.EPERM
try:
# Make sure the parent of the new target directory exists.
parent = os.path.dirname(new_path)
if not os.path.exists(parent):
os.makedirs(parent)
os.rename(old_path, new_path)
except (OSError, IOError) as exc:
printerr("error:", exc)
return exc.errno
return 0
elif args.cmd == 'delete':
path = check_repo(session, args.repo, ACCESS_MANAGE, 'exists')
if not args.force:
if not util.confirm('do you really want to delete this repository?'):
return 0
print("deleting repository {!r}...".format(args.repo), end=' ')
try:
shutil.rmtree(path)
except (OSError, IOError) as exc:
print("error.")
printerr(exc)
return exc.errno
else:
print("done.")
return 0
elif args.cmd == 'describe':
path = check_repo(session, args.repo, ACCESS_MANAGE, 'exists')
descfile = os.path.join(path, 'description')
if args.description:
with open(descfile, 'w') as fp:
fp.write(args.description)
else:
with open(descfile, 'r') as fp:
print(fp.read().rstrip())
return 0
elif args.cmd == 'install-hook':
# XXX: Validate hook name?
# XXX: Validate URL scheme?
path = check_repo(session, args.repo, ACCESS_MANAGE, 'exists')
hooksfile = os.path.join(path, 'webhooks')
hooks = parse_webhooks(hooksfile)
if args.name in hooks:
printerr("error: webhook name {!r} occupied".format(args.name))
return errno.EEXIST
hooks[args.name] = args.url
write_webhooks(hooksfile, hooks)
return 0
elif args.cmd == 'list-hooks':
path = check_repo(session, args.repo, ACCESS_MANAGE, 'exists')
hooksfile = os.path.join(path, 'webhooks')
hooks = parse_webhooks(hooksfile)
for name, url in sorted(hooks.items(), key=lambda x: x[0]):
print("{0}: {1}".format(name, url))
return 0
elif args.cmd == 'remove-hook':
path = check_repo(session, args.repo, ACCESS_MANAGE, 'exists')
hooksfile = os.path.join(path, 'webhooks')
hooks = parse_webhooks(hooksfile)
if args.name not in hooks:
printerr("error: webhook {!r} does not exist".format(args.name))
return errno.ENOENT
del hooks[args.name]
write_webhooks(hooksfile, hooks)
return 0
elif args.cmd == 'list':
for repo_name, path in session.repositories():
info = session.get_access_info(path)
flags = list('---')
if info & ACCESS_READ:
flags[0] = 'r'
if info & ACCESS_WRITE:
flags[1] = 'w'
if info & ACCESS_MANAGE:
flags[2] = 'm'
if info:
print(''.join(flags), ': ', repo_name, sep='')
if args.describe:
with open(os.path.join(path, 'description'), 'r') as fp:
for line in textwrap.wrap(fp.read().rstrip()):
print(' ', line, sep='')
return 0
printerr("error: command {!r} not handled".format(args.cmd))
return 255
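# Hedged examples of invoking the 'repo' command above over SSH; host and repository
# names are placeholders, not taken from this module:
#
#   ssh git@example.org repo create myproject.git
#   ssh git@example.org repo describe myproject.git "short description"
#   ssh git@example.org repo install-hook myproject.git ci https://ci.example.org/hook
#   ssh git@example.org repo list -d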
@command('help')
def _command_help(session, args):
''' Show this help. '''
print("Available commands:")
print()
for key, cmd in sorted(session.commands.items(), key=lambda x: x[0]):
if session.user.level < cmd.required_level:
continue
print(key)
if cmd.func.__doc__:
for line in textwrap.wrap(textwrap.dedent(cmd.func.__doc__)):
print(" ", line, sep='')
@command('git-upload-pack', required_level=LEVEL_USER)
def _command_git_upload_pack(session, args):
parser = argparse.ArgumentParser(prog='git-upload-pack')
parser.add_argument('repo')
args = parser.parse_args(args)
path = check_repo(session, args.repo, ACCESS_READ, 'exists')
return subprocess.call(['git', 'upload-pack', path])
@command('git-receive-pack', required_level=LEVEL_USER)
def _command_git_receive_pack(session, args):
parser = argparse.ArgumentParser(prog='git-receive-pack')
parser.add_argument('repo')
args = parser.parse_args(args)
path = check_repo(session, args.repo, ACCESS_WRITE, 'exists')
res = subprocess.call(['git', 'receive-pack', path])
hooksfile = os.path.join(path, 'webhooks')
hooks = parse_webhooks(hooksfile)
if not res and hooks:
printerr('info: invoking webhooks')
data = {'host': session.config.host_name,
'repo': args.repo, 'event': 'receive-pack'}
for name, url in hooks.items():
printerr('info: ', name, end='... ')
try:
invoke_webhook(url, data)
except Exception as exc: # XXX: Catch the right exception for connection and communication errors
printerr('error. ({0})'.format(exc))
else:
printerr('success.')
return res
@command('shell', required_level=LEVEL_ROOT)
def _command_shell(session, args):
''' Root users can use this command to enter the interactive shell. '''
if sys.platform == 'win32':
return subprocess.call('cmd')
elif sys.platform in ('cygwin', 'darwin') or sys.platform.startswith('linux'):
# XXX: Is it -l for other shells than Bash as well?
    return subprocess.call([os.environ['SHELL'], '-l'])
@command('ssh-key', required_level=LEVEL_SHELLUSER)
def _command_ssh_key(session, args):
''' Manage SSH keys. '''
level = session.user.level
parser = argparse.ArgumentParser(prog='ssh-key')
if level >= LEVEL_ROOT:
parser.add_argument('-u', '--user')
subparsers = parser.add_subparsers(dest='cmd')
add_p = subparsers.add_parser('add')
add_p.add_argument('name')
add_p.add_argument('pub_key', nargs='?')
list_p = subparsers.add_parser('list')
del_p = subparsers.add_parser('del')
del_p.add_argument('name')
del_p.add_argument('-f', '--force', action='store_true')
if level >= LEVEL_ROOT:
update_p = subparsers.add_parser('update')
args = parser.parse_args(args)
if not args.cmd:
parser.print_usage()
return
if not getattr(args, 'user', None):
args.user = session.user.name
else:
try:
info = session.config.access_control.get_user_info(args.user)
except auth.UnknownUser as exc:
printerr("error: user {!r} does not exist".format(str(exc)))
return errno.EINVAL
  manager = getattr(session.config, 'ssh_key_manager', None)
if not manager:
printerr("error: no ssh_key_manager configured")
return 255 # XXX: Better error code?
if not isinstance(manager, ssh.SSHKeyManager):
printerr("error: invalid ssh_key_manager configuration")
return 255
if args.cmd == 'add':
if not args.pub_key:
args.pub_key = sys.stdin.readline()
try:
key = ssh.parse_authorized_key(args.pub_key)
if key.options:
raise ValueError('options are not allowed')
except ValueError as exc:
printerr("error: invalid SSH public key")
return errno.EINVAL
try:
manager.add_key(args.user, args.name, key.type, key.blob)
except ValueError as exc:
printerr("error: could not add SSH key: {}".format(exc))
return errno.EINVAL
print("SSH key added.")
return 0
elif args.cmd == 'list':
keys = list(manager.iter_keys(args.user))
if not keys:
print("no SSH keys for user {!r}".format(args.user))
else:
print("SSH keys for user {!r}".format(args.user))
print()
for key in keys:
print('*', key.title, end='')
if key.comment:
print(' -', key.comment)
else:
print()
return 0
elif args.cmd == 'del':
try:
manager.del_key(args.user, args.name)
except ValueError as exc:
printerr("error: {}".format(exc))
return 255
print("SSH key deleted.")
return 0
else:
raise RuntimeError
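# Hedged examples of the 'ssh-key' command defined above (host, user and key names
# are invented for illustration; the public key is read from stdin when omitted):
#
#   ssh git@example.org ssh-key add laptop < ~/.ssh/id_rsa.pub
#   ssh git@example.org ssh-key list
#   ssh git@example.org ssh-key del laptop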
|
|
"""The IPython Controller Hub with 0MQ
This is the master object that handles connections from engines and clients,
and monitors traffic through the various queues.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import json
import os
import sys
import time
from datetime import datetime
import zmq
from zmq.eventloop.zmqstream import ZMQStream
# internal:
from IPython.utils.importstring import import_item
from IPython.utils.jsonutil import extract_dates
from IPython.utils.localinterfaces import localhost
from IPython.utils.py3compat import cast_bytes, unicode_type, iteritems
from IPython.utils.traitlets import (
HasTraits, Any, Instance, Integer, Unicode, Dict, Set, Tuple, DottedObjectName
)
from IPython.parallel import error, util
from IPython.parallel.factory import RegistrationFactory
from IPython.kernel.zmq.session import SessionFactory
from .heartmonitor import HeartMonitor
def _passer(*args, **kwargs):
return
def _printer(*args, **kwargs):
print (args)
print (kwargs)
def empty_record():
"""Return an empty dict with all record keys."""
return {
'msg_id': None,
'header': None,
'metadata': None,
'content': None,
'buffers': None,
'submitted': None,
'client_uuid': None,
'engine_uuid': None,
'started': None,
'completed': None,
'resubmitted': None,
'received': None,
'result_header': None,
'result_metadata': None,
'result_content': None,
'result_buffers': None,
'queue': None,
'execute_input': None,
'execute_result': None,
'error': None,
'stdout': '',
'stderr': '',
}
def init_record(msg):
"""Initialize a TaskRecord based on a request."""
header = msg['header']
return {
'msg_id': header['msg_id'],
'header': header,
'content': msg['content'],
'metadata': msg['metadata'],
'buffers': msg['buffers'],
'submitted': header['date'],
'client_uuid': None,
'engine_uuid': None,
'started': None,
'completed': None,
'resubmitted': None,
'received': None,
'result_header': None,
'result_metadata': None,
'result_content': None,
'result_buffers': None,
'queue': None,
'execute_input': None,
'execute_result': None,
'error': None,
'stdout': '',
'stderr': '',
}
class EngineConnector(HasTraits):
"""A simple object for accessing the various zmq connections of an object.
Attributes are:
id (int): engine ID
uuid (unicode): engine UUID
pending: set of msg_ids
stallback: tornado timeout for stalled registration
"""
id = Integer(0)
uuid = Unicode()
pending = Set()
stallback = Any()
_db_shortcuts = {
'sqlitedb': 'IPython.parallel.controller.sqlitedb.SQLiteDB',
'mongodb': 'IPython.parallel.controller.mongodb.MongoDB',
'dictdb': 'IPython.parallel.controller.dictdb.DictDB',
'nodb': 'IPython.parallel.controller.dictdb.NoDB',
}
class HubFactory(RegistrationFactory):
"""The Configurable for setting up a Hub."""
# port-pairs for monitoredqueues:
hb = Tuple(Integer, Integer, config=True,
help="""PUB/ROUTER Port pair for Engine heartbeats""")
def _hb_default(self):
return tuple(util.select_random_ports(2))
mux = Tuple(Integer, Integer, config=True,
help="""Client/Engine Port pair for MUX queue""")
def _mux_default(self):
return tuple(util.select_random_ports(2))
task = Tuple(Integer, Integer, config=True,
help="""Client/Engine Port pair for Task queue""")
def _task_default(self):
return tuple(util.select_random_ports(2))
control = Tuple(Integer, Integer, config=True,
help="""Client/Engine Port pair for Control queue""")
def _control_default(self):
return tuple(util.select_random_ports(2))
iopub = Tuple(Integer, Integer, config=True,
help="""Client/Engine Port pair for IOPub relay""")
def _iopub_default(self):
return tuple(util.select_random_ports(2))
# single ports:
mon_port = Integer(config=True,
help="""Monitor (SUB) port for queue traffic""")
def _mon_port_default(self):
return util.select_random_ports(1)[0]
notifier_port = Integer(config=True,
help="""PUB port for sending engine status notifications""")
def _notifier_port_default(self):
return util.select_random_ports(1)[0]
engine_ip = Unicode(config=True,
help="IP on which to listen for engine connections. [default: loopback]")
def _engine_ip_default(self):
return localhost()
engine_transport = Unicode('tcp', config=True,
help="0MQ transport for engine connections. [default: tcp]")
client_ip = Unicode(config=True,
help="IP on which to listen for client connections. [default: loopback]")
client_transport = Unicode('tcp', config=True,
help="0MQ transport for client connections. [default : tcp]")
monitor_ip = Unicode(config=True,
help="IP on which to listen for monitor messages. [default: loopback]")
monitor_transport = Unicode('tcp', config=True,
help="0MQ transport for monitor messages. [default : tcp]")
_client_ip_default = _monitor_ip_default = _engine_ip_default
monitor_url = Unicode('')
db_class = DottedObjectName('NoDB',
config=True, help="""The class to use for the DB backend
Options include:
SQLiteDB: SQLite
MongoDB : use MongoDB
DictDB : in-memory storage (fastest, but be mindful of memory growth of the Hub)
NoDB : disable database altogether (default)
""")
registration_timeout = Integer(0, config=True,
help="Engine registration timeout in seconds [default: max(30,"
"10*heartmonitor.period)]")
def _registration_timeout_default(self):
if self.heartmonitor is None:
# early initialization, this value will be ignored
return 0
# heartmonitor period is in milliseconds, so 10x in seconds is .01
return max(30, int(.01 * self.heartmonitor.period))
# not configurable
db = Instance('IPython.parallel.controller.dictdb.BaseDB')
heartmonitor = Instance(
'IPython.parallel.controller.heartmonitor.HeartMonitor')
def _ip_changed(self, name, old, new):
self.engine_ip = new
self.client_ip = new
self.monitor_ip = new
self._update_monitor_url()
def _update_monitor_url(self):
self.monitor_url = "%s://%s:%i" % (
self.monitor_transport, self.monitor_ip, self.mon_port)
def _transport_changed(self, name, old, new):
self.engine_transport = new
self.client_transport = new
self.monitor_transport = new
self._update_monitor_url()
def __init__(self, **kwargs):
super(HubFactory, self).__init__(**kwargs)
self._update_monitor_url()
def construct(self):
self.init_hub()
def start(self):
self.heartmonitor.start()
self.log.info("Heartmonitor started")
def client_url(self, channel):
"""return full zmq url for a named client channel"""
return "%s://%s:%i" % (self.client_transport, self.client_ip, self.client_info[channel])
def engine_url(self, channel):
"""return full zmq url for a named engine channel"""
return "%s://%s:%i" % (self.engine_transport, self.engine_ip, self.engine_info[channel])
def init_hub(self):
"""construct Hub object"""
ctx = self.context
loop = self.loop
if 'TaskScheduler.scheme_name' in self.config:
scheme = self.config.TaskScheduler.scheme_name
else:
from .scheduler import TaskScheduler
scheme = TaskScheduler.scheme_name.get_default_value()
# build connection dicts
engine = self.engine_info = {
'interface': "%s://%s" % (self.engine_transport, self.engine_ip),
'registration': self.regport,
'control': self.control[1],
'mux': self.mux[1],
'hb_ping': self.hb[0],
'hb_pong': self.hb[1],
'task': self.task[1],
'iopub': self.iopub[1],
}
client = self.client_info = {
'interface': "%s://%s" % (self.client_transport, self.client_ip),
'registration': self.regport,
'control': self.control[0],
'mux': self.mux[0],
'task': self.task[0],
'task_scheme': scheme,
'iopub': self.iopub[0],
'notification': self.notifier_port,
}
self.log.debug("Hub engine addrs: %s", self.engine_info)
self.log.debug("Hub client addrs: %s", self.client_info)
# Registrar socket
q = ZMQStream(ctx.socket(zmq.ROUTER), loop)
util.set_hwm(q, 0)
q.bind(self.client_url('registration'))
self.log.info(
"Hub listening on %s for registration.", self.client_url('registration'))
if self.client_ip != self.engine_ip:
q.bind(self.engine_url('registration'))
self.log.info(
"Hub listening on %s for registration.", self.engine_url('registration'))
### Engine connections ###
# heartbeat
hpub = ctx.socket(zmq.PUB)
hpub.bind(self.engine_url('hb_ping'))
hrep = ctx.socket(zmq.ROUTER)
util.set_hwm(hrep, 0)
hrep.bind(self.engine_url('hb_pong'))
self.heartmonitor = HeartMonitor(loop=loop, parent=self, log=self.log,
pingstream=ZMQStream(hpub, loop),
pongstream=ZMQStream(hrep, loop)
)
### Client connections ###
# Notifier socket
n = ZMQStream(ctx.socket(zmq.PUB), loop)
n.bind(self.client_url('notification'))
### build and launch the queues ###
# monitor socket
sub = ctx.socket(zmq.SUB)
sub.setsockopt(zmq.SUBSCRIBE, b"")
sub.bind(self.monitor_url)
sub.bind('inproc://monitor')
sub = ZMQStream(sub, loop)
# connect the db
db_class = _db_shortcuts.get(self.db_class.lower(), self.db_class)
self.log.info('Hub using DB backend: %r', (db_class.split('.')[-1]))
self.db = import_item(str(db_class))(session=self.session.session,
parent=self, log=self.log)
time.sleep(.25)
# resubmit stream
r = ZMQStream(ctx.socket(zmq.DEALER), loop)
url = util.disambiguate_url(self.client_url('task'))
r.connect(url)
self.hub = Hub(loop=loop, session=self.session, monitor=sub, heartmonitor=self.heartmonitor,
query=q, notifier=n, resubmit=r, db=self.db,
engine_info=self.engine_info, client_info=self.client_info,
log=self.log, registration_timeout=self.registration_timeout)
class Hub(SessionFactory):
"""The IPython Controller Hub with 0MQ connections
Parameters
==========
loop: zmq IOLoop instance
session: Session object
<removed> context: zmq context for creating new connections (?)
queue: ZMQStream for monitoring the command queue (SUB)
query: ZMQStream for engine registration and client queries requests (ROUTER)
heartbeat: HeartMonitor object checking the pulse of the engines
notifier: ZMQStream for broadcasting engine registration changes (PUB)
db: connection to db for out of memory logging of commands
NotImplemented
engine_info: dict of zmq connection information for engines to connect
to the queues.
    client_info: dict of zmq connection information for clients to connect
to the queues.
"""
engine_state_file = Unicode()
# internal data structures:
ids = Set() # engine IDs
keytable = Dict()
by_ident = Dict()
engines = Dict()
clients = Dict()
hearts = Dict()
pending = Set()
queues = Dict() # pending msg_ids keyed by engine_id
tasks = Dict() # pending msg_ids submitted as tasks, keyed by client_id
completed = Dict() # completed msg_ids keyed by engine_id
    all_completed = Set() # set of all completed msg_ids
    dead_engines = Set() # uuids of engines that have unregistered or died
    unassigned = Set() # set of task msg_ids not yet assigned a destination
incoming_registrations = Dict()
registration_timeout = Integer()
_idcounter = Integer(0)
# objects from constructor:
query = Instance(ZMQStream)
monitor = Instance(ZMQStream)
notifier = Instance(ZMQStream)
resubmit = Instance(ZMQStream)
heartmonitor = Instance(HeartMonitor)
db = Instance(object)
client_info = Dict()
engine_info = Dict()
def __init__(self, **kwargs):
"""
# universal:
loop: IOLoop for creating future connections
session: streamsession for sending serialized data
# engine:
queue: ZMQStream for monitoring queue messages
query: ZMQStream for engine+client registration and client requests
heartbeat: HeartMonitor object for tracking engines
# extra:
db: ZMQStream for db connection (NotImplemented)
engine_info: zmq address/protocol dict for engine connections
client_info: zmq address/protocol dict for client connections
"""
super(Hub, self).__init__(**kwargs)
# register our callbacks
self.query.on_recv(self.dispatch_query)
self.monitor.on_recv(self.dispatch_monitor_traffic)
self.heartmonitor.add_heart_failure_handler(self.handle_heart_failure)
self.heartmonitor.add_new_heart_handler(self.handle_new_heart)
self.monitor_handlers = {b'in': self.save_queue_request,
b'out': self.save_queue_result,
b'intask': self.save_task_request,
b'outtask': self.save_task_result,
b'tracktask': self.save_task_destination,
b'incontrol': _passer,
b'outcontrol': _passer,
b'iopub': self.save_iopub_message,
}
self.query_handlers = {'queue_request': self.queue_status,
'result_request': self.get_results,
'history_request': self.get_history,
'db_request': self.db_query,
'purge_request': self.purge_results,
'load_request': self.check_load,
'resubmit_request': self.resubmit_task,
'shutdown_request': self.shutdown_request,
'registration_request': self.register_engine,
'unregistration_request': self.unregister_engine,
'connection_request': self.connection_request,
}
# ignore resubmit replies
self.resubmit.on_recv(lambda msg: None, copy=False)
self.log.info("hub::created hub")
@property
def _next_id(self):
"""gemerate a new ID.
No longer reuse old ids, just count from 0."""
newid = self._idcounter
self._idcounter += 1
return newid
# newid = 0
# incoming = [id[0] for id in itervalues(self.incoming_registrations)]
# # print newid, self.ids, self.incoming_registrations
# while newid in self.ids or newid in incoming:
# newid += 1
# return newid
#-------------------------------------------------------------------------
# message validation
#-------------------------------------------------------------------------
def _validate_targets(self, targets):
"""turn any valid targets argument into a list of integer ids"""
if targets is None:
# default to all
return self.ids
if isinstance(targets, (int, str, unicode_type)):
# only one target specified
targets = [targets]
_targets = []
for t in targets:
# map raw identities to ids
if isinstance(t, (str, unicode_type)):
t = self.by_ident.get(cast_bytes(t), t)
_targets.append(t)
targets = _targets
bad_targets = [t for t in targets if t not in self.ids]
if bad_targets:
raise IndexError("No Such Engine: %r" % bad_targets)
if not targets:
raise IndexError("No Engines Registered")
return targets
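    # Hedged illustration of _validate_targets: None expands to all registered ids,
    # a single int or identity string becomes a one-element list (raw identities are
    # mapped through by_ident), unknown targets raise IndexError("No Such Engine: ..."),
    # and an empty registry raises IndexError("No Engines Registered").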
#-------------------------------------------------------------------------
# dispatch methods (1 per stream)
#-------------------------------------------------------------------------
@util.log_errors
def dispatch_monitor_traffic(self, msg):
"""all ME and Task queue messages come through here, as well as
IOPub traffic."""
self.log.debug("monitor traffic: %r", msg[0])
switch = msg[0]
try:
idents, msg = self.session.feed_identities(msg[1:])
except ValueError:
idents = []
if not idents:
self.log.error("Monitor message without topic: %r", msg)
return
handler = self.monitor_handlers.get(switch, None)
if handler is not None:
handler(idents, msg)
else:
self.log.error("Unrecognized monitor topic: %r", switch)
@util.log_errors
def dispatch_query(self, msg):
"""Route registration requests and queries from clients."""
try:
idents, msg = self.session.feed_identities(msg)
except ValueError:
idents = []
if not idents:
self.log.error("Bad Query Message: %r", msg)
return
client_id = idents[0]
try:
msg = self.session.deserialize(msg, content=True)
except Exception:
content = error.wrap_exception()
self.log.error("Bad Query Message: %r", msg, exc_info=True)
self.session.send(self.query, "hub_error", ident=client_id,
content=content)
return
# print client_id, header, parent, content
# switch on message type:
msg_type = msg['header']['msg_type']
self.log.info("client::client %r requested %r", client_id, msg_type)
handler = self.query_handlers.get(msg_type, None)
try:
assert handler is not None, "Bad Message Type: %r" % msg_type
except:
content = error.wrap_exception()
self.log.error("Bad Message Type: %r", msg_type, exc_info=True)
self.session.send(self.query, "hub_error", ident=client_id,
content=content)
return
else:
handler(idents, msg)
def dispatch_db(self, msg):
""""""
raise NotImplementedError
#-------------------------------------------------------------------------
# handler methods (1 per event)
#-------------------------------------------------------------------------
#----------------------- Heartbeat --------------------------------------
def handle_new_heart(self, heart):
"""handler to attach to heartbeater.
Called when a new heart starts to beat.
Triggers completion of registration."""
self.log.debug("heartbeat::handle_new_heart(%r)", heart)
if heart not in self.incoming_registrations:
self.log.info("heartbeat::ignoring new heart: %r", heart)
else:
self.finish_registration(heart)
def handle_heart_failure(self, heart):
"""handler to attach to heartbeater.
called when a previously registered heart fails to respond to beat request.
triggers unregistration"""
self.log.debug("heartbeat::handle_heart_failure(%r)", heart)
        eid = self.hearts.get(heart, None)
        if eid is None or self.keytable[eid] in self.dead_engines:
            self.log.info(
                "heartbeat::ignoring heart failure %r (not an engine or already dead)", heart)
        else:
            # only look up the uuid once we know the heart belongs to a live engine
            uuid = self.engines[eid].uuid
            self.unregister_engine(
                heart, dict(content=dict(id=eid, queue=uuid)))
#----------------------- MUX Queue Traffic ------------------------------
def save_queue_request(self, idents, msg):
if len(idents) < 2:
self.log.error("invalid identity prefix: %r", idents)
return
queue_id, client_id = idents[:2]
try:
msg = self.session.deserialize(msg)
except Exception:
self.log.error(
"queue::client %r sent invalid message to %r: %r", client_id, queue_id, msg, exc_info=True)
return
eid = self.by_ident.get(queue_id, None)
if eid is None:
self.log.error("queue::target %r not registered", queue_id)
self.log.debug("queue:: valid are: %r", self.by_ident.keys())
return
record = init_record(msg)
msg_id = record['msg_id']
self.log.info(
"queue::client %r submitted request %r to %s", client_id, msg_id, eid)
# Unicode in records
record['engine_uuid'] = queue_id.decode('ascii')
record['client_uuid'] = msg['header']['session']
record['queue'] = 'mux'
try:
            # it's possible iopub arrived first:
existing = self.db.get_record(msg_id)
for key, evalue in iteritems(existing):
rvalue = record.get(key, None)
if evalue and rvalue and evalue != rvalue:
self.log.warn(
"conflicting initial state for record: %r:%r <%r> %r", msg_id, rvalue, key, evalue)
elif evalue and not rvalue:
record[key] = evalue
try:
self.db.update_record(msg_id, record)
except Exception:
self.log.error(
"DB Error updating record %r", msg_id, exc_info=True)
except KeyError:
try:
self.db.add_record(msg_id, record)
except Exception:
self.log.error(
"DB Error adding record %r", msg_id, exc_info=True)
self.pending.add(msg_id)
self.queues[eid].append(msg_id)
def save_queue_result(self, idents, msg):
if len(idents) < 2:
self.log.error("invalid identity prefix: %r", idents)
return
client_id, queue_id = idents[:2]
try:
msg = self.session.deserialize(msg)
except Exception:
self.log.error("queue::engine %r sent invalid message to %r: %r",
queue_id, client_id, msg, exc_info=True)
return
eid = self.by_ident.get(queue_id, None)
if eid is None:
self.log.error(
"queue::unknown engine %r is sending a reply: ", queue_id)
return
parent = msg['parent_header']
if not parent:
return
msg_id = parent['msg_id']
if msg_id in self.pending:
self.pending.remove(msg_id)
self.all_completed.add(msg_id)
self.queues[eid].remove(msg_id)
self.completed[eid].append(msg_id)
self.log.info("queue::request %r completed on %s", msg_id, eid)
elif msg_id not in self.all_completed:
# it could be a result from a dead engine that died before delivering the
# result
self.log.warn("queue:: unknown msg finished %r", msg_id)
return
# update record anyway, because the unregistration could have been
# premature
rheader = msg['header']
md = msg['metadata']
completed = rheader['date']
started = extract_dates(md.get('started', None))
result = {
'result_header': rheader,
'result_metadata': md,
'result_content': msg['content'],
'received': datetime.now(),
'started': started,
'completed': completed
}
result['result_buffers'] = msg['buffers']
try:
self.db.update_record(msg_id, result)
except Exception:
self.log.error(
"DB Error updating record %r", msg_id, exc_info=True)
#--------------------- Task Queue Traffic ------------------------------
def save_task_request(self, idents, msg):
"""Save the submission of a task."""
client_id = idents[0]
try:
msg = self.session.deserialize(msg)
except Exception:
self.log.error("task::client %r sent invalid task message: %r",
client_id, msg, exc_info=True)
return
record = init_record(msg)
record['client_uuid'] = msg['header']['session']
record['queue'] = 'task'
header = msg['header']
msg_id = header['msg_id']
self.pending.add(msg_id)
self.unassigned.add(msg_id)
try:
            # it's possible iopub arrived first:
existing = self.db.get_record(msg_id)
if existing['resubmitted']:
for key in ('submitted', 'client_uuid', 'buffers'):
# don't clobber these keys on resubmit
# submitted and client_uuid should be different
# and buffers might be big, and shouldn't have changed
record.pop(key)
# still check content,header which should not change
# but are not expensive to compare as buffers
for key, evalue in iteritems(existing):
if key.endswith('buffers'):
# don't compare buffers
continue
rvalue = record.get(key, None)
if evalue and rvalue and evalue != rvalue:
self.log.warn(
"conflicting initial state for record: %r:%r <%r> %r", msg_id, rvalue, key, evalue)
elif evalue and not rvalue:
record[key] = evalue
try:
self.db.update_record(msg_id, record)
except Exception:
self.log.error(
"DB Error updating record %r", msg_id, exc_info=True)
except KeyError:
try:
self.db.add_record(msg_id, record)
except Exception:
self.log.error(
"DB Error adding record %r", msg_id, exc_info=True)
except Exception:
self.log.error(
"DB Error saving task request %r", msg_id, exc_info=True)
def save_task_result(self, idents, msg):
"""save the result of a completed task."""
client_id = idents[0]
try:
msg = self.session.deserialize(msg)
except Exception:
self.log.error("task::invalid task result message send to %r: %r",
client_id, msg, exc_info=True)
return
parent = msg['parent_header']
if not parent:
# print msg
self.log.warn("Task %r had no parent!", msg)
return
msg_id = parent['msg_id']
if msg_id in self.unassigned:
self.unassigned.remove(msg_id)
header = msg['header']
md = msg['metadata']
engine_uuid = md.get('engine', u'')
eid = self.by_ident.get(cast_bytes(engine_uuid), None)
status = md.get('status', None)
if msg_id in self.pending:
self.log.info("task::task %r finished on %s", msg_id, eid)
self.pending.remove(msg_id)
self.all_completed.add(msg_id)
if eid is not None:
if status != 'aborted':
self.completed[eid].append(msg_id)
if msg_id in self.tasks[eid]:
self.tasks[eid].remove(msg_id)
completed = header['date']
started = extract_dates(md.get('started', None))
result = {
'result_header': header,
'result_metadata': msg['metadata'],
'result_content': msg['content'],
'started': started,
'completed': completed,
'received': datetime.now(),
'engine_uuid': engine_uuid,
}
result['result_buffers'] = msg['buffers']
try:
self.db.update_record(msg_id, result)
except Exception:
self.log.error(
"DB Error saving task request %r", msg_id, exc_info=True)
else:
self.log.debug("task::unknown task %r finished", msg_id)
def save_task_destination(self, idents, msg):
try:
msg = self.session.deserialize(msg, content=True)
except Exception:
self.log.error(
"task::invalid task tracking message", exc_info=True)
return
content = msg['content']
# print (content)
msg_id = content['msg_id']
engine_uuid = content['engine_id']
eid = self.by_ident[cast_bytes(engine_uuid)]
self.log.info("task::task %r arrived on %r", msg_id, eid)
if msg_id in self.unassigned:
self.unassigned.remove(msg_id)
# else:
# self.log.debug("task::task %r not listed as MIA?!"%(msg_id))
self.tasks[eid].append(msg_id)
# self.pending[msg_id][1].update(received=datetime.now(),engine=(eid,engine_uuid))
try:
self.db.update_record(msg_id, dict(engine_uuid=engine_uuid))
except Exception:
self.log.error(
"DB Error saving task destination %r", msg_id, exc_info=True)
def mia_task_request(self, idents, msg):
raise NotImplementedError
client_id = idents[0]
# content = dict(mia=self.mia,status='ok')
# self.session.send('mia_reply', content=content, idents=client_id)
#--------------------- IOPub Traffic ------------------------------
def save_iopub_message(self, topics, msg):
"""save an iopub message into the db"""
# print (topics)
try:
msg = self.session.deserialize(msg, content=True)
except Exception:
self.log.error("iopub::invalid IOPub message", exc_info=True)
return
parent = msg['parent_header']
if not parent:
self.log.debug("iopub::IOPub message lacks parent: %r", msg)
return
msg_id = parent['msg_id']
msg_type = msg['header']['msg_type']
content = msg['content']
# ensure msg_id is in db
try:
rec = self.db.get_record(msg_id)
except KeyError:
rec = None
# stream
d = {}
if msg_type == 'stream':
name = content['name']
s = '' if rec is None else rec[name]
d[name] = s + content['text']
elif msg_type == 'error':
d['error'] = content
elif msg_type == 'execute_input':
d['execute_input'] = content['code']
elif msg_type in ('display_data', 'execute_result'):
d[msg_type] = content
elif msg_type == 'status':
pass
elif msg_type == 'data_pub':
self.log.info("ignored data_pub message for %s" % msg_id)
else:
self.log.warn("unhandled iopub msg_type: %r", msg_type)
if not d:
return
if rec is None:
# new record
rec = empty_record()
rec['msg_id'] = msg_id
rec.update(d)
d = rec
update_record = self.db.add_record
else:
update_record = self.db.update_record
try:
update_record(msg_id, d)
except Exception:
self.log.error(
"DB Error saving iopub message %r", msg_id, exc_info=True)
#-------------------------------------------------------------------------
# Registration requests
#-------------------------------------------------------------------------
def connection_request(self, client_id, msg):
"""Reply with connection addresses for clients."""
self.log.info("client::client %r connected", client_id)
content = dict(status='ok')
jsonable = {}
for k, v in iteritems(self.keytable):
if v not in self.dead_engines:
jsonable[str(k)] = v
content['engines'] = jsonable
self.session.send(
self.query, 'connection_reply', content, parent=msg, ident=client_id)
def register_engine(self, reg, msg):
"""Register a new engine."""
content = msg['content']
try:
uuid = content['uuid']
except KeyError:
self.log.error("registration::queue not specified", exc_info=True)
return
eid = self._next_id
self.log.debug("registration::register_engine(%i, %r)", eid, uuid)
content = dict(id=eid, status='ok', hb_period=self.heartmonitor.period)
# check if requesting available IDs:
if cast_bytes(uuid) in self.by_ident:
try:
raise KeyError("uuid %r in use" % uuid)
except:
content = error.wrap_exception()
self.log.error("uuid %r in use", uuid, exc_info=True)
else:
for h, ec in iteritems(self.incoming_registrations):
if uuid == h:
try:
raise KeyError("heart_id %r in use" % uuid)
except:
self.log.error(
"heart_id %r in use", uuid, exc_info=True)
content = error.wrap_exception()
break
elif uuid == ec.uuid:
try:
raise KeyError("uuid %r in use" % uuid)
except:
self.log.error("uuid %r in use", uuid, exc_info=True)
content = error.wrap_exception()
break
msg = self.session.send(self.query, "registration_reply",
content=content,
ident=reg)
heart = cast_bytes(uuid)
if content['status'] == 'ok':
if heart in self.heartmonitor.hearts:
# already beating
self.incoming_registrations[
heart] = EngineConnector(id=eid, uuid=uuid)
self.finish_registration(heart)
else:
purge = lambda: self._purge_stalled_registration(heart)
t = self.loop.add_timeout(
self.loop.time() + self.registration_timeout,
purge,
)
self.incoming_registrations[heart] = EngineConnector(
id=eid, uuid=uuid, stallback=t)
else:
self.log.error(
"registration::registration %i failed: %r", eid, content['evalue'])
return eid
def unregister_engine(self, ident, msg):
"""Unregister an engine that explicitly requested to leave."""
try:
eid = msg['content']['id']
except:
self.log.error(
"registration::bad engine id for unregistration: %r", ident, exc_info=True)
return
self.log.info("registration::unregister_engine(%r)", eid)
uuid = self.keytable[eid]
content = dict(id=eid, uuid=uuid)
self.dead_engines.add(uuid)
self.loop.add_timeout(
self.loop.time() + self.registration_timeout,
lambda: self._handle_stranded_msgs(eid, uuid),
)
############## TODO: HANDLE IT ################
self._save_engine_state()
if self.notifier:
self.session.send(
self.notifier, "unregistration_notification", content=content)
def _handle_stranded_msgs(self, eid, uuid):
"""Handle messages known to be on an engine when the engine unregisters.
It is possible that this will fire prematurely - that is, an engine will
go down after completing a result, and the client will be notified
that the result failed and later receive the actual result.
"""
outstanding = self.queues[eid]
for msg_id in outstanding:
self.pending.remove(msg_id)
self.all_completed.add(msg_id)
try:
raise error.EngineError(
"Engine %r died while running task %r" % (eid, msg_id))
except:
content = error.wrap_exception()
# build a fake header:
header = {}
header['engine'] = uuid
header['date'] = datetime.now()
rec = dict(
result_content=content, result_header=header, result_buffers=[])
rec['completed'] = header['date']
rec['engine_uuid'] = uuid
try:
self.db.update_record(msg_id, rec)
except Exception:
self.log.error(
"DB Error handling stranded msg %r", msg_id, exc_info=True)
def finish_registration(self, heart):
"""Second half of engine registration, called after our HeartMonitor
has received a beat from the Engine's Heart."""
try:
ec = self.incoming_registrations.pop(heart)
except KeyError:
self.log.error(
"registration::tried to finish nonexistant registration", exc_info=True)
return
self.log.info(
"registration::finished registering engine %i:%s", ec.id, ec.uuid)
if ec.stallback is not None:
self.loop.remove_timeout(ec.stallback)
eid = ec.id
self.ids.add(eid)
self.keytable[eid] = ec.uuid
self.engines[eid] = ec
self.by_ident[cast_bytes(ec.uuid)] = ec.id
self.queues[eid] = list()
self.tasks[eid] = list()
self.completed[eid] = list()
self.hearts[heart] = eid
content = dict(id=eid, uuid=self.engines[eid].uuid)
if self.notifier:
self.session.send(
self.notifier, "registration_notification", content=content)
self.log.info("engine::Engine Connected: %i", eid)
self._save_engine_state()
def _purge_stalled_registration(self, heart):
if heart in self.incoming_registrations:
ec = self.incoming_registrations.pop(heart)
self.log.info(
"registration::purging stalled registration: %i", ec.id)
else:
pass
#-------------------------------------------------------------------------
# Engine State
#-------------------------------------------------------------------------
def _cleanup_engine_state_file(self):
"""cleanup engine state mapping"""
if os.path.exists(self.engine_state_file):
self.log.debug(
"cleaning up engine state: %s", self.engine_state_file)
try:
os.remove(self.engine_state_file)
except IOError:
self.log.error(
"Couldn't cleanup file: %s", self.engine_state_file, exc_info=True)
def _save_engine_state(self):
"""save engine mapping to JSON file"""
if not self.engine_state_file:
return
self.log.debug("save engine state to %s" % self.engine_state_file)
state = {}
engines = {}
for eid, ec in iteritems(self.engines):
if ec.uuid not in self.dead_engines:
engines[eid] = ec.uuid
state['engines'] = engines
state['next_id'] = self._idcounter
with open(self.engine_state_file, 'w') as f:
json.dump(state, f)
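    # Hedged example of the JSON produced by _save_engine_state (ids and uuids are
    # invented): {"engines": {"0": "0f1a...", "1": "9c2b..."}, "next_id": 2}.
    # _load_engine_state below reads this file back and re-registers each heart.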
def _load_engine_state(self):
"""load engine mapping from JSON file"""
if not os.path.exists(self.engine_state_file):
return
self.log.info("loading engine state from %s" % self.engine_state_file)
with open(self.engine_state_file) as f:
state = json.load(f)
save_notifier = self.notifier
self.notifier = None
for eid, uuid in iteritems(state['engines']):
heart = uuid.encode('ascii')
# start with this heart as current and beating:
self.heartmonitor.responses.add(heart)
self.heartmonitor.hearts.add(heart)
self.incoming_registrations[
heart] = EngineConnector(id=int(eid), uuid=uuid)
self.finish_registration(heart)
self.notifier = save_notifier
self._idcounter = state['next_id']
#-------------------------------------------------------------------------
# Client Requests
#-------------------------------------------------------------------------
def shutdown_request(self, client_id, msg):
"""handle shutdown request."""
self.session.send(
self.query, 'shutdown_reply', content={'status': 'ok'}, ident=client_id)
# also notify other clients of shutdown
self.session.send(
self.notifier, 'shutdown_notice', content={'status': 'ok'})
self.loop.add_timeout(self.loop.time() + 1, self._shutdown)
def _shutdown(self):
self.log.info("hub::hub shutting down.")
time.sleep(0.1)
sys.exit(0)
def check_load(self, client_id, msg):
content = msg['content']
try:
targets = content['targets']
targets = self._validate_targets(targets)
except:
content = error.wrap_exception()
self.session.send(self.query, "hub_error",
content=content, ident=client_id)
return
content = dict(status='ok')
# loads = {}
for t in targets:
content[bytes(t)] = len(self.queues[t]) + len(self.tasks[t])
self.session.send(
self.query, "load_reply", content=content, ident=client_id)
def queue_status(self, client_id, msg):
"""Return the Queue status of one or more targets.
If verbose, return the msg_ids, else return len of each type.
Keys:
* queue (pending MUX jobs)
* tasks (pending Task jobs)
* completed (finished jobs from both queues)
"""
content = msg['content']
targets = content['targets']
try:
targets = self._validate_targets(targets)
except:
content = error.wrap_exception()
self.session.send(self.query, "hub_error",
content=content, ident=client_id)
return
verbose = content.get('verbose', False)
content = dict(status='ok')
for t in targets:
queue = self.queues[t]
completed = self.completed[t]
tasks = self.tasks[t]
if not verbose:
queue = len(queue)
completed = len(completed)
tasks = len(tasks)
content[str(t)] = {
'queue': queue, 'completed': completed, 'tasks': tasks}
content['unassigned'] = list(
self.unassigned) if verbose else len(self.unassigned)
# print (content)
self.session.send(
self.query, "queue_reply", content=content, ident=client_id)
def purge_results(self, client_id, msg):
"""Purge results from memory. This method is more valuable before we move
to a DB based message storage mechanism."""
content = msg['content']
self.log.info("Dropping records with %s", content)
msg_ids = content.get('msg_ids', [])
reply = dict(status='ok')
if msg_ids == 'all':
try:
self.db.drop_matching_records(dict(completed={'$ne': None}))
except Exception:
reply = error.wrap_exception()
self.log.exception("Error dropping records")
else:
pending = [m for m in msg_ids if (m in self.pending)]
if pending:
try:
raise IndexError("msg pending: %r" % pending[0])
except:
reply = error.wrap_exception()
self.log.exception("Error dropping records")
else:
try:
self.db.drop_matching_records(
dict(msg_id={'$in': msg_ids}))
except Exception:
reply = error.wrap_exception()
self.log.exception("Error dropping records")
if reply['status'] == 'ok':
eids = content.get('engine_ids', [])
for eid in eids:
if eid not in self.engines:
try:
raise IndexError("No such engine: %i" % eid)
except:
reply = error.wrap_exception()
self.log.exception("Error dropping records")
break
uid = self.engines[eid].uuid
try:
self.db.drop_matching_records(
dict(engine_uuid=uid, completed={'$ne': None}))
except Exception:
reply = error.wrap_exception()
self.log.exception("Error dropping records")
break
self.session.send(
self.query, 'purge_reply', content=reply, ident=client_id)
def resubmit_task(self, client_id, msg):
"""Resubmit one or more tasks."""
def finish(reply):
self.session.send(
self.query, 'resubmit_reply', content=reply, ident=client_id)
content = msg['content']
msg_ids = content['msg_ids']
reply = dict(status='ok')
try:
records = self.db.find_records({'msg_id': {'$in': msg_ids}}, keys=[
'header', 'content', 'buffers'])
except Exception:
self.log.error(
'db::db error finding tasks to resubmit', exc_info=True)
return finish(error.wrap_exception())
# validate msg_ids
found_ids = [rec['msg_id'] for rec in records]
pending_ids = [
msg_id for msg_id in found_ids if msg_id in self.pending]
if len(records) > len(msg_ids):
try:
raise RuntimeError("DB appears to be in an inconsistent state."
"More matching records were found than should exist")
except Exception:
self.log.exception("Failed to resubmit task")
return finish(error.wrap_exception())
elif len(records) < len(msg_ids):
missing = [m for m in msg_ids if m not in found_ids]
try:
raise KeyError("No such msg(s): %r" % missing)
except KeyError:
self.log.exception("Failed to resubmit task")
return finish(error.wrap_exception())
elif pending_ids:
pass
# no need to raise on resubmit of pending task, now that we
# resubmit under new ID, but do we want to raise anyway?
# msg_id = invalid_ids[0]
# try:
# raise ValueError("Task(s) %r appears to be inflight" % )
# except Exception:
# return finish(error.wrap_exception())
# mapping of original IDs to resubmitted IDs
resubmitted = {}
# send the messages
for rec in records:
header = rec['header']
msg = self.session.msg(header['msg_type'], parent=header)
msg_id = msg['msg_id']
msg['content'] = rec['content']
# use the old header, but update msg_id and timestamp
fresh = msg['header']
header['msg_id'] = fresh['msg_id']
header['date'] = fresh['date']
msg['header'] = header
self.session.send(self.resubmit, msg, buffers=rec['buffers'])
resubmitted[rec['msg_id']] = msg_id
self.pending.add(msg_id)
msg['buffers'] = rec['buffers']
try:
self.db.add_record(msg_id, init_record(msg))
except Exception:
self.log.error(
"db::DB Error updating record: %s", msg_id, exc_info=True)
return finish(error.wrap_exception())
finish(dict(status='ok', resubmitted=resubmitted))
# store the new IDs in the Task DB
for msg_id, resubmit_id in iteritems(resubmitted):
try:
self.db.update_record(msg_id, {'resubmitted': resubmit_id})
except Exception:
self.log.error(
"db::DB Error updating record: %s", msg_id, exc_info=True)
def _extract_record(self, rec):
"""decompose a TaskRecord dict into subsection of reply for get_result"""
io_dict = {}
for key in ('execute_input', 'execute_result', 'error', 'stdout', 'stderr'):
io_dict[key] = rec[key]
content = {
'header': rec['header'],
'metadata': rec['metadata'],
'result_metadata': rec['result_metadata'],
'result_header': rec['result_header'],
'result_content': rec['result_content'],
'received': rec['received'],
'io': io_dict,
}
if rec['result_buffers']:
buffers = list(map(bytes, rec['result_buffers']))
else:
buffers = []
return content, buffers
def get_results(self, client_id, msg):
"""Get the result of 1 or more messages."""
content = msg['content']
msg_ids = sorted(set(content['msg_ids']))
statusonly = content.get('status_only', False)
pending = []
completed = []
content = dict(status='ok')
content['pending'] = pending
content['completed'] = completed
buffers = []
if not statusonly:
try:
matches = self.db.find_records(dict(msg_id={'$in': msg_ids}))
# turn match list into dict, for faster lookup
records = {}
for rec in matches:
records[rec['msg_id']] = rec
except Exception:
content = error.wrap_exception()
self.log.exception("Failed to get results")
self.session.send(self.query, "result_reply", content=content,
parent=msg, ident=client_id)
return
else:
records = {}
for msg_id in msg_ids:
if msg_id in self.pending:
pending.append(msg_id)
elif msg_id in self.all_completed:
completed.append(msg_id)
if not statusonly:
c, bufs = self._extract_record(records[msg_id])
content[msg_id] = c
buffers.extend(bufs)
elif msg_id in records:
                if records[msg_id]['completed']:
completed.append(msg_id)
c, bufs = self._extract_record(records[msg_id])
content[msg_id] = c
buffers.extend(bufs)
else:
pending.append(msg_id)
else:
try:
raise KeyError('No such message: ' + msg_id)
except:
content = error.wrap_exception()
break
self.session.send(self.query, "result_reply", content=content,
parent=msg, ident=client_id,
buffers=buffers)
def get_history(self, client_id, msg):
"""Get a list of all msg_ids in our DB records"""
try:
msg_ids = self.db.get_history()
except Exception as e:
content = error.wrap_exception()
self.log.exception("Failed to get history")
else:
content = dict(status='ok', history=msg_ids)
self.session.send(self.query, "history_reply", content=content,
parent=msg, ident=client_id)
def db_query(self, client_id, msg):
"""Perform a raw query on the task record database."""
content = msg['content']
query = extract_dates(content.get('query', {}))
keys = content.get('keys', None)
buffers = []
empty = list()
try:
records = self.db.find_records(query, keys)
except Exception as e:
content = error.wrap_exception()
self.log.exception("DB query failed")
else:
# extract buffers from reply content:
if keys is not None:
buffer_lens = [] if 'buffers' in keys else None
result_buffer_lens = [] if 'result_buffers' in keys else None
else:
buffer_lens = None
result_buffer_lens = None
for rec in records:
# buffers may be None, so double check
b = rec.pop('buffers', empty) or empty
if buffer_lens is not None:
buffer_lens.append(len(b))
buffers.extend(b)
rb = rec.pop('result_buffers', empty) or empty
if result_buffer_lens is not None:
result_buffer_lens.append(len(rb))
buffers.extend(rb)
content = dict(status='ok', records=records, buffer_lens=buffer_lens,
result_buffer_lens=result_buffer_lens)
# self.log.debug (content)
self.session.send(self.query, "db_reply", content=content,
parent=msg, ident=client_id,
buffers=buffers)
|
|
import itertools
from wtforms.validators import ValidationError
from wtforms.fields import FieldList, FormField, SelectFieldBase
try:
from wtforms.fields import _unset_value as unset_value
except ImportError:
from wtforms.utils import unset_value
from flask_admin._compat import iteritems
from .widgets import (InlineFieldListWidget, InlineFormWidget,
AjaxSelect2Widget, XEditableWidget)
class InlineFieldList(FieldList):
widget = InlineFieldListWidget()
def __init__(self, *args, **kwargs):
super(InlineFieldList, self).__init__(*args, **kwargs)
def __call__(self, **kwargs):
# Create template
meta = getattr(self, 'meta', None)
if meta:
template = self.unbound_field.bind(form=None, name='', _meta=meta)
else:
template = self.unbound_field.bind(form=None, name='')
# Small hack to remove separator from FormField
if isinstance(template, FormField):
template.separator = ''
template.process(None)
return self.widget(self,
template=template,
check=self.display_row_controls,
**kwargs)
def display_row_controls(self, field):
return True
def process(self, formdata, data=None):
res = super(InlineFieldList, self).process(formdata, data)
# Postprocess - contribute flag
if formdata:
for f in self.entries:
key = 'del-%s' % f.id
f._should_delete = key in formdata
return res
def validate(self, form, extra_validators=tuple()):
"""
Validate this FieldList.
Note that FieldList validation differs from normal field validation in
that FieldList validates all its enclosed fields first before running any
of its own validators.
"""
self.errors = []
# Run validators on all entries within
for subfield in self.entries:
if not self.should_delete(subfield) and not subfield.validate(form):
self.errors.append(subfield.errors)
chain = itertools.chain(self.validators, extra_validators)
self._run_validation_chain(form, chain)
return len(self.errors) == 0
def should_delete(self, field):
return getattr(field, '_should_delete', False)
def populate_obj(self, obj, name):
values = getattr(obj, name, None)
try:
ivalues = iter(values)
except TypeError:
ivalues = iter([])
candidates = itertools.chain(ivalues, itertools.repeat(None))
_fake = type(str('_fake'), (object, ), {})
output = []
for field, data in zip(self.entries, candidates):
if not self.should_delete(field):
fake_obj = _fake()
fake_obj.data = data
field.populate_obj(fake_obj, 'data')
output.append(fake_obj.data)
setattr(obj, name, output)
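    # populate_obj sketch: the entries above are zipped against the object's
    # existing values, padded with None for newly added rows; any row whose
    # "del-<id>" checkbox was submitted is skipped via should_delete(), so
    # deleted items simply drop out of the list written back onto ``obj``.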
class InlineFormField(FormField):
"""
Inline version of the ``FormField`` widget.
"""
widget = InlineFormWidget()
class InlineModelFormField(FormField):
"""
Customized ``FormField``.
Excludes model primary key from the `populate_obj` and
handles `should_delete` flag.
"""
widget = InlineFormWidget()
def __init__(self, form_class, pk, form_opts=None, **kwargs):
super(InlineModelFormField, self).__init__(form_class, **kwargs)
self._pk = pk
self.form_opts = form_opts
def get_pk(self):
return getattr(self.form, self._pk).data
def populate_obj(self, obj, name):
for name, field in iteritems(self.form._fields):
if name != self._pk:
field.populate_obj(obj, name)
class ListEditableFieldList(FieldList):
"""
Modified FieldList to allow for alphanumeric primary keys.
Used in the editable list view.
"""
widget = XEditableWidget()
def __init__(self, *args, **kwargs):
super(ListEditableFieldList, self).__init__(*args, **kwargs)
# min_entries = 1 is required for the widget to determine the type
self.min_entries = 1
def _extract_indices(self, prefix, formdata):
offset = len(prefix) + 1
for k in formdata:
if k.startswith(prefix):
k = k[offset:].split('-', 1)[0]
# removed "if k.isdigit():"
yield k
def _add_entry(self, formdata=None, data=unset_value, index=None):
assert not self.max_entries or len(self.entries) < self.max_entries, \
'You cannot have more than max_entries entries in this FieldList'
if index is None:
index = self.last_index + 1
self.last_index = index
# '%s-%s' instead of '%s-%d' to allow alphanumeric
name = '%s-%s' % (self.short_name, index)
id = '%s-%s' % (self.id, index)
# support both wtforms 1 and 2
meta = getattr(self, 'meta', None)
if meta:
field = self.unbound_field.bind(
form=None, name=name, prefix=self._prefix, id=id, _meta=meta
)
else:
field = self.unbound_field.bind(
form=None, name=name, prefix=self._prefix, id=id
)
field.process(formdata, data)
self.entries.append(field)
return field
def populate_obj(self, obj, name):
# return data from first item, instead of a list of items
setattr(obj, name, self.data.pop())
class AjaxSelectField(SelectFieldBase):
"""
Ajax Model Select Field
"""
widget = AjaxSelect2Widget()
separator = ','
def __init__(self, loader, label=None, validators=None, allow_blank=False, blank_text=u'', **kwargs):
super(AjaxSelectField, self).__init__(label, validators, **kwargs)
self.loader = loader
self.allow_blank = allow_blank
self.blank_text = blank_text
def _get_data(self):
if self._formdata:
model = self.loader.get_one(self._formdata)
if model is not None:
self._set_data(model)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def _format_item(self, item):
value = self.loader.format(self.data)
return (value[0], value[1], True)
def process_formdata(self, valuelist):
if valuelist:
if self.allow_blank and valuelist[0] == u'__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank and self.data is None:
raise ValidationError(self.gettext(u'Not a valid choice'))
class AjaxSelectMultipleField(AjaxSelectField):
"""
Ajax-enabled model multi-select field.
"""
widget = AjaxSelect2Widget(multiple=True)
def __init__(self, loader, label=None, validators=None, default=None, **kwargs):
if default is None:
default = []
super(AjaxSelectMultipleField, self).__init__(loader, label, validators, default=default, **kwargs)
self._invalid_formdata = False
def _get_data(self):
formdata = self._formdata
if formdata:
data = []
# TODO: Optimize?
for item in formdata:
model = self.loader.get_one(item) if item else None
if model:
data.append(model)
else:
self._invalid_formdata = True
self._set_data(data)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def process_formdata(self, valuelist):
self._formdata = set()
for field in valuelist:
for n in field.split(self.separator):
self._formdata.add(n)
def pre_validate(self, form):
if self._invalid_formdata:
raise ValidationError(self.gettext(u'Not a valid choice'))
|
|
try:
    from collections.abc import Iterable
except ImportError:  # Python 2
    from collections import Iterable
import json
import django
from django import forms
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from django.core import validators
from django.db import models
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
# from .utils import parse_array, edit_string_for_array # NOQA
TYPES = {
"int": int,
"smallint": int,
"bigint": int,
"text": force_text,
"double precision": float,
"varchar": force_text,
}
class SerializableList(list):
"""
A list that can convert to a JSON list or an XML string, depending on the
serialization method
"""
def replace(self, old, new, count=None):
"""
Replace old with new in every list item
"""
result = SerializableList([])
for item in self.__iter__():
if not isinstance(item, str):
result.append(item)
else:
result.append(item.replace(old, new))
if count is not None and len(result) == count:
break
return result
def encode(self, encoding=None, errors='strict'):
import sys
encoding = encoding or sys.getdefaultencoding()
result = SerializableList([])
for item in self.__iter__():
if not isinstance(item, str):
result.append(item)
else:
result.append(item.encode(encoding, errors))
return result
def decode(self, encoding=None, errors='strict'):
import sys
encoding = encoding or sys.getdefaultencoding()
result = SerializableList([])
for item in self.__iter__():
if not isinstance(item, str):
result.append(item)
else:
result.append(item.decode(encoding, errors))
return result
def __repr__(self):
import json
return json.dumps(list(self.__iter__()))
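    # Behaviour sketch for SerializableList: string methods are applied
    # element-wise and non-string items pass through untouched, e.g.
    #
    #     SerializableList(["a-b", 1]).replace("-", "_")  # -> ["a_b", 1]
    #
    # while repr() renders the list as its JSON form.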
def _cast_to_unicode(data):
if isinstance(data, (list, tuple, SerializableList)):
return SerializableList([_cast_to_unicode(x) for x in data])
elif isinstance(data, six.string_types):
return force_text(data)
return data
def _cast_to_type(data, type_cast):
if isinstance(data, (list, tuple)):
return [_cast_to_type(x, type_cast) for x in data]
if type_cast == str:
return force_text(data)
return type_cast(data)
def _unserialize(value):
if not isinstance(value, six.string_types):
return _cast_to_unicode(value)
try:
return _cast_to_unicode(json.loads(value))
except ValueError:
return _cast_to_unicode(value)
class Creator(object):
"""
Field descriptor that calls the to_python method on assignment.
This matches the Django<=1.9 fields.subclassing.Creator class.
See the Django 1.8 release notes where SubFieldBase was deprecated for
more: https://docs.djangoproject.com/en/1.10/releases/1.8/#subfieldbase
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
return self
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
class ArrayField(models.Field):
empty_strings_allowed = False
def __init__(self, dbtype="int", type_cast=None, dimension=1, *args, **kwargs):
self._array_type = dbtype
type_key = self._array_type.split("(")[0]
self._explicit_type_cast = False
if type_cast is not None:
self._type_cast = type_cast
self._explicit_type_cast = True
elif type_key in TYPES:
self._type_cast = TYPES[type_key]
else:
self._type_cast = lambda x: x
self._dimension = dimension
kwargs.setdefault("blank", True)
kwargs.setdefault("null", True)
kwargs.setdefault("default", None)
super(ArrayField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name, **kwargs):
super(ArrayField, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, Creator(self))
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if lookup_type == "contains":
return [self.get_prep_value(value)]
return super(ArrayField, self).get_db_prep_lookup(lookup_type, value, connection, prepared)
def formfield(self, **params):
params.setdefault("form_class", ArrayFormField)
# Django 1.5 does not support "choices_form_class" parameter
if django.VERSION[:2] >= (1, 6):
params.setdefault("choices_form_class", forms.TypedMultipleChoiceField)
if self.choices:
params.setdefault("choices", self.get_choices(include_blank=False))
params.setdefault("coerce", self._type_cast)
return super(ArrayField, self).formfield(**params)
def get_db_prep_value(self, value, connection, prepared=False):
value = value if prepared else self.get_prep_value(value)
if not value or isinstance(value, six.string_types):
return value
return _cast_to_type(value, self._type_cast)
def get_prep_value(self, value):
return value if isinstance(value, (six.string_types, list)) or not isinstance(value, Iterable) else list(value)
def to_python(self, value):
if value is None or value == "":
return None
return _unserialize(value)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return json.dumps(self.get_prep_value(value),
cls=DjangoJSONEncoder)
def validate(self, value, model_instance):
if value is None and not self.null:
raise ValidationError(self.error_messages['null'])
if not self.blank and value in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['blank'])
for val in value:
super(ArrayField, self).validate(val, model_instance)
def deconstruct(self):
name, path, args, kwargs = super(ArrayField, self).deconstruct()
if self._array_type != "int":
kwargs["dbtype"] = self._array_type
if self._dimension != 1:
kwargs["dimension"] = self._dimension
if self._explicit_type_cast:
kwargs["type_cast"] = self._type_cast
if self.blank:
kwargs.pop("blank", None)
else:
kwargs["blank"] = self.blank
if self.null:
kwargs.pop("null", None)
else:
kwargs["null"] = self.null
if self.default is None:
kwargs.pop("default", None)
else:
kwargs["default"] = self.default
return name, path, args, kwargs
def db_type(self, connection):
return "{0}{1}".format(self._array_type, "[]" * self._dimension)
def get_transform(self, name):
transform = super(ArrayField, self).get_transform(name)
if transform:
return transform
try:
index = int(name)
except ValueError:
pass
else:
index += 1 # postgres uses 1-indexing
return IndexTransformFactory(index, self)
try:
start, end = name.split("_")
start = int(start) + 1
end = int(end) # don't add one here because postgres slices are weird
except ValueError:
pass
else:
return SliceTransformFactory(start, end)
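    # Usage sketch for the transforms above (``Item`` and its ``tags`` field
    # are hypothetical):
    #
    #     Item.objects.filter(tags__0="red")   # index lookup, rendered as "tags[1]"
    #     Item.objects.filter(tags__0_2=...)   # slice lookup, rendered as "tags[1:2]"
    #
    # The int() branch adds 1 because PostgreSQL arrays are 1-indexed; a
    # "<start>_<end>" name becomes a SliceTransform instead.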
class IntegerArrayField(ArrayField):
def __init__(self, *args, **kwargs):
kwargs.setdefault("dbtype", "int")
super(IntegerArrayField, self).__init__(*args, **kwargs)
class SmallIntegerArrayField(ArrayField):
def __init__(self, *args, **kwargs):
kwargs.setdefault("dbtype", "smallint")
super(SmallIntegerArrayField, self).__init__(*args, **kwargs)
class BigIntegerArrayField(ArrayField):
def __init__(self, *args, **kwargs):
kwargs.setdefault("dbtype", "bigint")
super(BigIntegerArrayField, self).__init__(*args, **kwargs)
class TextArrayField(ArrayField):
def __init__(self, *args, **kwargs):
kwargs.setdefault("dbtype", "text")
super(TextArrayField, self).__init__(*args, **kwargs)
class FloatArrayField(ArrayField):
def __init__(self, *args, **kwargs):
kwargs.setdefault("dbtype", "double precision")
super(FloatArrayField, self).__init__(*args, **kwargs)
class DateArrayField(ArrayField):
def __init__(self, *args, **kwargs):
kwargs.setdefault("dbtype", "date")
super(DateArrayField, self).__init__(*args, **kwargs)
class DateTimeArrayField(ArrayField):
def __init__(self, *args, **kwargs):
kwargs.setdefault("dbtype", "timestamp with time zone")
super(DateTimeArrayField, self).__init__(*args, **kwargs)
class ArrayFormField(forms.Field):
default_error_messages = {
"invalid": _("Enter a list of values, joined by commas. E.g. \"a,b,c\"."),
}
def __init__(self, max_length=None, min_length=None, delim=None,
strip=True, *args, **kwargs):
if delim is not None:
self.delim = delim
else:
self.delim = ","
self.strip = strip
if 'initial' in kwargs and kwargs['initial'] is None:
kwargs['initial'] = []
super(ArrayFormField, self).__init__(*args, **kwargs)
def clean(self, value):
if not value:
return []
# If Django already parsed value to list
if isinstance(value, list):
return value
try:
value = value.split(self.delim)
if self.strip:
value = [x.strip() for x in value]
except Exception:
raise ValidationError(self.error_messages["invalid"])
return value
def prepare_value(self, value):
if isinstance(value, (list, tuple)): # if blank list/tuple return ''
return self.delim.join(force_text(v) for v in value)
return super(ArrayFormField, self).prepare_value(value)
def to_python(self, value):
if value is None or value == "":
return []
elif isinstance(value, (tuple, list)):
return value
return value.split(self.delim)
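    # clean()/prepare_value() sketch: with the default delimiter ","
    #
    #     ArrayFormField().clean("a, b, c")           # -> ["a", "b", "c"] (strip=True)
    #     ArrayFormField().prepare_value(["a", "b"])  # -> "a,b"
    #
    # so the field round-trips between the comma-joined widget value and a
    # Python list.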
if django.VERSION[:2] >= (1, 7):
from django.db.models import Lookup, Transform
class ContainsLookup(Lookup):
lookup_name = "contains"
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
var = "%s @> %s::%s" % (lhs, rhs, self.lhs.output_field.db_type(connection)), params
return var
class ContainedByLookup(Lookup):
lookup_name = "contained_by"
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return "%s <@ %s::%s" % (lhs, rhs, self.lhs.output_field.db_type(connection)), params
class OverlapLookup(Lookup):
lookup_name = "overlap"
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return "%s && %s::%s" % (lhs, rhs, self.lhs.output_field.db_type(connection)), params
class ArrayLenTransform(Transform):
lookup_name = "len"
@property
def output_type(self):
return models.IntegerField()
def as_sql(self, qn, connection):
lhs, params = qn.compile(self.lhs)
return "array_length(%s, 1)" % lhs, params
class AnyBaseLookup(Lookup):
comparator = "="
"""self.comparator holds the comparison operator to be applied to the condition"""
def as_sql(self, qn, connection):
"""
Basically, the array gets split up into rows (unnested) such that we can apply string comparators on the
            array's contents. Once these operations have been applied, the resulting set of PKs is used to identify
            for which rows the given condition is true.
:param qn: The SQLCompiler object used for compiling this query
:param connection: A DatabaseWrapper object
:return: a tuple (condition_string, parameter)
"""
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
table = self.lhs.alias
pk_name = qn.query.model._meta.pk.name
table_dot_pk_name = "%s.%s" % (table, pk_name)
return "{table_dot_pk_name} IN (" \
"SELECT DISTINCT tmp_table.{pk_name} " \
"FROM {table} AS tmp_table " \
"JOIN ( " \
"SELECT tmp_table2.{pk_name} AS id, unnest({arrayfield_name}::text[]) AS unnest " \
"FROM {table} AS tmp_table2) AS embedded_table ON embedded_table.{pk_name}=tmp_table.{pk_name} " \
"WHERE embedded_table.unnest {comparator} %s)".format(table_dot_pk_name=table_dot_pk_name,
pk_name=pk_name, table=table,
arrayfield_name=lhs,
comparator=self.comparator) % (
rhs, ), params
class AnyStartswithLookup(AnyBaseLookup):
lookup_name = "any_startswith"
comparator = "LIKE"
def process_rhs(self, qn, connection):
wildcarded_rhs_params = []
rhs, rhs_params = super(AnyStartswithLookup, self).process_rhs(qn, connection)
for param in rhs_params:
param = "%s%%" % param
wildcarded_rhs_params.append(param)
return rhs, wildcarded_rhs_params
class AnyIStartswithLookup(AnyStartswithLookup):
lookup_name = "any_istartswith"
comparator = "ILIKE"
class AnyEndswithLookup(AnyBaseLookup):
lookup_name = "any_endswith"
comparator = "LIKE"
def process_rhs(self, qn, connection):
wildcarded_rhs_params = []
rhs, rhs_params = super(AnyEndswithLookup, self).process_rhs(qn, connection)
for param in rhs_params:
param = "%%%s" % param
wildcarded_rhs_params.append(param)
return rhs, wildcarded_rhs_params
class AnyIEndswithLookup(AnyEndswithLookup):
lookup_name = "any_iendswith"
comparator = "ILIKE"
class AnyContainsLookup(AnyBaseLookup):
lookup_name = "any_contains"
comparator = "LIKE"
def process_rhs(self, qn, connection):
wildcarded_rhs_params = []
rhs, rhs_params = super(AnyContainsLookup, self).process_rhs(qn, connection)
for param in rhs_params:
param = "%%%s%%" % param
wildcarded_rhs_params.append(param)
return rhs, wildcarded_rhs_params
class AnyIContainsLookup(AnyContainsLookup):
lookup_name = "any_icontains"
comparator = "ILIKE"
ArrayField.register_lookup(ContainedByLookup)
ArrayField.register_lookup(ContainsLookup)
ArrayField.register_lookup(OverlapLookup)
ArrayField.register_lookup(ArrayLenTransform)
ArrayField.register_lookup(AnyStartswithLookup)
ArrayField.register_lookup(AnyIStartswithLookup)
ArrayField.register_lookup(AnyEndswithLookup)
ArrayField.register_lookup(AnyIEndswithLookup)
ArrayField.register_lookup(AnyContainsLookup)
ArrayField.register_lookup(AnyIContainsLookup)
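    # Usage sketch for the lookups registered above (``Post`` and its ``tags``
    # field are hypothetical):
    #
    #     Post.objects.filter(tags__any_startswith="py")     # any element LIKE 'py%'
    #     Post.objects.filter(tags__any_icontains="django")  # any element ILIKE '%django%'
    #
    # Each lookup unnests the array (see AnyBaseLookup.as_sql) and keeps the
    # rows where at least one element matches.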
class IndexTransform(Transform):
def __init__(self, index, field, *args, **kwargs):
super(IndexTransform, self).__init__(*args, **kwargs)
self.index = index
self.field = field
def as_sql(self, qn, connection):
lhs, params = qn.compile(self.lhs)
return "%s[%s]" % (lhs, self.index), params
# TODO: Temporary not supported nested index lookup
# @property
# def output_type(self):
# output_type = self.field.__class__(dimension=self.field._dimension-1)
# output_type._array_type = self.field._array_type
# output_type._explicit_type_cast = self.field._explicit_type_cast
# output_type._type_cast = self.field._type_cast
# output_type.set_attributes_from_name(self.field.name)
# return output_type
class SliceTransform(Transform):
def __init__(self, start, end, *args, **kwargs):
super(SliceTransform, self).__init__(*args, **kwargs)
self.start = start
self.end = end
def as_sql(self, qn, connection):
lhs, params = qn.compile(self.lhs)
return "%s[%s:%s]" % (lhs, self.start, self.end), params
class IndexTransformFactory(object):
def __init__(self, index, field):
self.index = index
self.field = field
def __call__(self, *args, **kwargs):
return IndexTransform(self.index, self.field, *args, **kwargs)
class SliceTransformFactory(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __call__(self, *args, **kwargs):
return SliceTransform(self.start, self.end, *args, **kwargs)
# South support
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([
(
[ArrayField], # class
[], # positional params
{
"dbtype": ["_array_type", {"default": "int"}],
"dimension": ["_dimension", {"default": 1}],
"null": ["null", {"default": True}],
}
)
], ["^djorm_pgarray\.fields\.ArrayField"])
add_introspection_rules([
(
[TextArrayField], # class
[], # positional params
{
"dimension": ["_dimension", {"default": 1}],
"null": ["null", {"default": True}],
}
)
], ["^djorm_pgarray\.fields\.TextArrayField"])
add_introspection_rules([
(
[FloatArrayField], # class
[], # positional params
{
"dimension": ["_dimension", {"default": 1}],
"null": ["null", {"default": True}],
}
)
], ["^djorm_pgarray\.fields\.FloatArrayField"])
add_introspection_rules([
(
[IntegerArrayField], # class
[], # positional params
{
"dimension": ["_dimension", {"default": 1}],
"null": ["null", {"default": True}],
}
)
], ["^djorm_pgarray\.fields\.IntegerArrayField"])
add_introspection_rules([
(
[BigIntegerArrayField], # class
[], # positional params
{
"dimension": ["_dimension", {"default": 1}],
"null": ["null", {"default": True}],
}
)
], ["^djorm_pgarray\.fields\.BigIntegerArrayField"])
add_introspection_rules([
(
[SmallIntegerArrayField], # class
[], # positional params
{
"dimension": ["_dimension", {"default": 1}],
"null": ["null", {"default": True}],
}
)
], ["^djorm_pgarray\.fields\.SmallIntegerArrayField"])
add_introspection_rules([
(
[DateTimeArrayField], # class
[], # positional params
{
"dimension": ["_dimension", {"default": 1}],
"null": ["null", {"default": True}],
}
)
], ["^djorm_pgarray\.fields\.DateTimeArrayField"])
add_introspection_rules([
(
[DateArrayField], # class
[], # positional params
{
"dimension": ["_dimension", {"default": 1}],
"null": ["null", {"default": True}],
}
)
], ["^djorm_pgarray\.fields\.DateArrayField"])
except ImportError:
pass
|
|
from nose.tools import assert_equal, assert_true
from datetime import datetime as dt
from test_util import make_path
from letter_parser import get_action, fst, get_year, \
get_author, get_recipient, parse_file, get_year_month_day, \
process_content_line
def test_get_action():
wrapper = lambda l: get_action(l, fst)
for l in ('<B_ALLEN>', '<P_8>'):
assert_equal(
'non_empty',
wrapper(l)
)
for l in ('\n', ''):
assert_equal(
'empty',
wrapper(l)
)
assert_equal(
'<q',
wrapper('<Q_ALL_A_1579_T_WALLEN> <L_ALLEN_001>')
)
assert_equal(
'author',
wrapper('AUTHOR:WILLIAM_ALLEN:MALE:_:1532:47')
)
assert_equal(
'recipient',
wrapper('RECIPIENT:RICHARD_HOPKINS:MALE:_:1546?:33?')
)
assert_equal(
'non_empty',
wrapper('and being discreete and well experimented by their owne long miseryes I doubt not but now or very speedily they will repayre all defalts and')
)
fst.current = 'C'
assert_equal(
'letter_end',
wrapper('<Q_')
)
fst.current = 'Q'
assert_equal(
'{ed',
wrapper('{ED:ANNE,_COUNTESS_OF_ARUNDEL,_TO_LORD_TREASURER_BURLEIGH.}')
)
def test_get_year():
l = '<Q_ALL_A_1579_T_WALLEN> <L_ALLEN_001> <A_WILLIAM_ALLEN> <A-GENDER_MALE>'
assert_equal(
1579,
get_year(l)
)
def test_get_year_month_day():
l = '{ED:100._ALLEN_TO_JOHN_ARDEN._ROME,_4_SEPTEMBER_1593.}'
assert_equal(
(1593, 9, 4),
get_year_month_day(l)
)
def test_get_year_month_day_1():
l = "{ED:6._EARL_OF_SHREWSBURY'S_LETTERS_TO_WILLIAM_WENTWORTH._[A]}"
assert_equal(
(None, None, None),
get_year_month_day(l)
)
def test_get_year_month_day_2():
l = '{ED:Antwerp,_10_November,_1593.}'
assert_equal(
(1593, 11, 10),
get_year_month_day(l)
)
def test_get_year_month_day_3():
l = '{ED:?_November_1627.}'
assert_equal(
(1627, 11, 1),
get_year_month_day(l)
)
def test_get_year_month_day_4():
l = '{ED:178._GEORGE_CELY_TO_SIR_JOHN_WESTON_[DRAFT]_[?_JULY_1482]}'
assert_equal(
(1482, 7, 1),
get_year_month_day(l)
)
def test_get_author():
l = 'AUTHOR:WILLIAM_ALLEN:MALE:_:1532:47'
assert_equal(
'WILLIAM_ALLEN',
get_author(l)
)
def test_get_recipient():
l = 'RECIPIENT:RICHARD_HOPKINS:MALE:_:1546?:33?'
assert_equal(
'RICHARD_HOPKINS',
get_recipient(l)
)
def test_process_content_line():
l = ' <paren> yet once to be </paren> complayne of'
assert_equal(
process_content_line(l),
' yet once to be complayne of'
)
def test_process_content_line_1():
ls = ('ALLEN,8.001.2', 'ARUNDEL,11.001.2', '{COM:ADDRESSED:}', '{TEXT:shalbe}', )
for l in ls:
assert_equal(
process_content_line(l),
''
)
def test_process_content_line_2():
ls = ('yo=r=', 'y=u=')
es = ('yor', 'yu')
for e, l in zip(es, ls):
assert_equal(
process_content_line(l),
e
)
def test_parse_file():
path = make_path('test/data/allen.txt')
letters = parse_file(path)
assert_equal(4,
len(letters))
l1 = letters[0]
assert_equal(
'WILLIAM_ALLEN',
l1['sender_id']
)
assert_equal(
['RICHARD_HOPKINS'],
l1['recipient_ids']
)
assert_equal(
dt(1579, 4, 5),
l1['datetime']
)
assert_true(
l1['body'].startswith('Mr. Hopkins')
)
assert_true(
l1['body'].endswith("Loven chez Madame d'Hungerford . \n")
)
l2 = letters[1]
assert_equal(
'WILLIAM_ALLEN',
l2['sender_id']
)
assert_equal(
['OWEN_LEWIS'],
l2['recipient_ids']
)
assert_equal(
dt(1579, 5, 12),
l2['datetime']
)
# print(l2['body'])
assert_true(
l2['body'].startswith('Most dearly beloved')
)
assert_true(
l2['body'].endswith('Romae . \n')
)
black_list = ('{COM:DIAERESIS_ABOVE_THE_LETTER_e_IN_AUDOENO}',
'ALLEN,231.003.172',
'<paren>',
'</paren>',
'Mons=r=',
'$1579')
for l in letters:
for i in black_list:
            print(i)
            print(l['body'])
assert_true(i not in l['body'])
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# res/ rep cycle
from app import app
from app import r
from app import g
from app import logging
from app import salt
from app import red
from app import RqlError
import urllib2
from flask import (render_template)
from flask import redirect, make_response
from flask import jsonify
from flask import abort, request
from flask import session
import hashlib
from random import randint
secret_key = app.secret_key
from app import sendMail
from payments import process_payments
@app.route('/admin/', methods=['POST', 'GET'])
def adminSign():
if request.method == 'POST':
if not request.json:
abort(400)
if request.headers['Content-Type'] != 'application/json; charset=UTF-8':
abort(400)
username = request.json.get('username')
password = request.json.get('password')
try:
user = r.table('Admin').get(username).run(g.rdb_conn)
except Exception, e:
logging.warning('DB failed on /admin/ -> user not found')
raise e
if user is None:
resp = make_response(jsonify({"Not Found": "User Not Found"}), 404)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
session[username] = username
resp = make_response(jsonify({"OK": "Signed In"}), 200)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
return render_template('adminSignin.html')
@app.route('/api/signIn/', methods=['POST', 'PUT'])
def signIn():
if not request.json:
abort(400)
if request.headers['Content-Type'] != 'application/json; charset=UTF-8':
abort(400)
session.permanent = True
password = request.json.get('password')
username = request.json.get('username')
# join to another table
try:
user = r.table('UsersInfo').get(username).run(g.rdb_conn)
except Exception, e:
logging.warning('DB signIn failed on /api/signIn/ -> user not found')
raise e
if user is None:
resp = make_response(jsonify({"Not Found": "User Not Found"}), 404)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
hashed_password = hashlib.sha512(str(password) + salt).hexdigest()
try:
user = r.table('UsersInfo').get(username).run(g.rdb_conn)
if str(user['password']) != str(hashed_password):
# add user to session then log in
resp = make_response(
jsonify({"Password": "Incorrect Password"}), 404)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
except RqlError:
logging.warning('raise RqlError DB signIn failed on /api/signIn/')
# manage sessions - add user to session
# redis sessions -> flask
# redis k/v store | dict
session[username] = username
resp = make_response(jsonify({"OK": "Signed In"}), 200)
resp.headers['Content-Type'] = "application/json"
resp.set_cookie('username',value=username)
resp.cache_control.no_cache = True
return resp
@app.route('/api/signUp/', methods=['POST'])
def getRandID():
if not request.json:
abort(400)
if request.headers['Content-Type'] != 'application/json; charset=UTF-8':
abort(400)
# use the mobile number as the id number its a unique entity
username = request.json.get('username')
email = request.json.get('email')
# then update userInfo
password = request.json.get('password')
email = str(email)
username = str(username)
try:
user = r.table('UsersInfo').get(username).run(g.rdb_conn)
if user is not None:
resp = make_response(jsonify({"Error": "User Exists"}), 400)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
"""
user = r.table('UsersInfo').filter({"email": email}).limit(1).run(g.rdb_conn)
userData =[]
for data in user:
userData.append(data)
if userData != []:
resp = make_response(jsonify({"Error": "User Email Exists"}), 400)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
"""
except RqlError:
logging.warning('DB code verify failed on /api/signUp/')
resp = make_response(jsonify({"Error": "503 DB error"}), 503)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
SMScode = randint(10000, 99999)
# verify user send email with code
# sendText(mobileNo, SMScode)
# @task sendMail
try:
sendMail(email, SMScode, username)
except urllib2.URLError:
logging.warning('sendMail verify failed on /api/signUp/')
abort(500)
except Exception, e:
logging.warning('SendMail error on /api/signUp/ %s' %(e) )
hashed_password = hashlib.sha512(password + salt).hexdigest()
try:
# r.table('UsersInfo').get(mobileNo).update({"smscode": SMScode}).run(g.rdb_conn)
        r.table('UsersInfo').insert({
            "state": "", "fname": "", "lname": "", "username": username, "dob": "",
            "email": email, "password": hashed_password, "smscode": SMScode, "mobileNo": ""
        }).run(g.rdb_conn)
except RqlError:
logging.warning('DB code verify failed on /api/signUp/')
resp = make_response(jsonify({"Error": "503 DB error"}), 503)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
# add to sessions then login
session[username] = username
# return redirect()
resp = make_response(jsonify({"OK": "Signed Up"}), 202)
resp.set_cookie('username',value=username)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
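# Request sketch for /api/signUp/ (values are illustrative): the endpoint
# expects a JSON body such as
#
#     {"username": "alice", "email": "alice@example.com", "password": "secret"}
#
# sent with Content-Type "application/json; charset=UTF-8"; on success it
# replies 202 {"OK": "Signed Up"} and sets a "username" cookie.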
@app.route('/api/newsLetter/', methods=['POST'])
def addNewsLetter():
if not request.json:
abort(400)
if request.headers['Content-Type'] != 'application/json; charset=UTF-8':
abort(400)
if 'username' not in request.cookies:
return redirect('/')
email = request.json.get('email')
# mobile no is the id - primary key
try:
r.table('newsLetter').insert({
'email': email,
}).run(g.rdb_conn)
except RqlError:
logging.warning('DB could not write on /api/newsLetter/')
resp = make_response(jsonify({'Error': 'Save Failed'}), 503)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
resp = make_response(jsonify({'OK': 'Content Saved'}), 202)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
@app.route('/logout/', methods=['GET'])
def logout():
# remove from session and clear cookie
if 'username' not in request.cookies:
return redirect('/')
username = request.cookies.get('username')
session.pop(username, None)
resp = make_response( redirect('/') )
resp.set_cookie('username', '', expires=0)
return resp
@app.route('/confirm/<smscode>/', methods=['PUT', 'POST'])
def confirmUser(smscode):
# make request to get one task
if 'username' not in request.cookies:
return redirect('/')
username = request.cookies.get('username')
try:
user = r.table(
'UsersInfo').get(username).pluck('smscode').run(g.rdb_conn)
r.table('UsersInfo').get(username).update({"userVerified": "yes"})
except RqlError:
logging.warning('DB op failed on /confirmUser/')
resp = make_response(jsonify({"Error": "503 DB error"}), 503)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
    if str(user) != str(smscode):
        # email verification failed: the submitted code does not match
        return
return redirect("/task/createTask/", code=302)
@app.route('/post_payment/', methods=['GET', 'POST'])
def post_payment_pesapal():
if 'username' not in request.cookies:
return redirect('/')
username = request.cookies.get('username')
# with ref set in rand generator
pesapal_merchant_ref = request.args.get('pesapal_merchant_reference')
pesapal_merchant_id = request.args.get('pesapal_transaction_tracking_id')
print(pesapal_merchant_id)
print(pesapal_merchant_ref)
# store merchant info in db
# basic post_payment page TO LOAD
pesapal_data = { "pesapal_transaction_tracking_id": pesapal_merchant_id,
"pesapal_merchant_reference": pesapal_merchant_ref, "username": username }
try:
r.table('Payments').insert(pesapal_data).run(g.rdb_conn)
except Exception:
logging.warning('DB code verify failed on /post_payment/')
resp = make_response(jsonify({"Error": "503 DB error"}), 503)
resp.headers['Content-Type'] = "application/json"
resp.cache_control.no_cache = True
return resp
# optional get payment status - info sent to pesapla ipn notification
# per user info - render post payment page - by merchant ref
"""
post_params = {
'pesapal_merchant_reference': '000',
'pesapal_transaction_tracking_id': '000'
}
"""
status = process_payments.queryPaymentByRef(pesapal_data)
return render_template('PostPayment.html', status=status, username=username)
@app.route('/pesapal_ipn_notification/', methods=['POST'])
def ipn_notify():
#url = request.get.args('url')
# compare with merchant ref
pesapal_merchant_ref = request.args.get('pesapal_merchant_reference')
pesapal_merchant_id = request.args.get('pesapal_transaction_tracking_id')
# store in db per user info in payments
resp = make_response(jsonify({"OK": "Notification Received"}), 200)
resp.cache_control.no_cache = True
return resp
@app.route('/process_payments/', methods=['GET'])
def process_payment():
if 'username' not in request.cookies:
return redirect('/')
if request.cookies.get('username') == '' or request.cookies.get('username') is None:
return redirect('/')
username = request.cookies.get('username')
# fetch url from redis - attach iframe to window
url = red.hget(username, 'url')
return render_template('pesapal_payment.html', username=username, iframe=url)
|
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for dealing with Cloud Datastore's Protobuf API.
The non-private functions are part of the API.
"""
import datetime
import itertools
from google.protobuf import struct_pb2
from google.type import latlng_pb2
from proto.datetime_helpers import DatetimeWithNanoseconds
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.datastore_v1.types import datastore as datastore_pb2
from google.cloud.datastore_v1.types import entity as entity_pb2
from google.cloud.datastore.entity import Entity
from google.cloud.datastore.key import Key
def _get_meaning(value_pb, is_list=False):
"""Get the meaning from a protobuf value.
:type value_pb: :class:`.entity_pb2.Value._pb`
:param value_pb: The *raw* protobuf value to be checked for an
associated meaning.
:type is_list: bool
:param is_list: Boolean indicating if the ``value_pb`` contains
a list value.
:rtype: int
:returns: The meaning for the ``value_pb`` if one is set, else
:data:`None`. For a list value, if there are disagreeing
              meanings it just returns a list of meanings. If all the
list meanings agree, it just condenses them.
"""
if is_list:
values = value_pb.array_value.values
# An empty list will have no values, hence no shared meaning
# set among them.
if len(values) == 0:
return None
# We check among all the meanings, some of which may be None,
# the rest which may be enum/int values.
all_meanings = [_get_meaning(sub_value_pb) for sub_value_pb in values]
unique_meanings = set(all_meanings)
if len(unique_meanings) == 1:
# If there is a unique meaning, we preserve it.
return unique_meanings.pop()
else: # We know len(value_pb.array_value.values) > 0.
# If the meaning is not unique, just return all of them.
return all_meanings
elif value_pb.meaning: # Simple field (int32).
return value_pb.meaning
return None
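# Sketch of _get_meaning's condensing behaviour: for a scalar value_pb the
# integer ``meaning`` field is returned directly; for a list, meanings such as
# [2, 2, 2] condense to 2, while disagreeing meanings like [2, None, 3] are
# returned as the full list so callers can see the mismatch.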
def _new_value_pb(entity_pb, name):
"""Add (by name) a new ``Value`` protobuf to an entity protobuf.
:type entity_pb: :class:`.entity_pb2.Entity`
:param entity_pb: An entity protobuf to add a new property to.
:type name: str
:param name: The name of the new property.
:rtype: :class:`.entity_pb2.Value`
:returns: The new ``Value`` protobuf that was added to the entity.
"""
# TODO(microgenerator): shouldn't need this. the issue is that
# we have wrapped and non-wrapped protos coming here.
properties = getattr(entity_pb.properties, "_pb", entity_pb.properties)
return properties.get_or_create(name)
def entity_from_protobuf(pb):
"""Factory method for creating an entity based on a protobuf.
The protobuf should be one returned from the Cloud Datastore
Protobuf API.
:type pb: :class:`.entity_pb2.Entity`
:param pb: The Protobuf representing the entity.
:rtype: :class:`google.cloud.datastore.entity.Entity`
:returns: The entity derived from the protobuf.
"""
if isinstance(pb, entity_pb2.Entity):
pb = pb._pb
key = None
if pb.HasField("key"): # Message field (Key)
key = key_from_protobuf(pb.key)
entity_props = {}
entity_meanings = {}
exclude_from_indexes = []
for prop_name, value_pb in pb.properties.items():
value = _get_value_from_value_pb(value_pb)
entity_props[prop_name] = value
# Check if the property has an associated meaning.
is_list = isinstance(value, list)
meaning = _get_meaning(value_pb, is_list=is_list)
if meaning is not None:
entity_meanings[prop_name] = (meaning, value)
# Check if ``value_pb`` was excluded from index. Lists need to be
# special-cased and we require all ``exclude_from_indexes`` values
# in a list agree.
if is_list and len(value) > 0:
exclude_values = set(
value_pb.exclude_from_indexes
for value_pb in value_pb.array_value.values
)
if len(exclude_values) != 1:
raise ValueError(
"For an array_value, subvalues must either "
"all be indexed or all excluded from "
"indexes."
)
if exclude_values.pop():
exclude_from_indexes.append(prop_name)
else:
if value_pb.exclude_from_indexes:
exclude_from_indexes.append(prop_name)
entity = Entity(key=key, exclude_from_indexes=exclude_from_indexes)
entity.update(entity_props)
entity._meanings.update(entity_meanings)
return entity
def _set_pb_meaning_from_entity(entity, name, value, value_pb, is_list=False):
"""Add meaning information (from an entity) to a protobuf.
:type entity: :class:`google.cloud.datastore.entity.Entity`
:param entity: The entity to be turned into a protobuf.
:type name: str
:param name: The name of the property.
:type value: object
:param value: The current value stored as property ``name``.
:type value_pb: :class:`.entity_pb2.Value`
:param value_pb: The protobuf value to add meaning / meanings to.
:type is_list: bool
:param is_list: (Optional) Boolean indicating if the ``value`` is
a list value.
"""
if name not in entity._meanings:
return
meaning, orig_value = entity._meanings[name]
# Only add the meaning back to the protobuf if the value is
# unchanged from when it was originally read from the API.
if orig_value is not value:
return
# For lists, we set meaning on each sub-element.
if is_list:
if not isinstance(meaning, list):
meaning = itertools.repeat(meaning)
val_iter = zip(value_pb.array_value.values, meaning)
for sub_value_pb, sub_meaning in val_iter:
if sub_meaning is not None:
sub_value_pb.meaning = sub_meaning
else:
value_pb.meaning = meaning
def entity_to_protobuf(entity):
"""Converts an entity into a protobuf.
:type entity: :class:`google.cloud.datastore.entity.Entity`
:param entity: The entity to be turned into a protobuf.
:rtype: :class:`.entity_pb2.Entity`
:returns: The protobuf representing the entity.
"""
entity_pb = entity_pb2.Entity()
if entity.key is not None:
key_pb = entity.key.to_protobuf()
entity_pb._pb.key.CopyFrom(key_pb._pb)
for name, value in entity.items():
value_is_list = isinstance(value, list)
value_pb = _new_value_pb(entity_pb, name)
# Set the appropriate value.
_set_protobuf_value(value_pb, value)
# Add index information to protobuf.
if name in entity.exclude_from_indexes:
if not value_is_list:
value_pb.exclude_from_indexes = True
for sub_value in value_pb.array_value.values:
sub_value.exclude_from_indexes = True
# Add meaning information to protobuf.
_set_pb_meaning_from_entity(
entity, name, value, value_pb, is_list=value_is_list
)
return entity_pb
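# Round-trip sketch (the kind, id and project below are illustrative):
#
#     key = Key("Task", 1234, project="my-project")
#     entity = Entity(key=key)
#     entity["done"] = False
#     pb = entity_to_protobuf(entity)
#     same = entity_from_protobuf(pb)   # same["done"] is False again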
def get_read_options(eventual, transaction_id):
"""Validate rules for read options, and assign to the request.
Helper method for ``lookup()`` and ``run_query``.
:type eventual: bool
:param eventual: Flag indicating if ``EVENTUAL`` or ``STRONG``
consistency should be used.
:type transaction_id: bytes
:param transaction_id: A transaction identifier (may be null).
:rtype: :class:`.datastore_pb2.ReadOptions`
:returns: The read options corresponding to the inputs.
:raises: :class:`ValueError` if ``eventual`` is ``True`` and the
``transaction_id`` is not ``None``.
"""
if transaction_id is None:
if eventual:
return datastore_pb2.ReadOptions(
read_consistency=datastore_pb2.ReadOptions.ReadConsistency.EVENTUAL
)
else:
return datastore_pb2.ReadOptions()
else:
if eventual:
raise ValueError("eventual must be False when in a transaction")
else:
return datastore_pb2.ReadOptions(transaction=transaction_id)
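# The three valid combinations accepted by get_read_options:
#
#     get_read_options(False, None)    # default (STRONG) read
#     get_read_options(True, None)     # EVENTUAL consistency
#     get_read_options(False, b"txn")  # read inside the given transaction
#
# get_read_options(True, b"txn") raises ValueError, as documented above.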
def key_from_protobuf(pb):
"""Factory method for creating a key based on a protobuf.
The protobuf should be one returned from the Cloud Datastore
Protobuf API.
:type pb: :class:`.entity_pb2.Key`
:param pb: The Protobuf representing the key.
:rtype: :class:`google.cloud.datastore.key.Key`
:returns: a new `Key` instance
"""
path_args = []
for element in pb.path:
path_args.append(element.kind)
if element.id: # Simple field (int64)
path_args.append(element.id)
# This is safe: we expect proto objects returned will only have
# one of `name` or `id` set.
if element.name: # Simple field (string)
path_args.append(element.name)
project = None
if pb.partition_id.project_id: # Simple field (string)
project = pb.partition_id.project_id
namespace = None
if pb.partition_id.namespace_id: # Simple field (string)
namespace = pb.partition_id.namespace_id
return Key(*path_args, namespace=namespace, project=project)
def _pb_attr_value(val):
"""Given a value, return the protobuf attribute name and proper value.
The Protobuf API uses different attribute names based on value types
rather than inferring the type. This function simply determines the
proper attribute name based on the type of the value provided and
returns the attribute name as well as a properly formatted value.
Certain value types need to be coerced into a different type (such
as a `datetime.datetime` into an integer timestamp, or a
    `google.cloud.datastore.key.Key` into a Protobuf representation). This
function handles that for you.
.. note::
Values which are "text" ('unicode' in Python2, 'str' in Python3) map
to 'string_value' in the datastore; values which are "bytes"
('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
For example:
.. testsetup:: pb-attr-value
from google.cloud.datastore.helpers import _pb_attr_value
.. doctest:: pb-attr-value
>>> _pb_attr_value(1234)
('integer_value', 1234)
>>> _pb_attr_value('my_string')
('string_value', 'my_string')
:type val:
:class:`datetime.datetime`, :class:`google.cloud.datastore.key.Key`,
bool, float, integer, bytes, str, unicode,
:class:`google.cloud.datastore.entity.Entity`, dict, list,
:class:`google.cloud.datastore.helpers.GeoPoint`, NoneType
:param val: The value to be scrutinized.
:rtype: tuple
:returns: A tuple of the attribute name and proper value type.
"""
if isinstance(val, datetime.datetime):
name = "timestamp"
value = _datetime_to_pb_timestamp(val)
elif isinstance(val, Key):
name, value = "key", val.to_protobuf()
elif isinstance(val, bool):
name, value = "boolean", val
elif isinstance(val, float):
name, value = "double", val
elif isinstance(val, int):
name, value = "integer", val
elif isinstance(val, str):
name, value = "string", val
elif isinstance(val, bytes):
name, value = "blob", val
elif isinstance(val, Entity):
name, value = "entity", val
elif isinstance(val, dict):
entity_val = Entity(key=None)
entity_val.update(val)
name, value = "entity", entity_val
elif isinstance(val, list):
name, value = "array", val
elif isinstance(val, GeoPoint):
name, value = "geo_point", val.to_protobuf()
elif val is None:
name, value = "null", struct_pb2.NULL_VALUE
else:
raise ValueError("Unknown protobuf attr type", type(val))
return name + "_value", value
def _get_value_from_value_pb(pb):
"""Given a protobuf for a Value, get the correct value.
The Cloud Datastore Protobuf API returns a Property Protobuf which
    has one value set and the rest blank. This function retrieves the
    one value provided.
Some work is done to coerce the return value into a more useful type
(particularly in the case of a timestamp value, or a key value).
:type pb: :class:`.entity_pb2.Value._pb`
:param pb: The *raw* Value Protobuf.
:rtype: object
:returns: The value provided by the Protobuf.
:raises: :class:`ValueError <exceptions.ValueError>` if no value type
has been set.
"""
value_type = pb.WhichOneof("value_type")
if value_type == "timestamp_value":
result = DatetimeWithNanoseconds.from_timestamp_pb(pb.timestamp_value)
elif value_type == "key_value":
result = key_from_protobuf(pb.key_value)
elif value_type == "boolean_value":
result = pb.boolean_value
elif value_type == "double_value":
result = pb.double_value
elif value_type == "integer_value":
result = pb.integer_value
elif value_type == "string_value":
result = pb.string_value
elif value_type == "blob_value":
result = pb.blob_value
elif value_type == "entity_value":
result = entity_from_protobuf(pb.entity_value)
elif value_type == "array_value":
result = [
_get_value_from_value_pb(item_value) for item_value in pb.array_value.values
]
elif value_type == "geo_point_value":
result = GeoPoint(pb.geo_point_value.latitude, pb.geo_point_value.longitude,)
elif value_type == "null_value":
result = None
else:
raise ValueError("Value protobuf did not have any value set")
return result
def _set_protobuf_value(value_pb, val):
"""Assign 'val' to the correct subfield of 'value_pb'.
The Protobuf API uses different attribute names based on value types
rather than inferring the type.
Some value types (entities, keys, lists) cannot be directly
assigned; this function handles them correctly.
:type value_pb: :class:`.entity_pb2.Value`
:param value_pb: The value protobuf to which the value is being assigned.
:type val: :class:`datetime.datetime`, boolean, float, integer, string,
:class:`google.cloud.datastore.key.Key`,
:class:`google.cloud.datastore.entity.Entity`
:param val: The value to be assigned.
"""
attr, val = _pb_attr_value(val)
if attr == "key_value":
value_pb.key_value.CopyFrom(val._pb)
elif attr == "timestamp_value":
value_pb.timestamp_value.CopyFrom(val)
elif attr == "entity_value":
entity_pb = entity_to_protobuf(val)
value_pb.entity_value.CopyFrom(entity_pb._pb)
elif attr == "array_value":
if len(val) == 0:
array_value = entity_pb2.ArrayValue(values=[])._pb
value_pb.array_value.CopyFrom(array_value)
else:
l_pb = value_pb.array_value.values
for item in val:
i_pb = l_pb.add()
_set_protobuf_value(i_pb, item)
elif attr == "geo_point_value":
value_pb.geo_point_value.CopyFrom(val)
else: # scalar, just assign
setattr(value_pb, attr, val)
class GeoPoint(object):
"""Simple container for a geo point value.
:type latitude: float
:param latitude: Latitude of a point.
:type longitude: float
:param longitude: Longitude of a point.
"""
def __init__(self, latitude, longitude):
self.latitude = latitude
self.longitude = longitude
def to_protobuf(self):
"""Convert the current object to protobuf.
:rtype: :class:`google.type.latlng_pb2.LatLng`.
:returns: The current point as a protobuf.
"""
return latlng_pb2.LatLng(latitude=self.latitude, longitude=self.longitude)
def __eq__(self, other):
"""Compare two geo points for equality.
:rtype: bool
:returns: True if the points compare equal, else False.
"""
if not isinstance(other, GeoPoint):
return NotImplemented
return self.latitude == other.latitude and self.longitude == other.longitude
def __ne__(self, other):
"""Compare two geo points for inequality.
:rtype: bool
:returns: False if the points compare equal, else True.
"""
return not self == other
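
# --- Illustrative usage sketch (not part of the library) --------------------
# A minimal round trip through the helpers above. This assumes `entity_pb2`
# is the datastore protobuf types module imported at the top of this file and
# that `entity_pb2.Value()._pb` yields a raw ``Value`` protobuf, as the
# ``ArrayValue`` usage in ``_set_protobuf_value`` suggests.
if __name__ == "__main__":  # pragma: no cover
    # Scalar round trip: write an int into a Value protobuf, read it back.
    scratch_pb = entity_pb2.Value()._pb
    _set_protobuf_value(scratch_pb, 1234)
    assert _get_value_from_value_pb(scratch_pb) == 1234

    # GeoPoint values compare by coordinates and convert to LatLng protobufs.
    point = GeoPoint(37.7749, -122.4194)
    assert point == GeoPoint(37.7749, -122.4194)
    latlng = point.to_protobuf()
    print(latlng.latitude, latlng.longitude)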
|
|
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Join a sequence of Ito Processes with specified correlations."""
import tensorflow.compat.v2 as tf
from tf_quant_finance.models import euler_sampling
from tf_quant_finance.models import generic_ito_process
from tf_quant_finance.models import ito_process
from tf_quant_finance.models import utils
class JoinedItoProcess(generic_ito_process.GenericItoProcess):
"""Join of Ito Processes with specified time dependent correlations.
For a sequence of Ito processes `I_1, .., I_n` of dimensions `d_1,.., d_n`,
the class initializes a process `I` of dimension `d_1 + .. + d_n` with
  marginal processes `I_i` and a correlation function `Corr(t)`. That is, let the
Ito Process `I_i` describe an SDE
```None
dX^i = a_i(t, X^i_t) dt + b_i(t, X^i_t) dW^i_t
```
where `a_i(t, x)` is a function taking values in `R^{d_i}`, `b_i(t, X_t)` is a
function taking values in `d_i x d_i` matrices, `W_i` is a `d_i`-dimensional
Brownian motion.
Then `I` describes an SDE for the joint process `(X_1,..., X_n)` of dimension
`d:= d_1 + ... + d_n`
```None
dX^i = a_i(t, X^i_t) dt + b_i(t, X^i_t) dB^i_t,
```
where `(B_1, ..., B_n) = chol(t) * (W_1, ..., W_n)` for a Cholesky
decomposition `chol(t)` of the correlation matrix `Corr(t)` at time `t`.
Here `(W_1, ..., W_n)` is `d`-dimensional vector and `Corr(t)` is a `d x d`
correlation matrix.
  `Corr(t)` is represented as a block-diagonal matrix formed from a list of matrices
`[m_1(t), m_2(t), ..., m_k(t)]` with `sum(rank(m_i)) = d`.
  # Example: Black-Scholes and Heston model join.
```python
import numpy as np
import tensorflow as tf
import tf_quant_finance as tff
dtype = tf.float64
# Define Black scholes model with zero rate and volatility `0.1`
sigma = 0.1
def drift_fn(t , x):
return -sigma**2 / 2
def vol_fn(t , x):
return sigma * tf.ones([1, 1], dtype=x.dtype)
black_scholes_process = tff.models.GenericItoProcess(
dim=1, drift_fn=drift_fn, volatility_fn=vol_fn, dtype=dtype)
# Define Heston Model
epsilon = tff.math.piecewise.PiecewiseConstantFunc(
jump_locations=[0.5], values=[0.01, 0.02], dtype=np.float64)
heston_process = tff.models.heston_model.HestonModel(
kappa=1.0, theta=0.04, epsilon=epsilon, rho=0.2, dtype=dtype)
# Define join process where correlation between `black_scholes_process` and
# log-asset price of the `heston_process` is 0.5.
corr_structure = [[[1.0, 0.5], [0.5, 1.0]], [1.0]]
# `corr_structure` corresponds to a `3 x 3` correlation matrix. Here Brownian
# motion of `black_scholes_process` is correlated only with the 1st dimension
# of `heston_process` but not with the second one.
join_process = JoinedItoProcess(
processes=[black_scholes_process, heston_process],
corr_structure=corr_structure)
# Sample 100,000 sample paths at times [0.1, 1.0] from the join process using
# Sobol random sequence
times = [0.1, 1.0]
# Wrap sample_paths method with a tf.function
  sample_paths_fn = tf.function(join_process.sample_paths)
samples = sample_paths_fn(
times=times, time_step=0.01, num_samples=100000,
initial_state=np.array([0.0, 1.0, 0.04]),
      random_type=tff.math.random.RandomType.SOBOL)
# Estimated correlations.
np.corrcoef(samples[:, -1, :], rowvar=False)
# Expected result:
# [[1. , 0.49567078, 0.08128067],
# [0.49567078, 1. , 0.16580689],
# [0.08128067, 0.16580689, 1. ]]
```
"""
def __init__(self, processes, corr_structure, dtype=None, name=None):
"""Initializes a JoinedItoProcess.
Takes a list of `processes` which are instances of `tff.models.ItoProcess`
and a list `corr_structure` of correlation matrices and creates an Ito
process that joins `processes` using the correlation structure.
`corr_structure` describes block-diagonal structure of correlations for
the Brownian motions in `processes`. For example, if the dimension of the
JoinedItoProcess is `3` and
`corr_structure = [[[1.0, 0.5], [0.5, 1.0]], [1.0]]`, then the introduced
correlation is
`Corr(t) = [[1.0, 0.5, 0.0], [0.5, 1.0, 0.0], [0.0, 0.0, 1.0]]`,
where `Corr(t)` is the same as in the `JoinedItoProcess` docstring.
Args:
processes: A sequence of instances of `tff.models.ItoProcess`. All
processes should have the same `dtype.`
corr_structure: A list of correlation matrices. Each correlation matrix
is either a `Tensor` of the same `dtype` as the `processes` and
square shape (i.e., `[d_i, d_i]` for some `d_i`) or a callable. The
callables should accept a scalar (stands for time `t`) and return a
square `Tensor`. The total dimension
        `sum([m.shape[-1] for m in corr_structure])` of the correlation
structure should be the same as the dimension of the `JoinedItoProcess`
`sum([p.dim() for p in processes])`.
dtype: The default `dtype` of the `processes`.
Default value: None which means that default dtypes inferred by
TensorFlow are used.
name: Python string. The name scope under which ops created by the methods
of this class are nested.
Default value: `None` which maps to the default name
`join_ito_process`.
Raises:
ValueError:
(a) If any of the `processes` is not an `ItoProcess`.
(b) If `processes` do not have the same `dtype`.
"""
self._name = name or "join_ito_process"
with tf.name_scope(self._name):
self._processes = [] # Input processes
dim = 0 # Dimension of the process
for process in processes:
if not isinstance(process, ito_process.ItoProcess):
raise ValueError(
"All input process of JoinedItoProcess must be instances "
"of the ItoProcess class.")
self._processes.append(process)
d = process.dim()
dim += d # Total dimension of the process
if dtype is None:
dtype = process.dtype()
elif dtype != process.dtype():
raise ValueError("All processes should have the same `dtype`")
self._corr_structure = [
corr if callable(corr) else tf.convert_to_tensor(
corr, dtype=dtype, name="corr")
for corr in corr_structure]
self._dim = dim
def _drift_fn(t, x):
"""Drift function of the JoinedItoProcess."""
drifts = []
i1 = 0
i2 = 0
for p in self._processes:
dim = p.dim()
i2 += dim
position = x[..., i1:i2]
drift = tf.convert_to_tensor(p.drift_fn()(t, position),
dtype=dtype,
name="drift")
drift = tf.broadcast_to(drift, position.shape)
drifts.append(drift)
i1 += dim
return tf.concat(drifts, -1)
def _vol_fn(t, x):
"""Volatility function of the JoinedItoProcess."""
vols = []
i1 = 0
i2 = 0
for p in self._processes:
dim = p.dim()
i2 += dim
position = x[..., i1:i2]
vol = tf.convert_to_tensor(p.volatility_fn()(t, position),
dtype=dtype,
name="volatility")
vol = tf.broadcast_to(vol, position.shape + [dim])
vols.append(vol)
i1 += dim
      # Convert block-diagonal volatilities to a dense volatility matrix
vol = utils.block_diagonal_to_dense(*vols)
# Compute Cholesky decomposition of the correlation structure
corr_structure = _get_parameters(
tf.expand_dims(t, -1), *self._corr_structure)
cholesky_decomp = [tf.linalg.cholesky(m) for m in corr_structure]
cholesky_decomp = utils.block_diagonal_to_dense(*cholesky_decomp)
return tf.linalg.matmul(vol, cholesky_decomp)
# The following will initialize the Generic Ito Process that has
# a sampling and PDE solving methods
super().__init__(dim, _drift_fn, _vol_fn, dtype, name)
def sample_paths(self,
times,
num_samples=1,
initial_state=None,
random_type=None,
seed=None,
time_step=None,
swap_memory=True,
skip=0,
name=None):
"""Returns a sample of paths from the process using Euler sampling.
Args:
times: Rank 1 `Tensor` of increasing positive real values. The times at
which the path points are to be evaluated.
num_samples: Positive scalar `int`. The number of paths to draw.
Default value: 1.
initial_state: `Tensor` of shape `[self._dim]`. The initial state of the
process.
Default value: None which maps to a zero initial state.
random_type: Enum value of `RandomType`. The type of (quasi)-random number
generator to use to generate the paths.
Default value: None which maps to the standard pseudo-random numbers.
seed: Seed for the random number generator. The seed is
only relevant if `random_type` is one of
`[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
`HALTON_RANDOMIZED` the seed should be an integer scalar `Tensor`. For
        `STATELESS` and `STATELESS_ANTITHETIC` the seed must be supplied as an integer
`Tensor` of shape `[2]`.
Default value: `None` which means no seed is set.
time_step: Real scalar `Tensor`. The maximal distance between time points
in grid in Euler scheme.
swap_memory: A Python bool. Whether GPU-CPU memory swap is enabled for
this op. See an equivalent flag in `tf.while_loop` documentation for
more details. Useful when computing a gradient of the op since
`tf.while_loop` is used to propagate stochastic process in time.
Default value: True.
skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
Halton sequence to skip. Used only when `random_type` is 'SOBOL',
'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
Default value: `0`.
name: Python string. The name to give this op.
        Default value: `None` which maps to `sample_paths`.
Returns:
A real `Tensor` of shape `[num_samples, k, n]` where `k` is the size of the
`times`, and `n` is the dimension of the process.
Raises:
ValueError: If `time_step` is not supplied.
"""
if time_step is None:
raise ValueError("`time_step` has to be supplied for JoinedItoProcess "
"`sample_paths` method.")
name = name or self._name + "sample_paths"
with tf.name_scope(name):
if initial_state is None:
initial_state = tf.zeros(self._dim, dtype=self.dtype(),
name="initial_state")
else:
if isinstance(initial_state, (tuple, list)):
initial_state = [tf.convert_to_tensor(state, dtype=self.dtype(),
name="initial_state")
for state in initial_state]
initial_state = tf.stack(initial_state)
else:
initial_state = tf.convert_to_tensor(initial_state,
dtype=self.dtype(),
name="initial_state")
samples = euler_sampling.sample(self.dim(),
drift_fn=self.drift_fn(),
volatility_fn=self.volatility_fn(),
times=times,
time_step=time_step,
num_samples=num_samples,
initial_state=initial_state,
random_type=random_type,
seed=seed,
swap_memory=swap_memory,
skip=skip,
dtype=self.dtype())
return samples
def _get_parameters(times, *params):
"""Gets parameter values at at specified `times`."""
res = []
for param in params:
if callable(param):
# Used only in drift and volatility computation.
# Here `times` is of shape [1]
t = tf.squeeze(times)
# The result has to have shape [1] + param.shape
param_value = tf.convert_to_tensor(param(t), dtype=times.dtype,
name="param_value")
res.append(tf.expand_dims(param_value, 0))
else:
res.append(param + tf.zeros(times.shape + param.shape, dtype=times.dtype))
return res
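
# --- Illustrative sketch (not part of the library) --------------------------
# Demonstrates how `_get_parameters` treats constant vs. callable correlation
# blocks: constants are broadcast against `times`, while callables are
# evaluated at the (squeezed) time and given a leading dimension of size 1.
if __name__ == "__main__":  # pragma: no cover
  times = tf.constant([0.5], dtype=tf.float64)             # shape [1]
  constant_block = tf.constant([[1.0]], dtype=tf.float64)  # shape [1, 1]
  callable_block = lambda t: tf.eye(2, dtype=tf.float64)   # shape [2, 2]
  blocks = _get_parameters(times, constant_block, callable_block)
  print(blocks[0].shape)  # (1, 1, 1) -- constant broadcast over `times`
  print(blocks[1].shape)  # (1, 2, 2) -- callable evaluated at t, then expanded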
|
|
from __future__ import unicode_literals
import boto
import boto.ec2.autoscale
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.autoscale.group import AutoScalingGroup
from boto.ec2.autoscale import Tag
import boto.ec2.elb
import sure # noqa
from moto import mock_autoscaling, mock_ec2, mock_elb
from tests.helpers import requires_boto_gte
@mock_autoscaling
@mock_elb
def test_create_autoscaling_group():
elb_conn = boto.ec2.elb.connect_to_region('us-east-1')
elb_conn.create_load_balancer('test_lb', zones=[], listeners=[(80, 8080, 'http')])
conn = boto.ec2.autoscale.connect_to_region('us-east-1')
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
default_cooldown=60,
desired_capacity=2,
health_check_period=100,
health_check_type="EC2",
max_size=2,
min_size=2,
launch_config=config,
load_balancers=["test_lb"],
placement_group="test_placement",
vpc_zone_identifier='subnet-1234abcd',
termination_policies=["OldestInstance", "NewestInstance"],
tags=[Tag(
resource_id='tester_group',
key='test_key',
value='test_value',
propagate_at_launch=True
)
],
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.name.should.equal('tester_group')
set(group.availability_zones).should.equal(set(['us-east-1c', 'us-east-1b']))
group.desired_capacity.should.equal(2)
group.max_size.should.equal(2)
group.min_size.should.equal(2)
group.instances.should.have.length_of(2)
group.vpc_zone_identifier.should.equal('subnet-1234abcd')
group.launch_config_name.should.equal('tester')
group.default_cooldown.should.equal(60)
group.health_check_period.should.equal(100)
group.health_check_type.should.equal("EC2")
list(group.load_balancers).should.equal(["test_lb"])
group.placement_group.should.equal("test_placement")
list(group.termination_policies).should.equal(["OldestInstance", "NewestInstance"])
len(list(group.tags)).should.equal(1)
tag = list(group.tags)[0]
tag.resource_id.should.equal('tester_group')
tag.key.should.equal('test_key')
tag.value.should.equal('test_value')
tag.propagate_at_launch.should.equal(True)
@mock_autoscaling
def test_create_autoscaling_groups_defaults():
""" Test with the minimum inputs and check that all of the proper defaults
are assigned for the other attributes """
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
max_size=2,
min_size=2,
launch_config=config,
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.name.should.equal('tester_group')
group.max_size.should.equal(2)
group.min_size.should.equal(2)
group.launch_config_name.should.equal('tester')
# Defaults
list(group.availability_zones).should.equal([])
group.desired_capacity.should.equal(2)
group.vpc_zone_identifier.should.equal('')
group.default_cooldown.should.equal(300)
group.health_check_period.should.equal(None)
group.health_check_type.should.equal("EC2")
list(group.load_balancers).should.equal([])
group.placement_group.should.equal(None)
list(group.termination_policies).should.equal([])
list(group.tags).should.equal([])
@mock_autoscaling
def test_autoscaling_group_describe_filter():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
max_size=2,
min_size=2,
launch_config=config,
)
conn.create_auto_scaling_group(group)
group.name = 'tester_group2'
conn.create_auto_scaling_group(group)
group.name = 'tester_group3'
conn.create_auto_scaling_group(group)
conn.get_all_groups(names=['tester_group', 'tester_group2']).should.have.length_of(2)
conn.get_all_groups().should.have.length_of(3)
@mock_autoscaling
def test_autoscaling_update():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
desired_capacity=2,
max_size=2,
min_size=2,
launch_config=config,
vpc_zone_identifier='subnet-1234abcd',
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.vpc_zone_identifier.should.equal('subnet-1234abcd')
group.vpc_zone_identifier = 'subnet-5678efgh'
group.update()
group = conn.get_all_groups()[0]
group.vpc_zone_identifier.should.equal('subnet-5678efgh')
@mock_autoscaling
def test_autoscaling_group_delete():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
max_size=2,
min_size=2,
launch_config=config,
)
conn.create_auto_scaling_group(group)
conn.get_all_groups().should.have.length_of(1)
conn.delete_auto_scaling_group('tester_group')
conn.get_all_groups().should.have.length_of(0)
@mock_ec2
@mock_autoscaling
def test_autoscaling_group_describe_instances():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
max_size=2,
min_size=2,
launch_config=config,
)
conn.create_auto_scaling_group(group)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(2)
instances[0].launch_config_name.should.equal('tester')
autoscale_instance_ids = [instance.instance_id for instance in instances]
ec2_conn = boto.connect_ec2()
reservations = ec2_conn.get_all_instances()
instances = reservations[0].instances
instances.should.have.length_of(2)
instance_ids = [instance.id for instance in instances]
set(autoscale_instance_ids).should.equal(set(instance_ids))
instances[0].instance_type.should.equal("t2.medium")
@requires_boto_gte("2.8")
@mock_autoscaling
def test_set_desired_capacity_up():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
desired_capacity=2,
max_size=2,
min_size=2,
launch_config=config,
vpc_zone_identifier='subnet-1234abcd',
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.desired_capacity.should.equal(2)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(2)
conn.set_desired_capacity("tester_group", 3)
group = conn.get_all_groups()[0]
group.desired_capacity.should.equal(3)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(3)
@requires_boto_gte("2.8")
@mock_autoscaling
def test_set_desired_capacity_down():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
desired_capacity=2,
max_size=2,
min_size=2,
launch_config=config,
vpc_zone_identifier='subnet-1234abcd',
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.desired_capacity.should.equal(2)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(2)
conn.set_desired_capacity("tester_group", 1)
group = conn.get_all_groups()[0]
group.desired_capacity.should.equal(1)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(1)
@requires_boto_gte("2.8")
@mock_autoscaling
def test_set_desired_capacity_the_same():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
desired_capacity=2,
max_size=2,
min_size=2,
launch_config=config,
vpc_zone_identifier='subnet-1234abcd',
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
group.desired_capacity.should.equal(2)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(2)
conn.set_desired_capacity("tester_group", 2)
group = conn.get_all_groups()[0]
group.desired_capacity.should.equal(2)
instances = list(conn.get_all_autoscaling_instances())
instances.should.have.length_of(2)
@mock_autoscaling
@mock_elb
def test_autoscaling_group_with_elb():
elb_conn = boto.connect_elb()
zones = ['us-east-1a', 'us-east-1b']
ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
lb = elb_conn.create_load_balancer('my-lb', zones, ports)
instances_health = elb_conn.describe_instance_health('my-lb')
instances_health.should.be.empty
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t2.medium',
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
max_size=2,
min_size=2,
launch_config=config,
load_balancers=["my-lb"],
)
conn.create_auto_scaling_group(group)
group = conn.get_all_groups()[0]
elb = elb_conn.get_all_load_balancers()[0]
group.desired_capacity.should.equal(2)
elb.instances.should.have.length_of(2)
autoscale_instance_ids = set(instance.instance_id for instance in group.instances)
    elb_instance_ids = set(instance.id for instance in elb.instances)
    autoscale_instance_ids.should.equal(elb_instance_ids)
conn.set_desired_capacity("tester_group", 3)
group = conn.get_all_groups()[0]
elb = elb_conn.get_all_load_balancers()[0]
group.desired_capacity.should.equal(3)
elb.instances.should.have.length_of(3)
autoscale_instance_ids = set(instance.instance_id for instance in group.instances)
    elb_instance_ids = set(instance.id for instance in elb.instances)
    autoscale_instance_ids.should.equal(elb_instance_ids)
conn.delete_auto_scaling_group('tester_group')
conn.get_all_groups().should.have.length_of(0)
elb = elb_conn.get_all_load_balancers()[0]
elb.instances.should.have.length_of(0)
|
|
#!/usr/bin/env python
# Copyright 2015 Coursera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Helpers for working with OAuth2 / etc.
'''
import http.server
import configparser
import pickle
import logging
import requests
import io
import os
import os.path
import subprocess
import sys
import time
import urllib.parse
import uuid
from sys import platform as _platform
class ExpiredToken(Exception):
def __init__(self, oauth2Auth):
self.oauth2Auth = oauth2Auth
def __str__(self):
return 'Attempt to use expired token. Expired at: %s.' % \
self.oauth2Auth.expires
class OAuth2Exception(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return 'OAuth2 Protocol Exception: %(msg)s' % {
'msg': self.msg,
}
class CourseraOAuth2Auth(requests.auth.AuthBase):
'Attaches OAuth2 access tokens to requests.'
def __init__(self, token, expires):
self.token = token
self.expires = expires
def is_valid(self):
'Determines if this authorizer is still valid.'
return time.time() < self.expires
def __call__(self, request):
if self.is_valid():
logging.debug('About to add an authorization header!')
request.headers['Authorization'] = 'Bearer %(token)s' % {
'token': self.token
}
return request
else:
logging.error(
'Attempt to use expired Authorizer. Expired: %s, now: %s',
self.expires, time.time())
raise ExpiredToken(self)
class CodeHolder:
'A helper class to hold a token.'
def __init__(self):
self.code = None
def __call__(self, code):
self.code = code
def has_code(self):
return self.code is not None
def _make_handler(state_token, done_function):
'''
    Makes a handler class to use inside the basic Python HTTP server.
state_token is the expected state token.
done_function is a function that is called, with the code passed to it.
'''
class LocalServerHandler(http.server.BaseHTTPRequestHandler):
def error_response(self, msg):
            logging.warning(
                'Error response: %s. %s',
                msg,
                self.path)
            self.send_response(400)
            self.send_header('Content-type', 'text/plain')
            self.end_headers()
            self.wfile.write(msg.encode('utf-8'))
def do_GET(self):
parsed = urllib.parse.urlparse(self.path)
if len(parsed.query) == 0 or parsed.path != '/callback':
self.error_response(
'We encountered a problem with your request.')
return
params = urllib.parse.parse_qs(parsed.query)
if params['state'] != [state_token]:
self.error_response(
'Attack detected: state tokens did not match!')
return
if len(params['code']) != 1:
self.error_response('Wrong number of "code" query parameters.')
return
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(
("courseraprogramming: we have captured Coursera's response "
"code. Feel free to close this browser window now and return "
"to your terminal. Thanks!").encode("utf-8"))
done_function(params['code'][0])
return LocalServerHandler
OAUTH2_URL_BASE = 'https://accounts.coursera.org/oauth2/v1/'
class CourseraOAuth2(object):
'''
This class manages the OAuth2 tokens used to access Coursera's APIs.
You must register your app with Coursera at:
https://accounts.coursera.org/console
Construct an instance of this class with the client_id and client_secret
displayed in the Coursera app console. Please also set a redirect url to be
http://localhost:9876/callback
Note: you can replace the port number (9876 above) with whatever port you'd
like. If you would not like to use the local webserver to retrieve the
    codes, set the local_webserver_port field in the constructor to None.
TODO: add usage / more documentation.
'''
def __init__(self,
client_id,
client_secret,
scopes,
auth_endpoint=OAUTH2_URL_BASE+'auth',
token_endpoint=OAUTH2_URL_BASE+'token',
verify_tls=True,
token_cache_file='~/.coursera/oauth2_cache.pickle',
local_webserver_port=9876):
self.client_id = client_id
self.client_secret = client_secret
self.scopes = scopes
self.auth_endpoint = auth_endpoint
self.token_endpoint = token_endpoint
self.verify_tls = verify_tls
self.token_cache_file = os.path.expanduser(token_cache_file)
        # Create the appropriate directory if it does not already exist.
if not os.path.isfile(self.token_cache_file):
dir_name = os.path.dirname(self.token_cache_file)
try:
os.makedirs(dir_name, mode=0o700)
except:
logging.debug(
'Encountered an exception creating directory for token '
'cache file. Ignoring...',
exc_info=True)
else:
# TODO: check file permissions to ensure not world readable.
pass
# If not None, run a local webserver to hear the callback.
self.local_webserver_port = local_webserver_port
self._token_cache = None
@property
def _redirect_uri(self):
return 'http://%(hostname)s:%(port)s/callback' % {
'hostname': 'localhost',
'port': self.local_webserver_port,
}
@property
def token_cache(self):
if self._token_cache is None:
# Load token cache from the file system.
cache = self._load_token_cache()
self._token_cache = cache
return self._token_cache
@token_cache.setter
def token_cache(self, value):
self._token_cache = value
self._save_token_cache(value)
def _load_token_cache(self):
'Reads the local fs cache for pre-authorized access tokens'
try:
logging.debug('About to read from local file cache file %s',
self.token_cache_file)
with open(self.token_cache_file, 'rb') as f:
fs_cached = pickle.load(f)
if self._check_token_cache_type(fs_cached):
logging.debug('Loaded from file system: %s', fs_cached)
return fs_cached
else:
                    logging.warning('Found unexpected value in cache. %s',
                                    fs_cached)
return None
except IOError:
logging.debug(
'Did not find file: %s on the file system.',
self.token_cache_file)
return None
except:
logging.info(
'Encountered exception loading from the file system.',
exc_info=True)
return None
def _save_token_cache(self, new_cache):
'Write out to the filesystem a cache of the OAuth2 information.'
logging.debug('Looking to write to local authentication cache...')
if not self._check_token_cache_type(new_cache):
logging.error('Attempt to save a bad value: %s', new_cache)
return
try:
logging.debug('About to write to fs cache file: %s',
self.token_cache_file)
with open(self.token_cache_file, 'wb') as f:
pickle.dump(new_cache, f, protocol=pickle.HIGHEST_PROTOCOL)
logging.debug('Finished dumping cache_value to fs cache file.')
except:
logging.exception(
'Could not successfully cache OAuth2 secrets on the file '
'system.')
def _check_token_cache_type(self, cache_value):
'''
Checks the cache_value for appropriate type correctness.
        Returns True if the value has the expected structure, False otherwise.
'''
def check_string_value(name):
return isinstance(cache_value[name], str)
def check_refresh_token():
if 'refresh' in cache_value:
return check_string_value('refresh')
else:
return True
return (
isinstance(cache_value, dict) and
'token' in cache_value and
'expires' in cache_value and
check_string_value('token') and
isinstance(cache_value['expires'], float) and
check_refresh_token()
)
def _request_tokens_from_token_endpoint(self, form_data):
logging.debug(
'Posting form data %s to token endpoint %s',
form_data,
self.token_endpoint)
response = requests.post(
self.token_endpoint,
data=form_data,
verify=self.verify_tls,
timeout=10,
)
logging.debug(
'Response from token endpoint: (%s) %s',
response.status_code,
response.text)
if response.status_code != 200:
logging.error(
'Encountered unexpected status code: %s %s %s',
response.status_code,
response,
response.text)
raise OAuth2Exception('Unexpected status code from token endpoint')
body = response.json()
if 'access_token' not in body or 'expires_in' not in body:
            logging.error('Malformed / missing fields in body. %s', body)
raise OAuth2Exception(
'Malformed response body from token endpoint.')
if 'token_type' not in body or body['token_type'].lower() != 'bearer':
logging.error('Unknown token_type encountered: %s',
body['token_type'])
raise OAuth2Exception('Unknown token_type encountered.')
expires_time = time.time() + body['expires_in']
access_token = body['access_token']
tokens = {
'token': access_token,
'expires': expires_time,
}
if 'refresh_token' in body:
refresh = body['refresh_token']
if isinstance(refresh, str):
tokens['refresh'] = refresh
return tokens
    def _build_authorization_url(self, state_token):
authorization_request = requests.Request(
'GET',
self.auth_endpoint,
params={
'access_type': 'offline',
'response_type': 'code',
'client_id': self.client_id,
'redirect_uri': self._redirect_uri,
'scope': self.scopes,
'state': state_token,
}).prepare()
        logging.debug('Constructed authorization request at: %s',
authorization_request.url)
return authorization_request.url
def _authorize_new_tokens(self):
'''
Stands up a new localhost http server and retrieves new OAuth2 access
tokens from the Coursera OAuth2 server.
'''
logging.info('About to request new OAuth2 tokens from Coursera.')
# Attempt to request new tokens from Coursera via the browser.
state_token = uuid.uuid4().hex
        authorization_url = self._build_authorization_url(state_token)
sys.stdout.write(
'Please visit the following URL to authorize this app:\n')
sys.stdout.write('\t%s\n\n' % authorization_url)
if _platform == 'darwin':
# OS X -- leverage the 'open' command present on all modern macs
sys.stdout.write(
'Mac OS X detected; attempting to auto-open the url '
'in your default browser...\n')
try:
subprocess.check_call(['open', authorization_url])
except:
                logging.exception('Could not call `open %s`.',
                                  authorization_url)
if self.local_webserver_port is not None:
# Boot up a local webserver to retrieve the response.
server_address = ('', self.local_webserver_port)
code_holder = CodeHolder()
local_server = http.server.HTTPServer(
server_address,
_make_handler(state_token, code_holder))
while not code_holder.has_code():
local_server.handle_request()
coursera_code = code_holder.code
else:
coursera_code = input('Please enter the code from Coursera: ')
form_data = {
'code': coursera_code,
'client_id': self.client_id,
'client_secret': self.client_secret,
'redirect_uri': self._redirect_uri,
'grant_type': 'authorization_code',
}
return self._request_tokens_from_token_endpoint(form_data)
def _exchange_refresh_tokens(self):
'Exchanges a refresh token for an access token'
if self.token_cache is not None and 'refresh' in self.token_cache:
# Attempt to use the refresh token to get a new access token.
refresh_form = {
'grant_type': 'refresh_token',
'refresh_token': self.token_cache['refresh'],
'client_id': self.client_id,
'client_secret': self.client_secret,
}
try:
tokens = self._request_tokens_from_token_endpoint(refresh_form)
tokens['refresh'] = self.token_cache['refresh']
return tokens
except OAuth2Exception:
logging.exception(
'Encountered an exception during refresh token flow.')
return None
def _cache_has_good_token(self):
'The cache must have a token, and it must expire in > 5 minutes'
return (
self.token_cache is not None and
(time.time() + 5 * 60) < self.token_cache['expires']
)
def build_authorizer(self):
if not self._cache_has_good_token():
logging.debug('Attempting to use a refresh token.')
new_tokens = self._exchange_refresh_tokens()
if new_tokens is None:
logging.info(
'Attempting to retrieve new tokens from the endpoint. You '
'will be prompted to authorize the courseraprogramming '
'app in your web browser.')
new_tokens = self._authorize_new_tokens()
logging.debug('New tokens: %s', new_tokens)
self.token_cache = new_tokens
else:
logging.debug('Local cache is good.')
return CourseraOAuth2Auth(self.token_cache['token'],
self.token_cache['expires'])
def build_oauth2(args, cfg=None):
if cfg is None:
cfg = configuration()
try:
client_id = args.client_id
except:
client_id = cfg.get('oauth2', 'client_id')
try:
client_secret = args.client_secret
except:
client_secret = cfg.get('oauth2', 'client_secret')
try:
scopes = args.scopes
except:
scopes = cfg.get('oauth2', 'scopes')
try:
cache_filename = args.token_cache_file
except:
cache_filename = cfg.get('oauth2', 'token_cache')
return CourseraOAuth2(
client_id=client_id,
client_secret=client_secret,
scopes=scopes,
token_cache_file=cache_filename
)
def configuration():
'Loads configuration from the file system.'
defaults = '''
[oauth2]
client_id = NS8qaSX18X_Eu0pyNbLsnA
client_secret = bUqKqGywnGXEJPFrcd4Jpw
hostname = localhost
port = 9876
api_endpoint = https://api.coursera.org
auth_endpoint = https://accounts.coursera.org/oauth2/v1/auth
token_endpoint = https://accounts.coursera.org/oauth2/v1/token
scopes = view_profile manage_graders
verify_tls = True
token_cache = ~/.coursera/oauth2_cache.pickle
[upload]
transloadit_bored_api = https://api2.transloadit.com/instances/bored
'''
    cfg = configparser.ConfigParser()
cfg.read_file(io.StringIO(defaults))
cfg.read([
'/etc/coursera/courseraprogramming.cfg',
os.path.expanduser('~/.coursera/courseraprogramming.cfg'),
'courseraprogramming.cfg',
])
return cfg
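
# --- Illustrative usage sketch (not part of the courseraprogramming CLI) ----
# Builds the OAuth2 helper from the bundled defaults and attaches its
# authorizer to a plain `requests` call. The request URL below is a
# placeholder, not a documented Coursera endpoint.
if __name__ == '__main__':
    cfg = configuration()
    oauth2 = CourseraOAuth2(
        client_id=cfg.get('oauth2', 'client_id'),
        client_secret=cfg.get('oauth2', 'client_secret'),
        scopes=cfg.get('oauth2', 'scopes'),
        token_cache_file=cfg.get('oauth2', 'token_cache'))
    authorizer = oauth2.build_authorizer()  # may open a browser for consent
    response = requests.get(
        cfg.get('oauth2', 'api_endpoint') + '/some/endpoint',  # placeholder
        auth=authorizer)
    print(response.status_code)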
|
|
"""This module implements a binary search tree."""
from queue_ds import Queue
from stack import Stack
class Node(object):
"""Node object for use in a binary search tree."""
def __init__(self, value, right=None, left=None, parent=None):
"""Initialize a node for a binary search tree object."""
self.value = value
self.right = right
self.left = left
self.parent = parent
def _has_children(self):
"""Return True or False if Node has children."""
if self.right or self.left:
return True
return False
def _return_children(self):
"""Return all children of a Node."""
if self.left and self.right:
return [self.left, self.right]
elif self.left or self.right:
return [self.left] if self.left else [self.right]
class BinarySearchTree(object):
"""Binary Search Tree Object.
Methods:
- insert(self, val): will insert the value val into the BST. If
val is already present, it will be ignored.
- search(self, val): will return the node containing that value,
else None
- size(self): will return the integer size of the BST (equal to
the total number of values stored in the tree). It will return 0
if the tree is empty.
- depth(self): will return an integer representing the total number
of levels in the tree. If there is one value, the depth should be
      1, if two values it will be 2, and if three values it may be 2 or 3,
      depending on the insertion order.
- contains(self, val): will return True if val is in the BST, False
if not.
- balance(self): will return an integer, positive or negative that
represents how well balanced the tree is. Trees which are higher
on the left than the right should return a positive value, trees
which are higher on the right than the left should return a negative
value. An ideally-balanced tree should return 0.
- in_order(self): will return a generator that will return the values
in the tree using in-order traversal, one at a time.
- pre_order(self): will return a generator that will return the values
in the tree using pre-order traversal, one at a time.
- post_order(self): will return a generator that will return the values
in the tree using post_order traversal, one at a time.
- breadth_first(self): will return a generator that will return the
values in the tree using breadth-first traversal, one at a time.
"""
def __init__(self):
"""Initialize a Binary Search Tree object."""
self.root = None
self._size = 0
def insert(self, val):
"""Insert a new node with val into the BST."""
curr_node = self.root
new_node = Node(val)
if curr_node is None:
self.root = new_node
self._size = 1
return
while curr_node:
if val > curr_node.value:
if curr_node.right:
curr_node = curr_node.right
else:
curr_node.right = new_node
new_node.parent = curr_node
self._size += 1
break
elif val < curr_node.value:
if curr_node.left:
curr_node = curr_node.left
else:
curr_node.left = new_node
new_node.parent = curr_node
self._size += 1
break
else:
return
self._test_tree_balance(new_node)
def search(self, val):
"""Return the node with value val or return None."""
start = self.root
if start is None:
raise ValueError("Cannot search an empty tree.")
while True:
if start.value == val:
return start
elif val < start.value and start.left:
start = start.left
elif val > start.value and start.right:
start = start.right
else:
return None
def size(self):
"""Return the integer size of the BST."""
return self._size
def __len__(self):
"""Return integer size of the BST."""
return self.size()
def depth(self, start=''):
"""Return the integer depth of the BST."""
def depth_wrapped(start):
if start is None:
return 0
else:
right_depth = depth_wrapped(start.right)
left_depth = depth_wrapped(start.left)
return max(right_depth, left_depth) + 1
        if start == '':
return depth_wrapped(self.root)
else:
return depth_wrapped(start)
def contains(self, val):
"""Return True if node with value val is in BST, False if not."""
try:
return self.search(val).value == val
except AttributeError:
return False
def find_balance(self, start=None):
"""Return positive or negative integer that represents tree balance."""
if start is None:
start = self.root
if start is None:
return 0
return self.depth(start.left) - self.depth(start.right)
def in_order(self, node=None):
"""Return a generator of the tree in in_order order."""
start = node
if start is None:
start = self.root
if start is None:
            return
s = Stack()
while len(s) or start:
if start:
s.push(start)
start = start.left
else:
start = s.pop()
yield start.value
start = start.right
def pre_order(self):
"""Return a generator of the tree in pre_order order."""
start = self.root
if start is None:
            return
s = Stack()
s.push(start)
while len(s):
curr = s.pop()
yield curr.value
if curr.right is not None:
s.push(curr.right)
if curr.left is not None:
s.push(curr.left)
def post_order(self):
"""Return a generator of the tree in post_order order."""
start = self.root
if start is None:
            return
s = []
last = None
while s or start:
if start:
s.append(start)
start = start.left
else:
peek = s[-1]
if peek.right and last is not peek.right:
start = peek.right
else:
yield peek.value
last = s.pop()
def breadth_first(self):
"""Return a generator of the tree in breadth first traversal order."""
start = self.root
if start is None:
            return
q = Queue()
q.enqueue(start)
while len(q) > 0:
current = q.dequeue()
yield current.value
if current._has_children():
for child in current._return_children():
q.enqueue(child)
def delete(self, val):
"""Delete a node."""
if self.size() == 0:
raise IndexError("Cannot delete from empty tree.")
target = self.search(val)
if target is None:
raise ValueError("Cannot delete node that does not exist.")
test_node = target.parent
if self.size() == 1:
self.root = None
self._size -= 1
return
if not target._has_children():
self._del_leaf(target)
elif len(target._return_children()) == 1:
self._swap_par_child(target)
else:
g = self.in_order()
gen_out = None
            while gen_out != val:
gen_out = next(g)
successor = self.search(next(g))
target.value = successor.value
test_node = successor
if not successor._has_children():
self._del_leaf(successor)
else:
self._swap_par_child(successor)
self._size -= 1
self._test_tree_balance(test_node)
def _del_leaf(self, node):
"""Given a leaf node, delete it from tree."""
if node.parent.left == node:
node.parent.left = None
else:
node.parent.right = None
node.parent = None
def _swap_par_child(self, node):
"""Given a node with one child swap, parent child and del node."""
if node is node.parent.left:
node.parent.left = node._return_children()[0]
else:
node.parent.right = node._return_children()[0]
node.parent = None
def _rotate_right(self, sub_root):
"""Given root and pivot nodes, complete a right rotation."""
pivot = sub_root.left
sub_root.left = pivot.right
if pivot.right is not None:
pivot.right.parent = sub_root
pivot.parent = sub_root.parent
if sub_root is self.root:
self.root = pivot
else:
if sub_root is sub_root.parent.right:
sub_root.parent.right = pivot
else:
sub_root.parent.left = pivot
pivot.right = sub_root
sub_root.parent = pivot
def _rotate_left(self, sub_root):
"""Give root and pivot nodes, complete a left rotation."""
pivot = sub_root.right
sub_root.right = pivot.left
if pivot.left is not None:
pivot.left.parent = sub_root
pivot.parent = sub_root.parent
if sub_root is self.root:
self.root = pivot
else:
if sub_root is sub_root.parent.left:
sub_root.parent.left = pivot
else:
sub_root.parent.right = pivot
pivot.left = sub_root
sub_root.parent = pivot
def _test_tree_balance(self, node):
"""Check balance of tree."""
while node:
bal = self.find_balance(node)
if abs(bal) >= 2:
self._balance_tree(node, bal)
node = node.parent
def _balance_tree(self, start_node, bal):
"""Balance subtree of start node."""
if bal > 0: #< --- Heavy on the left.
sub_bal = self.find_balance(start_node.left)
if sub_bal < 0: #< --- Sub, right heavy
self._rotate_left(start_node.left)
self._rotate_right(start_node)
else: #< --- Heavy on the right.
sub_bal = self.find_balance(start_node.right)
if sub_bal > 0: #< --- Sub, left heavy
self._rotate_right(start_node.right)
self._rotate_left(start_node)
return self.depth(self.root.left) - self.depth(self.root.right)
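
# --- Illustrative usage sketch ----------------------------------------------
# Builds a small self-balancing tree and walks it with the traversal
# generators defined above. Relies on the local `stack` and `queue_ds`
# modules imported at the top of this file.
if __name__ == "__main__":
    bst = BinarySearchTree()
    for value in (8, 3, 10, 1, 6, 14):
        bst.insert(value)
    print(len(bst))                          # 6
    print(bst.contains(6), bst.contains(7))  # True False
    print(list(bst.in_order()))              # sorted: [1, 3, 6, 8, 10, 14]
    print(list(bst.breadth_first()))         # level order from the root
    bst.delete(3)
    print(list(bst.in_order()))              # [1, 6, 8, 10, 14]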
|
|
#!/usr/bin/python3
from __future__ import print_function
import os
import datetime
import configparser
import time
#import pytz
from enum import Enum
import paho.mqtt.client as mqtt
from threading import Thread
from queue import Queue
heatMgrQueue = 0
ECS_COMMAND_OFF = b'1'
ECS_COMMAND_ON = b'2'
lastTemperatureUpdate = datetime.datetime.now()
currentTemperatureUpdate = datetime.datetime.now()
global configWarningSender, configWarningRecipient, configSmtpLogin, configSmtpPassword
global ecsState, ecsRemoteState, ecsStateForced, ecsTemperature, ecsHeatTarget
targetReached = False
warnSent = False
#defines
ECS_HEAT_PROFILE_LOW = "LOW"
ECS_HEAT_PROFILE_MEDIUM = "MEDIUM"
ECS_HEAT_PROFILE_HIGH = "HIGH"
ECS_STATE_OFF = "OFF"
ECS_STATE_ON = "ON"
ECS_FORCE_DISABLED = "FORCE_DISABLED"
ECS_FORCE_OFF = "FORCE_OFF"
ECS_FORCE_ON = "FORCE_ON"
OVERHEAT_TEMPERATURE = 61
UNDERHEAT_TEMPERATURE = 16
HEAT_MANAGER_PERIOD = 1
DELAY_BETWEEN_TEMPERATURE_UPDATE = 60
class heatMgrMessage:
def __init__(self, type, value, heatProfile="MEDIUM"):
self.type = type
self.value = value
self.heatProfile = heatProfile
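# Illustrative sketch (never called): shows how the MQTT callbacks below
# build heatMgrMessage objects for the heatManager queue. The payload
# values mirror the byte strings published on the ECS/* topics.
def _example_messages():
    # A forced-ON request, as produced when b'2' arrives on ECS/force.
    force_msg = heatMgrMessage('ECS_FORCE', ECS_FORCE_ON)
    # A temperature update, as produced for payloads on ECS/temp2.
    temp_msg = heatMgrMessage('ECS_TEMPERATURE', b'55.5')
    # An ON command with an explicit heat profile (the default is "MEDIUM").
    command_msg = heatMgrMessage('ECS_COMMAND', ECS_COMMAND_ON, ECS_HEAT_PROFILE_HIGH)
    return [force_msg, temp_msg, command_msg]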
#=========================================================================#
# callback definition for MQTT #
#=========================================================================#
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc) + "\n")
client.subscribe("ECS/temp2")
client.subscribe("ECS/force")
client.subscribe("ECS/command")
def on_message(client, userdata, msg):
tmpQueue = userdata
tmpValue = ""
#print ("Callback tmpQueue : ")
#print (tmpQueue)
#print(msg.topic+" "+str(msg.payload) + "\n")
if msg.topic == "ECS/force":
# print ("Callback sending force command to heatManager")
if(msg.payload == b'0'):
tmpValue = ECS_FORCE_DISABLED
elif(msg.payload == b'1'):
tmpValue = ECS_FORCE_OFF
elif(msg.payload == b'2'):
tmpValue = ECS_FORCE_ON
else:
print ("Callback Error : Unknown Force command :", msg.payload)
tempMsg = heatMgrMessage('ECS_FORCE', tmpValue)
tmpQueue.put(tempMsg)
if msg.topic == "ECS/temp2":
# print("Callback sending temperature to heatManager")
tempMsg = heatMgrMessage('ECS_TEMPERATURE', msg.payload)
tmpQueue.put(tempMsg)
if msg.topic == "ECS/command":
# print("Callback sending ECS Command to heatManager")
tempMsg = heatMgrMessage('ECS_COMMAND', msg.payload)
tmpQueue.put(tempMsg)
#=========================================================================#
# Heat profile to temperature conversion #
#=========================================================================#
def getTargetTemperature(profile):
if (profile == ECS_HEAT_PROFILE_HIGH):
return 62
elif (profile == ECS_HEAT_PROFILE_MEDIUM):
return 58
elif (profile == ECS_HEAT_PROFILE_LOW):
return 53
else:
print("Unknown heatprofile. Defaulting to 53", profile)
return 53
def getStatusString():
global ecsState, ecsRemoteState, ecsStateForced, ecsTemperature, ecsHeatTarget
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
result = "\n\n=====================\nTime : "
result += now
result += "\n\tECS state :" + ecsState
result += "\n\tECS remote state :" + ecsRemoteState
result += "\n\tECS force state :" + str(ecsStateForced)
result += "\n\tECS temperature :" + str(ecsTemperature)
result += "\n\tTarget temperature : " + str(ecsHeatTarget)
result += "\n====================="
return result
def warnMessage(msg, mqttClient):
print(msg)
mqttClient.publish("ECS/warning", payload=msg, qos=1, retain=True)
return
def checkTemperatureValidity(temperature, mqttClient):
    global warnSent
    if(temperature < UNDERHEAT_TEMPERATURE):
        if(warnSent == False):
            warnMessage("Warning, the temperature of the ECS is getting low. Consider forcing ON", mqttClient)
            warnSent = True
if(temperature > OVERHEAT_TEMPERATURE):
warnMessage("Warning, the temperature of the ECS is getting too high. Consider forcing OFF", mqttClient)
#=========================================================================#
# Heat manager thread body #
# processes messages from Queue : #
# - ECS force state (from MQTTLoop thread) #
# - temperature(s) (from MQTTLoop thread) #
# - ECS command (from MQTTLoop thread) #
#=========================================================================#
def heatManager(msqQueue, mqttClient):
global ecsState, ecsRemoteState, ecsStateForced, ecsTemperature, ecsHeatTarget
global lastTemperatureUpdate, currentTemperatureUpdate
    global targetReached, warnSent
ecsState = ECS_STATE_OFF
ecsRemoteState = ECS_STATE_OFF
ecsStateForced = False
ecsTemperature = 0
ecsHeatTarget = 0
nbMsg = 0
while True:
#print ("HeatManager waiting for message")
msg = msqQueue.get()
msgType = msg.type
msgValue = msg.value
msgHeatProfile = msg.heatProfile
#print ("HeatManager waking up. message received")
nbMsg += 1
#print ("nb message received")
print ("#################################################")
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print (nbMsg, "Time : ", now)
#process messages
if(msgType == "ECS_COMMAND"):
#Reset warning message status
warnSent = False
print(getStatusString())
print("====================================")
print("HeatManager : Processing ECS Command")
if (msgValue == ECS_COMMAND_OFF):
print("Heat Manager : turning ECS OFF")
ecsState = ECS_STATE_OFF
mqttClient.publish("ECS/state", payload='1', qos=1, retain=True)
mqttClient.publish("ECS/target", payload='0', qos=1, retain=True)
ecsRemoteState = ECS_STATE_OFF
if(targetReached == False):
warnMessage("Temperature not reached before turning Off", mqttClient)
elif (msgValue == ECS_COMMAND_ON):
targetReached = False
ecsState = ECS_STATE_ON
ecsHeatTarget = getTargetTemperature(msgHeatProfile)
#Check if temperature is recent enough to be valid, else warn user
currentTime = datetime.datetime.now()
deltaTime = currentTime - currentTemperatureUpdate
deltaTimeInSeconds = deltaTime.total_seconds()
if(deltaTimeInSeconds > DELAY_BETWEEN_TEMPERATURE_UPDATE *10):
message = "Warning : Switching ECS ON while temperature info may not be valid : \nsensor update exceeded 10 times maximum delta time: " + str(deltaTimeInSeconds) + "seconds"
warnMessage(message, mqttClient)
if(ecsTemperature < ecsHeatTarget):
print("Heat Manager : turning ECS ON")
mqttClient.publish("ECS/state", payload='2', qos=1, retain=True)
mqttClient.publish("ECS/target", payload=ecsHeatTarget, qos=1, retain=True)
ecsRemoteState = ECS_STATE_ON
else:
message = "Heat Manager : No ECS ON despite command, due to target temperature already reached"
warnMessage(message, mqttClient)
else:
print("Heat Manager : Error : unknown EcsCommand %s in received message" % msgValue)
elif(msgType == "ECS_TEMPERATURE"):
ecsTemperature = float(msgValue)
lastTemperatureUpdate = currentTemperatureUpdate
currentTemperatureUpdate = datetime.datetime.now()
deltaTime = currentTemperatureUpdate - lastTemperatureUpdate
deltaTimeInSeconds = deltaTime.total_seconds()
if(deltaTimeInSeconds > DELAY_BETWEEN_TEMPERATURE_UPDATE * 4):
message = "Warning : Temperature update from sensor exceeded 4 times maximum delta time: " + str(deltaTimeInSeconds) + "seconds"
warnMessage(message, mqttClient)
print ("updating temperature : ", ecsTemperature, " <> Target :", ecsHeatTarget)
checkTemperatureValidity(ecsTemperature, mqttClient)
#Check against temperature target when ECS is ON and not in forced mode
if ((ecsState == ECS_STATE_ON) and (ecsStateForced == False)):
if (ecsTemperature > ecsHeatTarget):
targetReached = True
print("Heat Manager : Switching ECS OFF due to target temperature reached")
ecsState = ECS_STATE_OFF
mqttClient.publish("ECS/state", payload='1', qos=1, retain=True)
mqttClient.publish("ECS/target", payload='0', qos=1, retain=True)
ecsRemoteState = ECS_STATE_OFF
elif(msgType == "ECS_FORCE"):
print(getStatusString())
print ("HeatMgr : ecsState", ecsState)
if (msgValue == ECS_FORCE_OFF):
print("Heat Manager : Forcing ECS OFF")
ecsStateForced = True
print("\tHeat Manager : Switching ECS OFF")
mqttClient.publish("ECS/state", payload='1', qos=1, retain=True)
mqttClient.publish("ECS/target", payload='0', qos=1, retain=True)
ecsState = ECS_STATE_OFF
ecsRemoteState = ECS_STATE_OFF
elif (msgValue == ECS_FORCE_ON):
print("Heat Manager : Forcing ECS ON")
ecsStateForced = True
print("\tHeat Manager : Switching ECS ON")
mqttClient.publish("ECS/state", payload='2', qos=1, retain=True)
mqttClient.publish("ECS/target", payload='100', qos=1, retain=True)
ecsState = ECS_STATE_ON
ecsRemoteState = ECS_STATE_ON
elif (msgValue == ECS_FORCE_DISABLED):
print("Heat Manager : Disabling Forcing ECS")
ecsStateForced = False
print("\tHeat Manager FORCE DISABLED : Switching ECS OFF")
mqttClient.publish("ECS/state", payload='1', qos=1, retain=True)
mqttClient.publish("ECS/target", payload='0', qos=1, retain=True)
ecsState = ECS_STATE_OFF
ecsRemoteState = ECS_STATE_OFF
else:
print("Heat Manager : Unknown message value %s " % msgValue)
else:
print("Heat Manager : Unknown message type %s " % msgType)
#=========================================================================#
# Main... #
#=========================================================================#
def main():
print("STARTING main process")
config = configparser.ConfigParser()
config.read('myconf.conf')
mqttAddress = config.get('MQTT', 'mqttAddress')
mqttPort = int(config.get('MQTT', 'mqttPort'))
global heatMgrQueue
heatMgrQueue = Queue()
mqttClient = mqtt.Client(userdata=heatMgrQueue)
print ("heatMgrQueue : ")
print (heatMgrQueue)
mqttClient.on_connect = on_connect
mqttClient.on_message = on_message
mqttClient.connect(mqttAddress, mqttPort)
mqttClient.loop_start()
HeatManagerThread = Thread(target=heatManager, args=(heatMgrQueue,mqttClient,))
HeatManagerThread.start()
while 1:
time.sleep(1000)
if __name__ == '__main__':
main()
|
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import IECore
import Gaffer
import GafferUI
def appendDefinitions( menuDefinition, prefix="" ) :
menuDefinition.append( prefix + "/Undo", { "command" : undo, "shortCut" : "Ctrl+Z", "active" : __undoAvailable } )
menuDefinition.append( prefix + "/Redo", { "command" : redo, "shortCut" : "Shift+Ctrl+Z", "active" : __redoAvailable } )
menuDefinition.append( prefix + "/UndoDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Cut", { "command" : cut, "shortCut" : "Ctrl+X", "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Copy", { "command" : copy, "shortCut" : "Ctrl+C", "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Paste", { "command" : paste, "shortCut" : "Ctrl+V", "active" : __pasteAvailable } )
menuDefinition.append( prefix + "/Delete", { "command" : delete, "shortCut" : "Backspace, Delete", "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/CutCopyPasteDeleteDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Find...", { "command" : find, "shortCut" : "Ctrl+F" } )
menuDefinition.append( prefix + "/FindDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Select All", { "command" : selectAll, "shortCut" : "Ctrl+A" } )
menuDefinition.append( prefix + "/Select None", { "command" : selectNone, "shortCut" : "Shift+Ctrl+A", "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Select Connected/Inputs", { "command" : selectInputs, "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Select Connected/Add Inputs", { "command" : selectAddInputs, "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Select Connected/InputsDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Select Connected/Outputs", { "command" : selectOutputs, "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Select Connected/Add Outputs", { "command" : selectAddOutputs, "active" : __selectionAvailable } )
## A function suitable as the command for an Edit/Undo menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def undo( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
script.undo()
## A function suitable as the command for an Edit/Redo menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def redo( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
script.redo()
## A function suitable as the command for an Edit/Cut menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def cut( menu ) :
script, parent = __scriptAndParent( menu )
with Gaffer.UndoContext( script ) :
script.cut( parent, script.selection() )
## A function suitable as the command for an Edit/Copy menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def copy( menu ) :
script, parent = __scriptAndParent( menu )
script.copy( parent, script.selection() )
## A function suitable as the command for an Edit/Paste menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def paste( menu ) :
script, parent = __scriptAndParent( menu )
originalSelection = Gaffer.StandardSet( iter( script.selection() ) )
with Gaffer.UndoContext( script ) :
script.paste( parent )
# try to get the new nodes connected to the original selection
nodeGraph = __nodeGraph( menu, focussedOnly=False )
if nodeGraph is None :
return
nodeGraph.graphGadget().getLayout().connectNodes( nodeGraph.graphGadget(), script.selection(), originalSelection )
# position the new nodes sensibly
bound = nodeGraph.bound()
mousePosition = GafferUI.Widget.mousePosition()
if bound.intersects( mousePosition ) :
fallbackPosition = mousePosition - bound.min
else :
fallbackPosition = bound.center() - bound.min
fallbackPosition = nodeGraph.graphGadgetWidget().getViewportGadget().rasterToGadgetSpace(
IECore.V2f( fallbackPosition.x, fallbackPosition.y ),
gadget = nodeGraph.graphGadget()
).p0
fallbackPosition = IECore.V2f( fallbackPosition.x, fallbackPosition.y )
nodeGraph.graphGadget().getLayout().positionNodes( nodeGraph.graphGadget(), script.selection(), fallbackPosition )
## A function suitable as the command for an Edit/Delete menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def delete( menu ) :
script, parent = __scriptAndParent( menu )
with Gaffer.UndoContext( script ) :
script.deleteNodes( parent, script.selection() )
## A function suitable as the command for an Edit/Find menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def find( menu ) :
script, parent = __scriptAndParent( menu )
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
try :
findDialogue = scriptWindow.__findDialogue
except AttributeError :
findDialogue = GafferUI.NodeFinderDialogue( parent )
scriptWindow.addChildWindow( findDialogue )
scriptWindow.__findDialogue = findDialogue
findDialogue.setScope( parent )
findDialogue.setVisible( True )
## A function suitable as the command for an Edit/Select All menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectAll( menu ) :
script, parent = __scriptAndParent( menu )
for c in parent.children( Gaffer.Node.staticTypeId() ) :
script.selection().add( c )
## A function suitable as the command for an Edit/Select None menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectNone( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
script.selection().clear()
## The command function for the default "Edit/Select Connected/Inputs" menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectInputs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
inputs = Gaffer.StandardSet()
for node in script.selection() :
__inputNodes( node, inputs )
selection = script.selection()
selection.clear()
for node in inputs :
selection.add( node )
## The command function for the default "Edit/Select Connected/Add Inputs" menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectAddInputs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
inputs = Gaffer.StandardSet()
for node in script.selection() :
__inputNodes( node, inputs )
selection = script.selection()
for node in inputs :
selection.add( node )
## The command function for the default "Edit/Select Connected/Outputs" menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectOutputs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
outputs = Gaffer.StandardSet()
for node in script.selection() :
__outputNodes( node, outputs )
selection = script.selection()
selection.clear()
for node in outputs :
selection.add( node )
## The command function for the default "Edit/Select Connected/Add Outputs" menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectAddOutputs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
outputs = Gaffer.StandardSet()
for node in script.selection() :
__outputNodes( node, outputs )
selection = script.selection()
for node in outputs :
selection.add( node )
def __selectionAvailable( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
return bool( scriptWindow.scriptNode().selection().size() )
def __pasteAvailable( menu ) :
scriptNode = menu.ancestor( GafferUI.ScriptWindow ).scriptNode()
root = scriptNode.ancestor( Gaffer.ApplicationRoot.staticTypeId() )
return isinstance( root.getClipboardContents(), IECore.StringData )
def __nodeGraph( menu, focussedOnly=True ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
nodeGraph = None
## \todo Does this belong as a Window.focussedChild() method?
focusWidget = GafferUI.Widget._owner( scriptWindow._qtWidget().focusWidget() )
if focusWidget is not None :
nodeGraph = focusWidget.ancestor( GafferUI.NodeGraph )
if nodeGraph is not None or focussedOnly :
return nodeGraph
nodeGraphs = scriptWindow.getLayout().editors( GafferUI.NodeGraph )
return nodeGraphs[0] if nodeGraphs else None
def __scriptAndParent( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
nodeGraph = __nodeGraph( menu )
if nodeGraph is not None :
parent = nodeGraph.graphGadget().getRoot()
else :
parent = script
return script, parent
def __undoAvailable( menu ) :
scriptNode = menu.ancestor( GafferUI.ScriptWindow ).scriptNode()
return scriptNode.undoAvailable()
def __redoAvailable( menu ) :
scriptNode = menu.ancestor( GafferUI.ScriptWindow ).scriptNode()
return scriptNode.redoAvailable()
def __inputNodes( node, inputNodes ) :
def __walkPlugs( parent ) :
for plug in parent :
if isinstance( plug, Gaffer.Plug ) :
inputPlug = plug.getInput()
if inputPlug is not None :
inputNode = inputPlug.node()
if inputNode is not None and not inputNode.isSame( node ) :
inputNodes.add( inputNode )
else :
__walkPlugs( plug )
__walkPlugs( node )
def __outputNodes( node, outputNodes ) :
def __walkPlugs( parent ) :
for plug in parent :
if isinstance( plug, Gaffer.Plug ) :
outputPlugs = plug.outputs()
if outputPlugs :
for outputPlug in outputPlugs :
outputNode = outputPlug.node()
if outputNode is not None and not outputNode.isSame( node ) :
outputNodes.add( outputNode )
else :
__walkPlugs( plug )
__walkPlugs( node )
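## Illustrative sketch (not part of the original module) : appendDefinitions()
# only populates an IECore.MenuDefinition, so a definition can be built and
# inspected standalone. In a real session the definition would normally come
# from the application's script window menu; the standalone construction below
# is an assumption for demonstration purposes.
def __exampleBuildEditMenu( prefix="/Edit" ) :
	editMenu = IECore.MenuDefinition()
	appendDefinitions( editMenu, prefix=prefix )
	return editMenu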
|
|
import os
import time
import socket
import logging
import selectors
import threading
import collections
import synapse.glob as s_glob
import synapse.common as s_common
import synapse.eventbus as s_eventbus
import synapse.lib.queue as s_queue
import synapse.lib.config as s_config
import synapse.lib.msgpack as s_msgpack
import synapse.lib.threads as s_threads
logger = logging.getLogger(__name__)
'''
The synapse.lib.net module implements async networking helpers.
( and will slowly replace synapse.lib.socket )
'''
class Plex(s_config.Config):
'''
A highly-efficient selectors-based multi-plexor for sockets.
'''
def __init__(self, conf=None):
s_config.Config.__init__(self, conf)
self.epoll = selectors.DefaultSelector()
self.socks = {}
self.links = {}
self.thrd = s_threads.worker(self._runPollLoop)
self.onfini(self._onPlexFini)
pmax = self.getConfOpt('pool:max')
self.pool = s_threads.Pool(maxsize=pmax)
self.onfini(self.pool.fini)
self.polls = {}
@staticmethod
@s_config.confdef(name='plex')
def _initPlexConf():
return (
('pool:max', {'defval': 8, 'type': 'int',
'doc': 'The maximum number of threads in the thread pool'}),
)
def _runPollLoop(self):
fems = []
while not self.isfini:
try:
fems = self.epoll.select()
for (_, fino, events, _), mask in fems:
if self.isfini:
return
poll = self.polls.get(fino)
if poll is None:
sock = self.socks.get(fino)
if sock is not None:
self._finiPlexSock(sock)
continue
try:
poll(mask)
except Exception as e:
logger.exception('error during poll() callback')
except Exception as e:
if self.isfini:
continue
logger.exception('plex thread error: %r' % (e,))
if not fems:
time.sleep(0.035)
def _onPlexFini(self):
[l.fini() for l in list(self.links.values())]
[s.close() for s in list(self.socks.values())]
self.epoll.close()
self.thrd.join(timeout=1)
def _finiPlexSock(self, sock):
fino = sock.fileno()
self.socks.pop(fino, None)
poll = self.polls.pop(fino, None)
if poll is not None:
self.epoll.unregister(sock)
sock.close()
def listen(self, addr, onlink):
'''
Initiate a listening socket with a Link constructor.
Args:
addr ((str,int)): A (host,port) socket address.
onlink (function): A callback to receive newly connected SockLink.
Returns:
((str,int)): The bound (host,port) address tuple.
'''
sock = socket.socket()
sock.setblocking(False)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(addr)
sock.listen(120)
fino = sock.fileno()
def poll(flags):
if not flags & selectors.EVENT_READ:
self._finiPlexSock(sock)
errn = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
logger.warning('listen error: %d' % (errn,))
while True:
try:
news, addr = sock.accept()
self._setSockOpts(news)
link = self._initPlexSock(news)
try:
onlink(link)
except Exception as e:
logger.warning('listen() onlink error: %s' % (e,))
link.fini()
except BlockingIOError as e:
return
self.socks[fino] = sock
self.polls[fino] = poll
self.epoll.register(sock, selectors.EVENT_READ)
return sock.getsockname()
def _initPlexSock(self, sock):
fino = sock.fileno()
link = SockLink(self, sock)
self.links[fino] = link
self.polls[fino] = link.poll
if self.socks.get(fino) is None:
self.socks[fino] = sock
self.epoll.register(sock, link.flags)
else:
self.epoll.modify(sock, link.flags)
return link
def modify(self, sock, flags):
'''
Modify the epoll flags mask for the given file descriptor.
Args:
sock (socket): The socket to modify.
flags (int): The epoll flags mask.
'''
return self.epoll.modify(sock, flags)
def _setSockOpts(self, sock):
sock.setblocking(False)
# disable nagle ( to minimize latency for small xmit )
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# enable TCP keep alives...
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, 'TCP_KEEPIDLE'):
# start sending keep alives after 1 sec of inactivity
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 1)
# send keep alives every 3 seconds once started
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 3)
# close the socket after 5 failed keep alives (15 sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)
def connect(self, addr, onconn):
'''
Perform a non-blocking connect with the given callback function.
Args:
addr ((str,int)): A (host,port) socket address.
onconn (function): A callback (ok, link)
'''
sock = socket.socket()
self._setSockOpts(sock)
fino = sock.fileno()
def poll(flags):
ok = True
retn = None
errn = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if errn != 0:
ok = False
retn = errn
self._finiPlexSock(sock)
else:
ok = True
retn = self._initPlexSock(sock)
try:
onconn(ok, retn)
except Exception as e:
logger.exception('connect() onconn failed: %s' % (e,))
return
self.socks[fino] = sock
self.polls[fino] = poll
try:
sock.connect(addr)
# This path won't be exercised on Linux
poll(2)
except BlockingIOError as e:
# This is the Linux path
self.epoll.register(sock, selectors.EVENT_WRITE)
s_glob.plex = Plex() # type: ignore
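# Illustrative sketch (not part of the original module): wiring an echo server
# onto the shared plex. Only APIs defined above are used (listen(), onrx(), tx());
# the bind address and echo behaviour are assumptions for demonstration purposes.
def _example_echo_listen(addr=('127.0.0.1', 0)):
    def onlink(link):
        # echo every received message straight back to the sender
        link.onrx(lambda lnk, mesg: lnk.tx(mesg))
    # returns the bound (host, port) tuple
    return s_glob.plex.listen(addr, onlink)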
class Link(s_eventbus.EventBus):
'''
A message aware network connection abstraction.
'''
def __init__(self, link=None):
s_eventbus.EventBus.__init__(self)
self.info = {}
# TODO: heart beat via global sched
self.rxtime = None
self.txtime = None
self.rxfunc = None
self.finfunc = None
self.isrxfini = False
self.istxfini = False
self.link = link
self._mesg_funcs = self.handlers()
self.onfini(self._onLinkFini)
def chain(self, link):
link.onrx(self.rx)
self.onfini(link.fini)
link.onfini(self.fini)
def onmesg(self, name, func):
'''
Set a named message handler for the link.
'''
self._mesg_funcs[name] = func
def onrx(self, func):
'''
Register a callback to receive (link, mesg) tuples.
'''
self.rxfunc = func
def _onLinkFini(self):
self.txfini()
self.rxfini()
def rxfini(self):
'''
Called when the remote link has sent fini.
'''
if self.isrxfini:
return
self.isrxfini = True
self.fire('rx:fini')
if self.istxfini:
self.fini()
def txfini(self, data=s_common.novalu):
'''
Annotate that there is nothing more to send.
'''
if data is not s_common.novalu:
self.tx(data)
if self.istxfini:
return
self.istxfini = True
self.fire('tx:fini')
if self.isrxfini:
self.fini()
def handlers(self):
'''
Return a dict of <mesg>:<func> handlers for this link layer.
'''
return {}
def getLinkProp(self, name, defval=None):
'''
Return a previously set link property.
Args:
name (str): The property name.
defval (obj): The default value.
Returns:
(obj): The property value or defval.
'''
return self.info.get(name, defval)
def setLinkProp(self, name, valu):
'''
Set a link property.
Args:
name (str): The property name.
valu (obj): The property value.
'''
self.info[name] = valu
def rx(self, link, mesg):
'''
Recv a message on this link and dispatch the message.
Args:
link (Link): The link.
mesg ((str,dict)): A message tufo.
'''
if self.isfini:
return
self.rxtime = s_common.now()
if self.rxfunc is not None:
try:
return self.rxfunc(self, mesg)
except Exception as e:
logger.exception('%s.rxfunc() failed on: %r' % (self.__class__.__name__, mesg))
self.fini()
return
try:
func = self._mesg_funcs.get(mesg[0])
except Exception as e:
logger.exception('link %s: rx mesg exception: %s' % (self.__class__.__name__, e))
self.fini()
return
if func is None:
logger.warning('link %s: unknown message type %s' % (self.__class__.__name__, mesg[0]))
return
try:
func(link, mesg)
except Exception as e:
logger.exception('link %s: rx exception: %s' % (self.__class__.__name__, e))
self.fini()
def tx(self, mesg):
'''
Transmit a message via this link.
Args:
mesg ((str,dict)): A message tufo.
'''
if self.istxfini:
return
self.txtime = s_common.now()
self._tx_real(mesg)
def txok(self, retn, fini=False):
self.tx((True, retn))
if fini:
self.txfini()
def txerr(self, enfo, fini=False):
self.tx((False, enfo))
if fini:
self.txfini()
def _tx_real(self, mesg):
return self.link.tx(mesg)
def __repr__(self):
rstr = self.getLinkProp('repr')
return '%s: %s at %s' % (self.__class__.__name__, rstr, hex(id(self)))
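# Illustrative sketch (not part of the original module): a minimal Link subclass
# showing how handlers() maps message names to callbacks. The 'ping'/'pong'
# message names are assumptions used purely for demonstration.
class _ExamplePingLink(Link):
    def handlers(self):
        return {
            'ping': self._onPing,
        }
    def _onPing(self, link, mesg):
        # mesg is a (name, info) tufo; reply over our own tx path
        self.tx(('pong', {}))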
class Chan(Link):
def __init__(self, plex, iden, txinit=True):
Link.__init__(self, plex)
self._chan_rxq = None
self._chan_iden = iden
self._chan_txinit = txinit
def iden(self):
return self._chan_iden
def _tx_real(self, mesg):
name = 'data'
if self._chan_txinit:
self._chan_txinit = False
name = 'init'
self.link.tx((name, {'chan': self._chan_iden, 'data': mesg}))
def txfini(self, data=s_common.novalu):
name = 'fini'
info = {'chan': self._chan_iden}
if data is not s_common.novalu:
info['data'] = data
# check for syn/psh/fin
if self._chan_txinit:
self._chan_txinit = False
name = 'init'
info['fini'] = True
self.link.tx((name, info))
def setq(self):
'''
Set this Chan to use a Queue for rx.
'''
if self._chan_rxq is not None:
return
self._chan_rxq = s_queue.Queue()
def rx(link, mesg):
self._chan_rxq.put(mesg)
def rxfini(mesg):
self._chan_rxq.done()
self.onrx(rx)
self.on('rx:fini', rxfini)
self.onfini(self._chan_rxq.done)
def next(self, timeout=None):
return self._chan_rxq.get(timeout=timeout)
def slice(self, size, timeout=None):
return self._chan_rxq.slice(size, timeout=timeout)
def iter(self, timeout=None):
while not self.isfini:
yield self._chan_rxq.get(timeout=timeout)
def rxwind(self, timeout=None):
'''
Yield items from a txwind caller.
'''
self.setq()
while not self.isfini:
for ok, retn in self.slice(1000, timeout=timeout):
if not ok:
if retn is not None:
logger.warning('rxwind(): %r' % (retn,))
return
self.tx((True, True))
yield retn
def txwind(self, items, size, timeout=None):
'''
Execute a windowed transmission loop from a generator.
'''
wind = 0
try:
for item in items:
self.tx((True, item))
wind += 1
while wind >= size:
acks = self.slice(wind, timeout=timeout)
wind -= len(acks)
except Exception as e:
enfo = s_common.getexcfo(e)
self.txerr(enfo)
logger.exception('tx wind genr')
return
self.tx((False, None))
while wind > 0:
try:
acks = self.slice(wind, timeout=timeout)
wind -= len(acks)
except Exception as e:
print('TXWIND REMAIN WIND: %r' % (wind,))
raise
return True
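# Illustrative sketch (not part of the original module): how the windowed
# helpers above pair up. One side pushes a generator with txwind() while the
# other consumes it with rxwind(); the window size and timeouts below are
# assumptions for demonstration purposes.
def _example_txwind_side(chan, items, size=100):
    # sender side: queue acks locally, then push the generator with flow control
    chan.setq()
    return chan.txwind(items, size, timeout=30)
def _example_rxwind_side(chan):
    # receiver side: rxwind() yields each item and acks it back to the sender
    for item in chan.rxwind(timeout=30):
        yield item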
class ChanPlex(Link):
'''
A Link which has multiple channels.
'''
def __init__(self, onchan=None):
Link.__init__(self)
self.onchan = onchan
self.chans = s_eventbus.BusRef()
# TODO: chan timeouts... (maybe add to BusRef?)
self.onfini(self.chans.fini)
def handlers(self):
return {
'init': self._onChanInit,
'data': self._onChanData,
'fini': self._onChanFini,
}
def _onChanInit(self, link, mesg):
iden = mesg[1].get('chan')
data = mesg[1].get('data', s_common.novalu)
chan = self.chans.get(iden)
if chan is not None:
# an init for an existing chan
# (return init message from our tx)
if data is not s_common.novalu:
chan.rx(self, data)
return
if self.onchan is None:
logger.warning('%r: got init without onchan: %r' % (self, iden))
return
chan = self.initPlexChan(iden, txinit=False)
chan.setLinkProp('plex:recv', True)
self.chans.put(iden, chan)
chan.setLinkProp('plex:link', link)
try:
self.onchan(chan)
except Exception as e:
logger.exception('onchan (%r) failed: %s' % (self.onchan, e))
chan.fini()
return
if data is not s_common.novalu:
chan.rx(self, data)
# syn/psh/fin ;)
if mesg[1].get('fini'):
chan.rxfini()
def _tx_real(self, mesg):
iden = mesg[1].get('chan')
chan = self.chans.get(iden)
if chan is None:
logger.warning('tx() for missing chan %r' % (mesg,))
return
link = chan.getLinkProp('plex:link', defval=self.link)
if link is None:
logger.warning('tx() for chan without link: %r' % (iden,))
return
return link.tx(mesg)
def _onChanData(self, link, mesg):
iden = mesg[1].get('chan')
data = mesg[1].get('data')
chan = self.chans.get(iden)
if chan is None:
# There are many chan shutdown instances where this is ok
logger.info('chan data for missing chan: %r (link: %r)' % (iden, link))
return
chan.setLinkProp('plex:link', link)
chan.rx(self, data)
def _onChanFini(self, link, mesg):
# this message means the remote end is done sending
# ( and does not by itself fini() the chan )
iden = mesg[1].get('chan')
data = mesg[1].get('data', s_common.novalu)
chan = self.chans.get(iden)
if chan is None:
return
chan.setLinkProp('plex:link', link)
if data is not s_common.novalu:
chan.rx(self, data)
chan.rxfini()
def initPlexChan(self, iden, txinit=True):
chan = Chan(self, iden, txinit=txinit)
chan.info.update(self.info)
self.chans.put(iden, chan)
return chan
def open(self, link):
iden = os.urandom(16)
chan = self.initPlexChan(iden, txinit=True)
chan.setLinkProp('plex:link', link)
chan.setLinkProp('plex:open', True)
return chan
class SockLink(Link):
'''
A Link implements Plex aware non-blocking operations for a socket.
'''
def __init__(self, plex, sock):
Link.__init__(self, None)
self.plex = plex
self.sock = sock
self.txbuf = b''
self.txque = collections.deque() # (byts, info)
self.txlock = threading.Lock()
self.unpk = s_msgpack.Unpk()
self.flags = selectors.EVENT_READ
def fini():
self.plex._finiPlexSock(self.sock)
self.onfini(fini)
def poll(self, flags):
'''
Handle an epoll event for this Link's socket.
Args:
flags (int): The epoll return flags.
'''
try:
txdone = False
if flags & selectors.EVENT_READ:
self._rxloop()
# chances are, after an rxloop(), a txloop() is needed...
self._txloop()
txdone = True
if flags & selectors.EVENT_WRITE and not txdone:
self._txloop()
except Exception as e:
logger.exception('error during epoll event: %s for %r' % (e, self.sock))
self.fini()
def tx(self, mesg, fini=False):
'''
Transmit the message on the socket.
Args:
mesg ((str,dict)): A message tufo.
'''
byts = s_msgpack.en(mesg)
return self._add_tx(byts)
def _add_tx(self, byts):
with self.txlock:
self.txque.append(byts)
if self.flags & selectors.EVENT_WRITE:
return
self.flags |= selectors.EVENT_WRITE
self.plex.modify(self.sock, self.flags)
def _rxbytes(self, size):
'''
Try to recv size bytes.
Args:
size (int): The number of bytes to recv.
Returns:
(bytes): The bytes, or None if the recv would block.
'''
try:
rv = self.sock.recv(size)
if rv == b'':
raise ConnectionError
return rv
except ConnectionError as e:
return b''
except BlockingIOError as e:
return None
def _rxloop(self):
while not self.isfini:
byts = self._rxbytes(1024000)
if byts is None:
return
if not byts:
self.fini()
return
for size, mesg in self.unpk.feed(byts):
try:
self.rx(self, mesg)
except Exception as e:
logger.exception('rxloop() error processing mesg: %r' % (mesg,))
def _txloop(self):
with self.txlock:
while not self.isfini:
if self.txbuf:
try:
sent = self.sock.send(self.txbuf)
# if we didn't send anything, gtfo
if sent == 0:
return
except BlockingIOError as e:
# we cant send any more without blocking
return
except BrokenPipeError as e:
logger.debug('tx broken pipe: ignore...')
return
self.txbuf = self.txbuf[sent:]
# if we still have a txbuf, we've done all we can
if self.txbuf:
return
# no more txbuf... are we done?
if not self.txque:
if self.istxfini:
self.fini()
return
self.flags &= ~selectors.EVENT_WRITE
self.plex.modify(self.sock, self.flags)
return
self.txbuf = self.txque.popleft()
class LinkDisp:
'''
The Link Dispatcher ensures sequential/bulk processing
which executes from the global thread pool as needed.
This can be used to create transaction boundaries across
multiple links or prevent the need to permanently eat threads.
Example:
def func(items):
with getFooXact() as xact:
for link, mesg in items:
xact.dostuff(mesg)
link.tx(True)
disp = LinkDisp(func)
chan.onrx(disp.rx)
'''
def __init__(self, func):
self.func = func
self.lock = threading.Lock()
self.items = collections.deque()
self.working = False
def rx(self, link, item):
with self.lock:
self.items.append((link, item))
if not self.working:
self.working = True
self._runItemsFunc()
@s_glob.inpool
def _runItemsFunc(self):
while True:
with self.lock:
items = self.items
if not items:
self.working = False
return
self.items = collections.deque()
try:
self.func(items)
except Exception as e:
logger.exception('LinkDisp callback error')
|
|
import json
import logging
import mimetypes
import os
from typing import Dict, List, Optional
from urllib.parse import quote
from uuid import uuid4
import requests
# noinspection PyPackageRequirements
from slugify import slugify
from federation.entities.base import Post, Profile
from federation.entities.matrix.enums import EventType
from federation.entities.mixins import BaseEntity
from federation.entities.utils import get_base_attributes, get_profile
from federation.utils.django import get_configuration
from federation.utils.matrix import get_matrix_configuration, appservice_auth_header
from federation.utils.network import fetch_document, fetch_file
logger = logging.getLogger("federation")
class MatrixEntityMixin(BaseEntity):
_event_type: str = None
_payloads: List[Dict] = []
_profile_room_id = None
_txn_id: str = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# We always require an mxid
self._required.append('mxid')
# Create a transaction ID
self._txn_id = str(uuid4())
@property
def event_type(self) -> str:
return self._event_type
@classmethod
def from_base(cls, entity):
# type: (BaseEntity) -> MatrixEntityMixin
# noinspection PyArgumentList
return cls(**get_base_attributes(entity))
# noinspection PyMethodMayBeStatic
def get_endpoint(self) -> str:
config = get_matrix_configuration()
return f"{config['homeserver_base_url']}/_matrix/client/r0"
# noinspection PyMethodMayBeStatic
def get_endpoint_media(self) -> str:
config = get_matrix_configuration()
return f"{config['homeserver_base_url']}/_matrix/media/r0"
def get_profile_room_id(self):
# TODO: we should cache these.
doc, status, error = fetch_document(
url=f"{self.get_endpoint()}/directory/room/{self.profile_room_alias_url_safe}",
extra_headers=appservice_auth_header(),
)
if status == 200:
data = json.loads(doc)
self._profile_room_id = data["room_id"]
# noinspection PyMethodMayBeStatic
def payloads(self) -> List[Dict]:
return self._payloads
@property
def profile_room_alias(self):
return f"#{self.mxid}"
@property
def profile_room_alias_url_safe(self):
return f"{quote(self.profile_room_alias)}"
@property
def server_name(self) -> str:
config = get_matrix_configuration()
return config['homeserver_name']
@property
def txn_id(self) -> str:
return self._txn_id
class MatrixRoomMessage(Post, MatrixEntityMixin):
_event_type = EventType.ROOM_MESSAGE.value
_thread_room_event_id: str = None
_thread_room_id: str = None
def add_tag_room_payloads(self, tag_room_id: str):
self._payloads.append({
"endpoint": f"{super().get_endpoint()}/rooms/{tag_room_id}/join?user_id={self.mxid}",
"payload": {},
})
self._payloads.append({
# TODO at some point we'll need to track the event_id's, for now just random
# When we start listening to events from the other side, we'll need to filter
# the ones we sent. Additionally if there is going to be some kind of symlink MSC,
# we're going to want to stop carbon copying to many rooms.
"endpoint": f"{super().get_endpoint()}/rooms/{tag_room_id}/send/{self.event_type}/"
f"{str(uuid4())}?user_id={self.mxid}",
"payload": {
"body": self.raw_content,
"msgtype": "m.text",
"format": "org.matrix.custom.html",
"formatted_body": self.rendered_content,
},
"method": "put",
})
def create_tag_room(self, tag: str) -> str:
headers = appservice_auth_header()
config = get_configuration()
topic = f"Content for the tag #{tag}."
if config.get("tags_path"):
topic += f" Mirrored from {config['base_url']}{config['tags_path'].replace(':tag:', slugify(tag))}"
matrix_config = get_matrix_configuration()
response = requests.post(
url=f"{super().get_endpoint()}/createRoom",
json={
"preset": "public_chat",
"name": f"#{tag} ({matrix_config['appservice']['shortcode']} | {matrix_config['homeserver_name']})",
"room_alias_name": self.get_tag_room_alias_localpart(tag).strip('#'),
"topic": topic,
},
headers=headers,
)
response.raise_for_status()
room_id = response.json()["room_id"]
self._payloads.append({
"endpoint": f"{super().get_endpoint()}/directory/list/room/{room_id}",
"payload": {
"visibility": "public",
},
"method": "put",
})
return room_id
def create_thread_room(self):
headers = appservice_auth_header()
# Create the thread room
response = requests.post(
url=f"{super().get_endpoint()}/createRoom?user_id={self.mxid}",
json={
# TODO auto-invite other recipients if private chat
"preset": "public_chat" if self.public else "private_chat",
"name": f"Thread by {self.mxid}",
"topic": self.url,
},
headers=headers,
)
response.raise_for_status()
self._thread_room_id = response.json()["room_id"]
# Send the thread message
response = requests.put(
url=f"{super().get_endpoint()}/rooms/{self._thread_room_id}/send/{self.event_type}/"
f"{str(uuid4())}?user_id={self.mxid}",
json={
"body": self.raw_content,
"msgtype": "m.text",
"format": "org.matrix.custom.html",
"formatted_body": self.rendered_content,
},
headers=headers,
)
response.raise_for_status()
self._thread_room_event_id = response.json()["event_id"]
def get_profile_room_id(self):
super().get_profile_room_id()
if not self._profile_room_id:
from federation.entities.matrix.mappers import get_outbound_entity
# Need to also create the profile
profile = get_profile(self.actor_id)
profile_entity = get_outbound_entity(profile, None)
payloads = profile_entity.payloads()
if payloads:
self._payloads.extend(payloads)
@staticmethod
def get_tag_room_alias_localpart(tag: str) -> str:
config = get_matrix_configuration()
return f"#_{config['appservice']['shortcode']}_#{slugify(tag)}"
def get_tag_room_alias_url_safe(self, tag: str) -> str:
return quote(f"{self.get_tag_room_alias_localpart(tag)}:{self.server_name}")
def get_tag_room_id(self, tag: str) -> Optional[str]:
# TODO: we should cache these.
doc, status, error = fetch_document(
url=f"{self.get_endpoint()}/directory/room/{self.get_tag_room_alias_url_safe(tag)}",
extra_headers=appservice_auth_header(),
)
if status == 200:
data = json.loads(doc)
return data["room_id"]
def payloads(self) -> List[Dict]:
payloads = super().payloads()
payloads.append({
"endpoint": f"{super().get_endpoint()}/rooms/{self._profile_room_id}/send/{self.event_type}/"
f"{self.txn_id}?user_id={self.mxid}",
"payload": {
"body": self.raw_content,
"msgtype": "m.text",
"format": "org.matrix.custom.html",
"formatted_body": self.rendered_content,
# Fields to emulate Cerulean
"org.matrix.cerulean.event_id": self._thread_room_event_id,
"org.matrix.cerulean.room_id": self._thread_room_id,
"org.matrix.cerulean.root": True,
},
"method": "put",
})
# Tag the thread room as low priority
payloads.append({
"endpoint": f"{super().get_endpoint()}/user/{self.mxid}/rooms/{self._thread_room_id}/tags/m.lowpriority"
f"?user_id={self.mxid}",
"payload": {
"order": 0,
},
"method": "put",
})
return payloads
def pre_send(self):
"""
Do various pre-send things.
"""
super().pre_send()
# Get profile room ID
self.get_profile_room_id()
# Upload embedded images and replace the HTTP urls in the message with MXC urls so clients show the images
self.upload_embedded_images()
# Create thread room
self.create_thread_room()
# Process tags if public post
if self.public:
for tag in self.tags:
tag_room_id = self.get_tag_room_id(tag)
if not tag_room_id:
# noinspection PyBroadException
try:
tag_room_id = self.create_tag_room(tag)
except Exception as ex:
logger.warning("Failed to create tag room for tag %s for post %s: %s", tag, self.id, ex)
continue
self.add_tag_room_payloads(tag_room_id)
def upload_embedded_images(self):
"""
Upload embedded images
Replaces the HTTP urls in the message with MXC urls so that Matrix clients will show the images.
"""
for image in self.embedded_images:
url, name = image
headers = appservice_auth_header()
content_type, _encoding = mimetypes.guess_type(url)
headers["Content-Type"] = content_type
# Random name if none
if not name:
name = f"{uuid4()}{mimetypes.guess_extension(content_type, strict=False)}"
# Need to fetch it locally first
# noinspection PyBroadException
try:
image_file = fetch_file(url=url, timeout=60)
except Exception as ex:
logger.warning("MatrixRoomMessage.pre_send | Failed to retrieve image %s to be uploaded: %s",
url, ex)
continue
# Then upload
headers["Content-Length"] = str(os.stat(image_file).st_size)
# noinspection PyBroadException
try:
with open(image_file, "rb") as f:
response = requests.post(
f"{super().get_endpoint_media()}/upload?filename={quote(name)}&user_id={self.mxid}",
data=f.read(),
headers=headers,
timeout=60,
)
response.raise_for_status()
except Exception as ex:
logger.warning("MatrixRoomMessage.pre_send | Failed to upload image %s: %s",
url, ex)
continue
finally:
os.unlink(image_file)
# Replace in raw content
try:
logger.debug("MatrixRoomMessage.pre_send | Got response %s", response.json())
content_uri = response.json()["content_uri"]
self.raw_content = self.raw_content.replace(url, content_uri)
except Exception as ex:
logger.error("MatrixRoomMessage.pre_send | Failed to find content_uri from the image upload "
"response: %s", ex)
class MatrixProfile(Profile, MatrixEntityMixin):
_remote_profile_create_needed = False
_remote_room_create_needed = False
def create_profile_room(self):
headers = appservice_auth_header()
response = requests.post(
url=f"{super().get_endpoint()}/createRoom?user_id={self.mxid}",
json={
"name": self.name,
"preset": "public_chat" if self.public else "private_chat",
"room_alias_name": f"@{self.localpart}",
"topic": f"Profile room of {self.url}",
},
headers=headers,
)
response.raise_for_status()
self._profile_room_id = response.json()["room_id"]
def register_user(self):
headers = appservice_auth_header()
response = requests.post(
url=f"{super().get_endpoint()}/register",
json={
"username": f"{self.localpart}",
"type": "m.login.application_service",
},
headers=headers,
)
response.raise_for_status()
@property
def localpart(self) -> str:
return self.mxid.replace("@", "").replace(f":{self.server_name}", "")
def payloads(self) -> List[Dict]:
payloads = super().payloads()
if self._remote_profile_create_needed:
self.register_user()
if self._remote_room_create_needed:
self.create_profile_room()
payloads.append({
"endpoint": f"{super().get_endpoint()}/profile/{self.mxid}/displayname?user_id={self.mxid}",
"payload": {
"displayname": self.name,
},
"method": "put",
})
# TODO avatar url in mxc format
return payloads
def pre_send(self):
"""
Check whether we need to create the user or their profile room.
"""
doc, status, error = fetch_document(
url=f"{super().get_endpoint()}/profile/{self.mxid}",
extra_headers=appservice_auth_header(),
)
if status != 200:
self._remote_profile_create_needed = True
else:
self.get_profile_room_id()
if self._remote_profile_create_needed or not self._profile_room_id:
self._remote_room_create_needed = True
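# Illustrative sketch (not part of the original module): how the payload dicts
# produced by payloads() could be delivered. The "endpoint"/"payload"/"method"
# keys mirror the dicts built above; treating a missing "method" as POST is an
# assumption for demonstration purposes.
def _example_deliver_payloads(entity: MatrixEntityMixin):
    headers = appservice_auth_header()
    for item in entity.payloads():
        method = item.get("method", "post")
        func = requests.put if method == "put" else requests.post
        # each payload dict carries its own endpoint and JSON body
        response = func(url=item["endpoint"], json=item["payload"], headers=headers)
        response.raise_for_status()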
|
|
"""
Unittests for gj2ascii.core
"""
from collections import OrderedDict
import itertools
import os
import unittest
import emoji
import fiona as fio
import pytest
import gj2ascii
import gj2ascii.core
import numpy as np
@pytest.fixture(scope='function')
def geo_interface_feature():
class GIFeature(object):
__geo_interface__ = {
'type': 'Feature',
'properties': {},
'geometry': {
'type': 'Point',
'coordinates': [10, 20, 30]
}
}
return GIFeature()
@pytest.fixture(scope='function')
def geo_interface_geometry():
class GIGeometry(object):
__geo_interface__ = {
'type': 'Polygon',
'coordinates': [[(1.23, -56.5678), (4.897, 20.937), (9.9999999, -23.45)]]
}
return GIGeometry()
@pytest.fixture(scope='function')
def feature():
return {
'type': 'Feature',
'properties': {},
'geometry': {
'type': 'Line',
'coordinates': ((1.23, -67.345), (87.12354, -23.4555), (123.876, -78.9444))
}
}
@pytest.fixture(scope='function')
def geometry():
return {
'type': 'Point',
'coordinates': (0, 0, 10)
}
@pytest.fixture(scope='function')
def ascii():
return os.linesep.join([
'* * * * *',
' * * ',
'* * * * *'
])
@pytest.fixture(scope='function')
def array():
return [
['*', '*', '*', '*', '*'],
[' ', '*', ' ', '*', ' '],
['*', '*', '*', '*', '*']]
@pytest.fixture(scope='function')
def np_array(array):
return np.array(array)
def test_compare_ascii(compare_ascii):
block = """
line1
line2
something
a n o t h e r line
None
6789.2349
"""
assert compare_ascii(block, block) is True
def test_dict2table_empty_dict():
with pytest.raises(ValueError):
gj2ascii.dict2table({})
def test_dict2table():
test_dict = OrderedDict((
('Field1', None),
('__something', 'a string'),
('more', 12345),
('other', 1.2344566)
))
expected = """
+-------------+-----------+
| Field1 | None |
| __something | a string |
| more | 12345 |
| other | 1.2344566 |
+-------------+-----------+
""".strip()
assert gj2ascii.dict2table(test_dict) == expected
def test_render_exception():
with pytest.raises(TypeError):
gj2ascii.render([], None, fill='asdf')
with pytest.raises(TypeError):
gj2ascii.render([], None, char='asdf')
with pytest.raises(ValueError):
gj2ascii.render([], width=-1)
def test_render_compare_bbox_given_vs_detect_collection_vs_compute_vs_as_generator(poly_file):
# Easiest to compare these 3 things together since they are related
with fio.open(poly_file) as src:
given = gj2ascii.render(src, 15, bbox=src.bounds)
computed = gj2ascii.render([i for i in src], 15)
fio_collection = gj2ascii.render(src, 15)
# Passing in a generator and not specifying x/y min/max requires the features to
# be iterated over twice which is a problem because generators cannot be reset.
# A backup of the generator should be created automatically and iterated over the
# second time.
generator_output = gj2ascii.render((f for f in src), 15)
for pair in itertools.combinations(
[given, computed, fio_collection, generator_output], 2):
assert len(set(pair)) == 1
def test_with_fio(expected_polygon_40_wide, poly_file):
with fio.open(poly_file) as src:
r = gj2ascii.render(src, width=40, fill='.', char='+', bbox=src.bounds)
assert expected_polygon_40_wide == r.rstrip()
def test_geometry_extractor_exceptions():
with pytest.raises(TypeError):
next(gj2ascii.core._geometry_extractor([{'type': None}]))
def test_single_object(geometry, feature, geo_interface_feature, geo_interface_geometry):
assert geometry == next(gj2ascii.core._geometry_extractor(geometry))
assert feature['geometry'] == next(gj2ascii.core._geometry_extractor(feature))
assert geo_interface_feature.__geo_interface__['geometry'] == next(gj2ascii.core._geometry_extractor(geo_interface_feature))
assert geo_interface_geometry.__geo_interface__ == next(gj2ascii.core._geometry_extractor(geo_interface_geometry))
def test_multiple_homogeneous(geometry, feature, geo_interface_geometry, geo_interface_feature):
for item in gj2ascii.core._geometry_extractor(
(geometry, geometry, geometry)):
assert item == geometry
for item in gj2ascii.core._geometry_extractor(
(feature, feature, feature)):
assert item == feature['geometry']
for item in gj2ascii.core._geometry_extractor(
(geo_interface_geometry, geo_interface_geometry, geo_interface_geometry)):
assert item == geo_interface_geometry.__geo_interface__
for item in gj2ascii.core._geometry_extractor(
(geo_interface_feature, geo_interface_feature, geo_interface_feature)):
assert item == geo_interface_feature.__geo_interface__['geometry']
def test_multiple_heterogeneous(geometry, feature, geo_interface_feature, geo_interface_geometry):
input_objects = (geometry, feature, geo_interface_feature, geo_interface_geometry)
expected = (
geometry, feature['geometry'],
geo_interface_feature.__geo_interface__['geometry'],
geo_interface_geometry.__geo_interface__
)
for expected, actual in zip(
expected, gj2ascii.core._geometry_extractor(input_objects)):
assert expected == actual
def test_standard():
l1 = gj2ascii.array2ascii([['*', '*', '*', '*', '*'],
[' ', ' ', '*', ' ', ' '],
['*', '*', ' ', ' ', ' ']])
l2 = gj2ascii.array2ascii([[' ', ' ', ' ', '+', '+'],
[' ', '+', ' ', ' ', ' '],
[' ', ' ', '+', '+', '+']])
eo = gj2ascii.array2ascii([['*', '*', '*', '+', '+'],
['.', '+', '*', '.', '.'],
['*', '*', '+', '+', '+']])
assert gj2ascii.stack(
    [l1, l2], fill='.').strip(os.linesep) == eo.strip(os.linesep)
def test_exceptions():
# Bad fill value
with pytest.raises(ValueError):
gj2ascii.stack([], fill='too-long')
# Input layers have different dimensions
with pytest.raises(ValueError):
gj2ascii.stack(['1', '1234'])
def test_single_layer(compare_ascii):
l1 = gj2ascii.array2ascii([['*', '*', '*', '*', '*'],
[' ', ' ', '*', ' ', ' '],
['*', '*', ' ', ' ', ' ']])
assert compare_ascii(l1, gj2ascii.stack([l1]))
def test_ascii2array(array, ascii):
assert array == gj2ascii.ascii2array(ascii)
assert np.array_equal(array, np.array(gj2ascii.ascii2array(ascii)))
def test_array2ascii(ascii, array):
assert ascii == gj2ascii.array2ascii(array)
assert ascii == gj2ascii.array2ascii(array)
def test_roundhouse(ascii, array):
assert ascii == gj2ascii.array2ascii(gj2ascii.ascii2array(ascii))
assert array == gj2ascii.ascii2array(gj2ascii.array2ascii(array))
def test_style():
array = [['0', '0', '0', '1', '0'],
[' ', ' ', '2', '0', '1'],
['1', '1', '2', '1', '3']]
colormap = {
' ': 'black',
'0': 'blue',
'1': 'yellow',
'2': 'white',
'3': 'red'
}
expected = []
for row in array:
o_row = []
for char in row:
color = gj2ascii.ANSI_COLORMAP[colormap[char]]
o_row.append(color + char + ' ' + gj2ascii.core._ANSI_RESET)
expected.append(''.join(o_row))
expected = os.linesep.join(expected)
assert expected == gj2ascii.style(gj2ascii.array2ascii(array), stylemap=colormap)
def test_paginate(poly_file):
char = '+'
fill = '.'
colormap = {
'+': 'red',
'.': 'black'
}
with fio.open(poly_file) as src1, fio.open(poly_file) as src2:
for paginated_feat, feat in zip(
gj2ascii.paginate(src1, char=char, fill=fill, colormap=colormap), src2):
assert paginated_feat.strip() == gj2ascii.style(
gj2ascii.render(feat, char=char, fill=fill), stylemap=colormap)
def test_bbox_from_arbitrary_iterator(poly_file):
# Python 2 doesn't give direct access to an object that can be used to check if an object
# is an instance of tee
pair = itertools.tee(range(10))
itertools_tee_type = pair[1].__class__
with fio.open(poly_file) as c_src, \
fio.open(poly_file) as l_src, \
fio.open(poly_file) as g_src,\
fio.open(poly_file) as expected:
# Tuple element 1 is an iterable object to test and element 2 is the expected type of
# the output iterator
test_objects = [
(c_src, fio.Collection),
([i for i in l_src], list),
((i for i in g_src), itertools_tee_type)
]
for in_obj, e_type in test_objects:
bbox, iterator = gj2ascii.core.min_bbox(in_obj, return_iter=True)
assert bbox == expected.bounds, \
"Bounds don't match: %s != %s" % (bbox, expected.bounds)
assert isinstance(iterator, e_type), "Output iterator is %s" % iterator
for e, a in zip(expected, iterator):
assert e['id'] == a['id'], "%s != %s" % (e['id'], a['id'])
def test_render_multiple(poly_file, line_file, point_file, compare_ascii):
with fio.open(poly_file) as poly, \
fio.open(line_file) as lines, \
fio.open(point_file) as points:
coords = list(poly.bounds) + list(lines.bounds) + list(points.bounds)
bbox = (min(coords[0::4]), min(coords[1::4]), max(coords[2::4]), max(coords[3::4]))
width = 20
lyr_char_pairs = [(poly, '+'), (lines, '-'), (points, '*')]
actual = gj2ascii.render_multiple(lyr_char_pairs, width, fill='#')
rendered_layers = []
for l, char in lyr_char_pairs:
rendered_layers.append(gj2ascii.render(l, width, fill=' ', bbox=bbox, char=char))
expected = gj2ascii.stack(rendered_layers, fill='#')
assert compare_ascii(actual.strip(), expected.strip())
def test_render_exceptions():
for arg in ('fill', 'char'):
with pytest.raises(ValueError):
gj2ascii.render([], **{arg: 'too long'})
for w in (0, -1, -1000):
with pytest.raises(ValueError):
gj2ascii.render([], width=w)
def test_style_multiple(poly_file, line_file, point_file):
with fio.open(poly_file) as poly, \
fio.open(line_file) as lines, \
fio.open(point_file) as points:
coords = list(poly.bounds) + list(lines.bounds) + list(points.bounds)
bbox = (min(coords[0::4]), min(coords[1::4]), max(coords[2::4]), max(coords[3::4]))
width = 20
# Mix of colors and emoji with a color fill
lyr_color_pairs = [(poly, ':+1:'), (lines, 'blue'), (points, 'red')]
actual = gj2ascii.style_multiple(
lyr_color_pairs, fill='yellow', width=width, bbox=bbox)
assert emoji.unicode_codes.EMOJI_ALIAS_UNICODE[':+1:'] in actual
assert '\x1b[34m\x1b[44m' in actual # blue
assert '\x1b[31m\x1b[41m' in actual # red
assert '\x1b[33m\x1b[43m' in actual # yellow
# Same as above but single character fill
lyr_color_pairs = [(poly, ':+1:'), (lines, 'blue'), (points, 'red')]
actual = gj2ascii.style_multiple(
lyr_color_pairs, fill='.', width=width, bbox=bbox)
assert emoji.unicode_codes.EMOJI_ALIAS_UNICODE[':+1:'] in actual
assert '\x1b[34m\x1b[44m' in actual # blue
assert '\x1b[31m\x1b[41m' in actual # red
assert '.' in actual
# Same as above but emoji fill
lyr_color_pairs = [(poly, ':+1:'), (lines, 'blue'), (points, 'red')]
actual = gj2ascii.style_multiple(
lyr_color_pairs, fill=':water_wave:', width=width, bbox=bbox)
assert emoji.unicode_codes.EMOJI_ALIAS_UNICODE[':+1:'] in actual
assert '\x1b[34m\x1b[44m' in actual # blue
assert '\x1b[31m\x1b[41m' in actual # red
assert emoji.unicode_codes.EMOJI_ALIAS_UNICODE[':water_wave:'] in actual
|
|
"""
Tests for django test runner
"""
from __future__ import absolute_import
from optparse import make_option
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django import db
from django.test import simple, TransactionTestCase, skipUnlessDBFeature
from django.test.simple import DjangoTestSuiteRunner, get_tests
from django.test.testcases import connections_support_transactions
from django.utils import unittest
from django.utils.importlib import import_module
from ..admin_scripts.tests import AdminScriptTestCase
from .models import Person
TEST_APP_OK = 'regressiontests.test_runner.valid_app.models'
TEST_APP_ERROR = 'regressiontests.test_runner.invalid_app.models'
class DependencyOrderingTests(unittest.TestCase):
def test_simple_dependencies(self):
raw = [
('s1', ('s1_db', ['alpha'])),
('s2', ('s2_db', ['bravo'])),
('s3', ('s3_db', ['charlie'])),
]
dependencies = {
'alpha': ['charlie'],
'bravo': ['charlie'],
}
ordered = simple.dependency_ordered(raw, dependencies=dependencies)
ordered_sigs = [sig for sig,value in ordered]
self.assertIn('s1', ordered_sigs)
self.assertIn('s2', ordered_sigs)
self.assertIn('s3', ordered_sigs)
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
def test_chained_dependencies(self):
raw = [
('s1', ('s1_db', ['alpha'])),
('s2', ('s2_db', ['bravo'])),
('s3', ('s3_db', ['charlie'])),
]
dependencies = {
'alpha': ['bravo'],
'bravo': ['charlie'],
}
ordered = simple.dependency_ordered(raw, dependencies=dependencies)
ordered_sigs = [sig for sig,value in ordered]
self.assertIn('s1', ordered_sigs)
self.assertIn('s2', ordered_sigs)
self.assertIn('s3', ordered_sigs)
# Explicit dependencies
self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1'))
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
# Implied dependencies
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
def test_multiple_dependencies(self):
raw = [
('s1', ('s1_db', ['alpha'])),
('s2', ('s2_db', ['bravo'])),
('s3', ('s3_db', ['charlie'])),
('s4', ('s4_db', ['delta'])),
]
dependencies = {
'alpha': ['bravo','delta'],
'bravo': ['charlie'],
'delta': ['charlie'],
}
ordered = simple.dependency_ordered(raw, dependencies=dependencies)
ordered_sigs = [sig for sig,aliases in ordered]
self.assertIn('s1', ordered_sigs)
self.assertIn('s2', ordered_sigs)
self.assertIn('s3', ordered_sigs)
self.assertIn('s4', ordered_sigs)
# Explicit dependencies
self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1'))
self.assertLess(ordered_sigs.index('s4'), ordered_sigs.index('s1'))
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s4'))
# Implicit dependencies
self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
def test_circular_dependencies(self):
raw = [
('s1', ('s1_db', ['alpha'])),
('s2', ('s2_db', ['bravo'])),
]
dependencies = {
'bravo': ['alpha'],
'alpha': ['bravo'],
}
self.assertRaises(ImproperlyConfigured, simple.dependency_ordered, raw, dependencies=dependencies)
def test_own_alias_dependency(self):
raw = [
('s1', ('s1_db', ['alpha', 'bravo']))
]
dependencies = {
'alpha': ['bravo']
}
with self.assertRaises(ImproperlyConfigured):
simple.dependency_ordered(raw, dependencies=dependencies)
# reordering aliases shouldn't matter
raw = [
('s1', ('s1_db', ['bravo', 'alpha']))
]
with self.assertRaises(ImproperlyConfigured):
simple.dependency_ordered(raw, dependencies=dependencies)
class MockTestRunner(object):
invoked = False
def __init__(self, *args, **kwargs):
pass
def run_tests(self, test_labels, extra_tests=None, **kwargs):
MockTestRunner.invoked = True
class ManageCommandTests(unittest.TestCase):
def test_custom_test_runner(self):
call_command('test', 'sites',
testrunner='regressiontests.test_runner.tests.MockTestRunner')
self.assertTrue(MockTestRunner.invoked,
"The custom test runner has not been invoked")
class CustomOptionsTestRunner(simple.DjangoTestSuiteRunner):
option_list = (
make_option('--option_a','-a', action='store', dest='option_a', default='1'),
make_option('--option_b','-b', action='store', dest='option_b', default='2'),
make_option('--option_c','-c', action='store', dest='option_c', default='3'),
)
def __init__(self, verbosity=1, interactive=True, failfast=True, option_a=None, option_b=None, option_c=None, **kwargs):
super(CustomOptionsTestRunner, self).__init__(verbosity=verbosity, interactive=interactive,
failfast=failfast)
self.option_a = option_a
self.option_b = option_b
self.option_c = option_c
def run_tests(self, test_labels, extra_tests=None, **kwargs):
print("%s:%s:%s" % (self.option_a, self.option_b, self.option_c))
class CustomTestRunnerOptionsTests(AdminScriptTestCase):
def setUp(self):
settings = {
'TEST_RUNNER': '\'regressiontests.test_runner.tests.CustomOptionsTestRunner\'',
}
self.write_settings('settings.py', sdict=settings)
def tearDown(self):
self.remove_settings('settings.py')
def test_default_options(self):
args = ['test', '--settings=regressiontests.settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, '1:2:3')
def test_default_and_given_options(self):
args = ['test', '--settings=regressiontests.settings', '--option_b=foo']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, '1:foo:3')
def test_option_name_and_value_separated(self):
args = ['test', '--settings=regressiontests.settings', '--option_b', 'foo']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, '1:foo:3')
def test_all_options_given(self):
args = ['test', '--settings=regressiontests.settings', '--option_a=bar', '--option_b=foo', '--option_c=31337']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, 'bar:foo:31337')
class Ticket16885RegressionTests(unittest.TestCase):
def test_ticket_16885(self):
"""Features are also confirmed on mirrored databases."""
old_db_connections = db.connections
try:
db.connections = db.ConnectionHandler({
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
'slave': {
'ENGINE': 'django.db.backends.sqlite3',
'TEST_MIRROR': 'default',
},
})
slave = db.connections['slave']
self.assertEqual(slave.features.supports_transactions, None)
DjangoTestSuiteRunner(verbosity=0).setup_databases()
self.assertNotEqual(slave.features.supports_transactions, None)
finally:
db.connections = old_db_connections
class Ticket17477RegressionTests(AdminScriptTestCase):
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_ticket_17477(self):
"""'manage.py help test' works after r16352."""
args = ['help', 'test']
out, err = self.run_manage(args)
self.assertNoOutput(err)
class ModulesTestsPackages(unittest.TestCase):
def test_get_tests(self):
"Check that the get_tests helper function can find tests in a directory"
module = import_module(TEST_APP_OK)
tests = get_tests(module)
self.assertIsInstance(tests, type(module))
def test_import_error(self):
"Test for #12658 - Tests with ImportError's shouldn't fail silently"
module = import_module(TEST_APP_ERROR)
self.assertRaises(ImportError, get_tests, module)
class Sqlite3InMemoryTestDbs(unittest.TestCase):
@unittest.skipUnless(all(db.connections[conn].vendor == 'sqlite' for conn in db.connections),
"This is a sqlite-specific issue")
def test_transaction_support(self):
"""Ticket #16329: sqlite3 in-memory test databases"""
old_db_connections = db.connections
for option in ('NAME', 'TEST_NAME'):
try:
db.connections = db.ConnectionHandler({
'default': {
'ENGINE': 'django.db.backends.sqlite3',
option: ':memory:',
},
'other': {
'ENGINE': 'django.db.backends.sqlite3',
option: ':memory:',
},
})
other = db.connections['other']
self.assertIsNone(other.features.supports_transactions)
DjangoTestSuiteRunner(verbosity=0).setup_databases()
msg = "DATABASES setting '%s' option set to sqlite3's ':memory:' value shouldn't interfere with transaction support detection." % option
# Transaction support should be properly initialised for the 'other' DB
self.assertIsNotNone(other.features.supports_transactions, msg)
# And all the DBs should report that they support transactions
self.assertTrue(connections_support_transactions(), msg)
finally:
db.connections = old_db_connections
class AutoIncrementResetTest(TransactionTestCase):
"""
Here we test creating the same model two times in different test methods,
and check that both times they get "1" as their PK value. That is, we test
that AutoField values start from 1 for each transactional test case.
"""
@skipUnlessDBFeature('supports_sequence_reset')
def test_autoincrement_reset1(self):
p = Person.objects.create(first_name='Jack', last_name='Smith')
self.assertEqual(p.pk, 1)
@skipUnlessDBFeature('supports_sequence_reset')
def test_autoincrement_reset2(self):
p = Person.objects.create(first_name='Jack', last_name='Smith')
self.assertEqual(p.pk, 1)
|
|
"""
/*
 * Copyright 2008 Google Inc.
 * Copyright (C) 2009 Luke Kenneth Casson Leighton <[email protected]>
 * Copyright (C) 2010 Rich Newpol <[email protected]>
 *
 * Licensed under the Apache License, Version 2.0 (the "License") you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
"""
from __pyjamas__ import console
from pyjamas import DOM
from pyjamas import Window
from pyjamas import DeferredCommand
from pyjamas.EventController import EventGenerator
from pyjamas.ui import GlassWidget
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.AbsolutePanel import AbsolutePanel
from pyjamas.ui.ScrollPanel import ScrollPanel
from pyjamas.ui.MouseListener import MouseHandler, fireMouseEvent
from pyjamas.ui import Event
class SplitPanelSplitter(SimplePanel, MouseHandler):
""" a splitter is just a SimplePanel which can receive mouse events """
elem_props = SimplePanel.elem_props + [
("height", "Cell Height", "CellHeight", str, None),
("width", "Cell Width", "CellWidth", str, None),
("halign", "Cell Horizontal Alignment",
"CellHorizontalAlignment", None, "left"),
("valign", "Cell Vertical Alignment",
"CellVerticalAlignment", None, "top"),
]
def _getElementProps(self):
return self.elem_props
def __init__(self, splitPanel, **kwargs):
# keep a ref to our parent panel for event callback
self._splitpanel = splitPanel
SimplePanel.__init__(self, **kwargs)
MouseHandler.__init__(self)
self.addMouseListener(self)
# set some constant styles
elem = self.getElement()
# the following allows splitter to be small enough in IE
DOM.setStyleAttribute(elem, "overflow", "hidden")
def onMouseDown(self, sender, x, y):
""" catch a mouse down for parent """
ev = DOM.eventGetCurrentEvent()
        # ignore everything but left-button downs
if DOM.eventGetButton(ev) != Event.BUTTON_LEFT:
return
DOM.eventPreventDefault(DOM.eventGetCurrentEvent())
# parent will capture the mouse and handle the dragging from here
self._splitpanel.startSplitterDrag(x, y)
class SplitPanel(AbsolutePanel, MouseHandler, EventGenerator):
""" Provides the SplitPanel baseclass functionality
        A SplitPanel is an AbsolutePanel holding three children:
        a ScrollPanel for the first (top/left) widget, a Splitter
        in the middle, and a ScrollPanel for the second
        (bottom/right) widget.
"""
def __init__(self, vertical=False, **kwargs):
# set defaults
        if 'StyleName' not in kwargs:
if vertical: # vertical split panel
kwargs['StyleName'] = "gwt-VerticalSplitPanel"
else:
kwargs['StyleName'] = "gwt-HorizontalSplitPanel"
# splitter drag state vars
self._drag_start = None
self._pos = "50%"
# orientation
self._vertical = vertical
# now init the bases
AbsolutePanel.__init__(self, **kwargs)
MouseHandler.__init__(self)
        # add our Resize event support
self.addListenedEvent("Resize")
# create the top/left widget container
self._container1 = ScrollPanel()
# create the bottom/right widget container
self._container2 = ScrollPanel()
# create the splitter
self._splitter = SplitPanelSplitter(self)
# add splitter handling
self._splitter.addMouseListener(self)
# add mouse event handling
self.addMouseListener(self)
# add the parts
AbsolutePanel.add(self, self._container1, 0, 0)
AbsolutePanel.add(self, self._splitter, 0, 0)
AbsolutePanel.add(self, self._container2, 0, 0)
# set the layout
if vertical: # vertical split panel
self._splitter.setStyleName("vsplitter")
self._splitter.setWidth("100%")
self._container1.setWidth("100%")
self._container2.setWidth("100%")
# set drag cursor
DOM.setStyleAttribute(self._splitter.getElement(),
"cursor", "n-resize")
else: # horizontal split panel
self._splitter.setStyleName("hsplitter")
self._splitter.setHeight("100%")
self._container1.setHeight("100%")
self._container2.setHeight("100%")
# set drag cursor
DOM.setStyleAttribute(self._splitter.getElement(),
"cursor", "e-resize")
def onAttach(self):
AbsolutePanel.onAttach(self)
self.setSplitPosition()
# fixup the container 2 size and position
def _finalizePositions(self, pos=None):
finalized = False
if self._vertical:
if pos is None:
pos = self._container1.getOffsetHeight()
space = self.getOffsetHeight()
sz = self._splitter.getOffsetHeight()
if space > 0 and sz > 0 and pos > 0:
# limit pos
if pos > space - sz:
pos = space - sz
self._container1.setHeight(pos)
self.setWidgetPosition(self._splitter, 0, pos)
self.setWidgetPosition(self._container2, 0, pos + sz)
self._container2.setHeight(space - (pos + sz))
finalized = True
else:
if pos is None:
pos = self._container1.getOffsetWidth()
space = self.getOffsetWidth()
sz = self._splitter.getOffsetWidth()
if space > 0 and sz > 0 and pos > 0:
# limit pos
if pos > space - sz:
pos = space - sz
self._container1.setWidth(pos)
self.setWidgetPosition(self._splitter, pos, 0)
self.setWidgetPosition(self._container2, pos + sz, 0)
self._container2.setWidth(space - (pos + sz))
finalized = True
if finalized:
self.dispatchResizeEvent(self, pos)
return finalized
# end a drag operation
def _stopDragging(self):
if self._drag_start is not None:
# we are no longer dragging
self._drag_start = None
# deactivate the transparent overlay
GlassWidget.hide()
# don't let a mouse-up become a click event
DOM.eventCancelBubble(DOM.eventGetCurrentEvent(), True)
def _isDragging(self):
return self._drag_start is not None
# start a drag operation (called by splitter)
def startSplitterDrag(self, x, y):
if self._drag_start is None:
# remember where on the slider we are dragging
if self._vertical:
self._drag_start = y
else:
self._drag_start = x
# activate the transparent overlay to keep mouse events flowing to
# the splitter (and to us) even if the mouse leaves the splitter
GlassWidget.show(self)
# add handlers for mouse events to support dragging the slider
    # NOTE: the x,y position is relative to the splitter
def onMouseMove(self, sender, x, y):
# if dragging, then use current mouse position
# to reset splitter position
if not self._isDragging():
return
# remove the offset into the splitter
# where we started dragging
if self._vertical:
self._pos = y - self._drag_start
else:
self._pos = x - self._drag_start
# apply limit
if self._pos < 1:
self._pos = 1
# apply new position
self.setSplitPosition()
def onMouseUp(self, sender, x, y):
ev = DOM.eventGetCurrentEvent()
        # ignore everything but left-button ups
if DOM.eventGetButton(ev) != Event.BUTTON_LEFT:
return
DOM.eventPreventDefault(ev)
# if we are dragging
if self._isDragging():
# stop dragging on mouse up
self._stopDragging()
# called when we start dragging
def onMouseGlassEnter(self, sender):
pass
# called when we drag out of the window
# (NOT called when we just stop dragging)
def onMouseGlassLeave(self, sender):
# we left the window, so stop dragging
self._stopDragging()
#
# Start the inherited 'public' API
#
# specify splitter position in pix OR percentage
# if pixels (number) specified, we can make change now
# otherwise, we have to set the offset as specified, then
# 'fixup' the remaining space after rendering
def setSplitPosition(self, pos=None):
if pos is not None:
# remember last pos set
self._pos = pos
else:
pos = self._pos
if pos < 1:
pos = 1
self._pos = pos
# change adjustable dimension
if self._vertical:
self._container1.setHeight(pos)
else:
self._container1.setWidth(pos)
# if pix are given, we can try to finalize the positions
finalized = False
if isinstance(pos, int):
finalized = self._finalizePositions(pos)
# if needed, queue callback to finalize
if not finalized:
DeferredCommand.add(self._finalizePositions)
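    # Usage note (a sketch, not from the original code): integer positions,
    # e.g. panel.setSplitPosition(150), can be finalized immediately once the
    # panel has been rendered, while percentage strings such as "25%" only
    # size the first container and rely on the deferred _finalizePositions()
    # call to position the splitter and the second container.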
def getWidget(self, index):
if index == 0:
return self._container1.getWidget()
return self._container2.getWidget()
def setWidget(self, index, widget):
if index == 0:
return self._container1.setWidget(widget)
return self._container2.setWidget(widget)
# Adds a widget to a pane
def add(self, widget):
        if self.getWidget(0) is None:
            self.setWidget(0, widget)
        elif self.getWidget(1) is None:
            self.setWidget(1, widget)
        else:
            console.error("SplitPanel can only contain two child widgets")
# Removes a child widget.
def remove(self, widget):
if self.getWidget(0) == widget:
self._container1.remove(widget)
elif self.getWidget(1) == widget:
self._container2.remove(widget)
else:
AbsolutePanel.remove(self, widget)
# Gets the content element for the given index.
def getElement(self, index=None):
if index is None:
return AbsolutePanel.getElement(self)
return self.getWidget(index).getElement()
# Gets the widget in the pane at end of the line direction for the layout
def getEndOfLineWidget(self):
return self.getWidget(1)
# Gets the element that is acting as the splitter.
def getSplitElement(self):
return self._splitter.getElement()
# Gets the widget in the pane at the start of line direction for the layout
def getStartOfLineWidget(self):
return self.getWidget(0)
# Indicates whether the split panel is being resized.
def isResizing(self):
return False
# Sets the widget in the pane at the end of line direction for the layout
def setEndOfLineWidget(self, widget):
self.setWidget(1, widget)
def setStartOfLineWidget(self, widget):
self.setWidget(0, widget)
|
|
import unittest
import weakref
import copy
from pyorm.column import Column
from pyorm.expression import Expression, Equation
from pyorm.token import *
class MockOwner(object):
def __hash__(self):
return 1
class ColumnTestCase(unittest.TestCase):
def test_path(self):
"""
Test to verify that a new column creates a new
path chain based on the attributes accessed.
"""
col = Column.test.column.chain
self.assertEqual(col._path, ['test', 'column', 'chain'])
def test_alias(self):
"""
Tests that an alias can be applied, and re-applied
"""
col = Column.test.field
col.set_alias('fish')
self.assertEqual(col._alias, 'fish')
self.assertEqual(col.get_alias(), 'fish')
col.set_alias('cheese')
self.assertEqual(col._alias, 'cheese')
self.assertEqual(col.get_alias(), 'cheese')
def test_scope(self):
"""
Test setting scope both on Column initialization and
after column initialization.
"""
col = Column(scope='parent').test.field
self.assertEqual(col._scope, 'parent')
self.assertEqual(col.get_scope(), 'parent')
col.set_scope('normal')
self.assertEqual(col._scope, 'normal')
self.assertEqual(col.get_scope(), 'normal')
def test_owner(self):
"""
Verify that the owner is saved as a weak reference proxy
to the original object.
"""
col = Column.test.field
mock_owner = MockOwner()
col.owner = mock_owner
self.assertEqual(col._owner, weakref.proxy(mock_owner))
self.assertEqual(col.owner, weakref.proxy(mock_owner))
self.assertEqual(id(col._owner_ref()), id(mock_owner))
def test_copy(self):
"""
Tests the creation of a copy
"""
mock_owner = MockOwner()
col = Column(scope='parent').test.field
col.set_alias('cheese')
col.owner = mock_owner
col_copy = copy.copy(col)
self.assertNotEqual(id(col), id(col_copy))
self.assertEqual(id(col._owner), id(col_copy._owner))
def test_deepcopy(self):
"""
Tests the creation of a deep copy (which is the same as
a copy for column objects, due to the use of weakref.proxy()
for the owner).
"""
mock_owner = MockOwner()
col = Column(scope='parent').test.field
col.set_alias('cheese')
col.owner = mock_owner
        col_copy = copy.deepcopy(col)
self.assertNotEqual(id(col), id(col_copy))
self.assertEqual(id(col._owner), id(col_copy._owner))
def test_hash(self):
"""
        Checks that two column objects with the same options hash to
        the same value, and that two column objects that do not share
        all of the same options hash to different values.
"""
mock_owner = MockOwner()
col = Column(scope='parent').test.field
col.set_alias('fish')
col.owner = mock_owner
mock_owner2 = MockOwner()
col2 = Column(scope='parent').test.field
col2.set_alias('fish')
col2.owner = mock_owner2
self.assertEqual(hash(col), hash(col2))
mock_owner3 = MockOwner()
col3 = Column(scope='parent').test.field
col3.set_alias('cheese')
col3.owner = mock_owner
self.assertNotEqual(hash(col), hash(col3))
def test_and(self):
exp = Column.test & 1
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_AND)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
def test_rand(self):
exp = 1 & Column.test
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_AND)
self.assertEqual(exp._tokens[-1].type, T_COL)
self.assertEqual(exp._tokens[0].type, T_LIT)
def test_or(self):
exp = Column.test | 1
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_OR)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
def test_ror(self):
exp = 1 | Column.test
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_OR)
self.assertEqual(exp._tokens[-1].type, T_COL)
self.assertEqual(exp._tokens[0].type, T_LIT)
def test_add(self):
exp = Column.test + 1
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_ADD)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
def test_radd(self):
exp = 1 + Column.test
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_ADD)
self.assertEqual(exp._tokens[-1].type, T_COL)
self.assertEqual(exp._tokens[0].type, T_LIT)
def test_sub(self):
exp = Column.test - 1
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_SUB)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
def test_rsub(self):
exp = 1 - Column.test
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_SUB)
self.assertEqual(exp._tokens[-1].type, T_COL)
self.assertEqual(exp._tokens[0].type, T_LIT)
def test_mul(self):
exp = Column.test * 1
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_MUL)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
def test_rmul(self):
exp = 1 * Column.test
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_MUL)
self.assertEqual(exp._tokens[-1].type, T_COL)
self.assertEqual(exp._tokens[0].type, T_LIT)
def test_div(self):
exp = Column.test / 1
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_DIV)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
def test_rdiv(self):
exp = 1 / Column.test
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_DIV)
self.assertEqual(exp._tokens[-1].type, T_COL)
self.assertEqual(exp._tokens[0].type, T_LIT)
def test_mod(self):
exp = Column.test % 1
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_MOD)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
def test_rmod(self):
exp = 1 % Column.test
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_MOD)
self.assertEqual(exp._tokens[-1].type, T_COL)
self.assertEqual(exp._tokens[0].type, T_LIT)
def test_pow(self):
exp = Column.test ** 1
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_POW)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
def test_rpow(self):
exp = 1 ** Column.test
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_POW)
self.assertEqual(exp._tokens[-1].type, T_COL)
self.assertEqual(exp._tokens[0].type, T_LIT)
def test_ne(self):
exp = Column.test != 1
self.assertEqual(type(exp), Equation)
self.assertEqual(exp.op, OP_NE)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
exp = Column.test != None
self.assertEqual(type(exp), Equation)
self.assertEqual(exp.op, OP_NULLNE)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
exp = Column.test != Column.fish
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_NE)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_COL)
    def test_eq(self):
exp = Column.test == 1
self.assertEqual(type(exp), Equation)
self.assertEqual(exp.op, OP_EQ)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
exp = Column.test == None
self.assertEqual(type(exp), Equation)
self.assertEqual(exp.op, OP_NULLEQ)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
exp = Column.test == Column.fish
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_EQ)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_COL)
def test_lt(self):
exp = Column.test < 1
self.assertEqual(type(exp), Equation)
self.assertEqual(exp.op, OP_LT)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
exp = Column.test < Column.fish
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_LT)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_COL)
def test_le(self):
exp = Column.test <= 1
self.assertEqual(type(exp), Equation)
self.assertEqual(exp.op, OP_LE)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
exp = Column.test <= Column.fish
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_LE)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_COL)
def test_ge(self):
exp = Column.test >= 1
self.assertEqual(type(exp), Equation)
self.assertEqual(exp.op, OP_GE)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
exp = Column.test >= Column.fish
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_GE)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_COL)
def test_gt(self):
exp = Column.test > 1
self.assertEqual(type(exp), Equation)
self.assertEqual(exp.op, OP_GT)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_LIT)
exp = Column.test > Column.fish
self.assertEqual(type(exp), Expression)
self.assertEqual(exp.op, OP_GT)
self.assertEqual(exp._tokens[0].type, T_COL)
self.assertEqual(exp._tokens[-1].type, T_COL)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.common import exceptions as exc
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.extensions.flavor import (FLAVOR_NETWORK, FLAVOR_ROUTER)
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.metaplugin.common import config # noqa
from neutron.plugins.metaplugin import meta_db_v2
from neutron.plugins.metaplugin.meta_models_v2 import (NetworkFlavor,
RouterFlavor)
LOG = logging.getLogger(__name__)
# Metaplugin Exceptions
class FlavorNotFound(exc.NotFound):
message = _("Flavor %(flavor)s could not be found")
class FaildToAddFlavorBinding(exc.NeutronException):
message = _("Failed to add flavor binding")
class MetaPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin):
def __init__(self, configfile=None):
super(MetaPluginV2, self).__init__()
LOG.debug(_("Start initializing metaplugin"))
self.supported_extension_aliases = ['flavor', 'external-net']
if cfg.CONF.META.supported_extension_aliases:
cfg_aliases = cfg.CONF.META.supported_extension_aliases.split(',')
self.supported_extension_aliases += cfg_aliases
        # Ignore overlapping config options
def _is_opt_registered(opts, opt):
if opt.dest in opts:
return True
else:
return False
cfg._is_opt_registered = _is_opt_registered
        # Keep existing tables if multiple plugins use the same table name.
db.model_base.NeutronBase.__table_args__ = {'keep_existing': True}
self.plugins = {}
plugin_list = [plugin_set.split(':')
for plugin_set
in cfg.CONF.META.plugin_list.split(',')]
for flavor, plugin_provider in plugin_list:
self.plugins[flavor] = self._load_plugin(plugin_provider)
self.l3_plugins = {}
if cfg.CONF.META.l3_plugin_list:
l3_plugin_list = [plugin_set.split(':')
for plugin_set
in cfg.CONF.META.l3_plugin_list.split(',')]
for flavor, plugin_provider in l3_plugin_list:
if flavor in self.plugins:
self.l3_plugins[flavor] = self.plugins[flavor]
else:
# For l3 only plugin
self.l3_plugins[flavor] = self._load_plugin(
plugin_provider)
self.default_flavor = cfg.CONF.META.default_flavor
if self.default_flavor not in self.plugins:
            raise exc.Invalid(_('default_flavor %s is not in the plugin list') %
                              self.default_flavor)
if self.l3_plugins:
self.default_l3_flavor = cfg.CONF.META.default_l3_flavor
if self.default_l3_flavor not in self.l3_plugins:
                raise exc.Invalid(_('default_l3_flavor %s is not in the plugin list')
                                  % self.default_l3_flavor)
self.supported_extension_aliases += ['router', 'ext-gw-mode',
'extraroute']
self.extension_map = {}
        if cfg.CONF.META.extension_map != '':
extension_list = [method_set.split(':')
for method_set
in cfg.CONF.META.extension_map.split(',')]
for method_name, flavor in extension_list:
self.extension_map[method_name] = flavor
def _load_plugin(self, plugin_provider):
LOG.debug(_("Plugin location: %s"), plugin_provider)
plugin_klass = importutils.import_class(plugin_provider)
return plugin_klass()
def _get_plugin(self, flavor):
if flavor not in self.plugins:
raise FlavorNotFound(flavor=flavor)
return self.plugins[flavor]
def _get_l3_plugin(self, flavor):
if flavor not in self.l3_plugins:
raise FlavorNotFound(flavor=flavor)
return self.l3_plugins[flavor]
def __getattr__(self, key):
        # First, try to pick up the extension method from extension_map
if key in self.extension_map:
flavor = self.extension_map[key]
plugin = self._get_plugin(flavor)
if plugin and hasattr(plugin, key):
return getattr(plugin, key)
# Second, try to match extension method in order of plugin list
for flavor, plugin in self.plugins.items():
if hasattr(plugin, key):
return getattr(plugin, key)
        # if no plugin supports the method, raise AttributeError
raise AttributeError
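    # Dispatch sketch (illustrative only; the method and flavor names below
    # are assumptions, not part of the original plugin): with
    # extension_map = {'create_firewall': 'flavor_a'}, a call such as
    # meta_plugin.create_firewall(ctx, fw) is forwarded to
    # self.plugins['flavor_a'].create_firewall(ctx, fw); a method missing
    # from the map is served by the first plugin in self.plugins defining it.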
def _extend_network_dict(self, context, network):
flavor = self._get_flavor_by_network_id(context, network['id'])
network[FLAVOR_NETWORK] = flavor
def create_network(self, context, network):
n = network['network']
flavor = n.get(FLAVOR_NETWORK)
if str(flavor) not in self.plugins:
flavor = self.default_flavor
plugin = self._get_plugin(flavor)
with context.session.begin(subtransactions=True):
net = plugin.create_network(context, network)
LOG.debug(_("Created network: %(net_id)s with flavor "
"%(flavor)s"), {'net_id': net['id'], 'flavor': flavor})
try:
meta_db_v2.add_network_flavor_binding(context.session,
flavor, str(net['id']))
except Exception:
LOG.exception(_('Failed to add flavor bindings'))
plugin.delete_network(context, net['id'])
raise FaildToAddFlavorBinding()
LOG.debug(_("Created network: %s"), net['id'])
self._extend_network_dict(context, net)
return net
def update_network(self, context, id, network):
flavor = meta_db_v2.get_flavor_by_network(context.session, id)
plugin = self._get_plugin(flavor)
return plugin.update_network(context, id, network)
def delete_network(self, context, id):
flavor = meta_db_v2.get_flavor_by_network(context.session, id)
plugin = self._get_plugin(flavor)
return plugin.delete_network(context, id)
def get_network(self, context, id, fields=None):
flavor = meta_db_v2.get_flavor_by_network(context.session, id)
plugin = self._get_plugin(flavor)
net = plugin.get_network(context, id, fields)
net['id'] = id
if not fields or FLAVOR_NETWORK in fields:
self._extend_network_dict(context, net)
if fields and 'id' not in fields:
del net['id']
return net
def get_networks_with_flavor(self, context, filters=None,
fields=None):
collection = self._model_query(context, models_v2.Network)
model = NetworkFlavor
collection = collection.join(model,
models_v2.Network.id == model.network_id)
if filters:
for key, value in filters.iteritems():
if key == FLAVOR_NETWORK:
column = NetworkFlavor.flavor
else:
column = getattr(models_v2.Network, key, None)
if column:
collection = collection.filter(column.in_(value))
return [self._make_network_dict(c, fields) for c in collection]
def get_networks(self, context, filters=None, fields=None):
nets = self.get_networks_with_flavor(context, filters, None)
if filters:
nets = self._filter_nets_l3(context, nets, filters)
nets = [self.get_network(context, net['id'], fields)
for net in nets]
return nets
def _get_flavor_by_network_id(self, context, network_id):
return meta_db_v2.get_flavor_by_network(context.session, network_id)
def _get_flavor_by_router_id(self, context, router_id):
return meta_db_v2.get_flavor_by_router(context.session, router_id)
def _get_plugin_by_network_id(self, context, network_id):
flavor = self._get_flavor_by_network_id(context, network_id)
return self._get_plugin(flavor)
def create_port(self, context, port):
p = port['port']
if 'network_id' not in p:
raise exc.NotFound
plugin = self._get_plugin_by_network_id(context, p['network_id'])
return plugin.create_port(context, port)
def update_port(self, context, id, port):
port_in_db = self.get_port(context, id)
plugin = self._get_plugin_by_network_id(context,
port_in_db['network_id'])
return plugin.update_port(context, id, port)
def delete_port(self, context, id, l3_port_check=True):
port_in_db = self.get_port(context, id)
plugin = self._get_plugin_by_network_id(context,
port_in_db['network_id'])
return plugin.delete_port(context, id, l3_port_check)
def create_subnet(self, context, subnet):
s = subnet['subnet']
if 'network_id' not in s:
raise exc.NotFound
plugin = self._get_plugin_by_network_id(context,
s['network_id'])
return plugin.create_subnet(context, subnet)
def update_subnet(self, context, id, subnet):
s = self.get_subnet(context, id)
plugin = self._get_plugin_by_network_id(context,
s['network_id'])
return plugin.update_subnet(context, id, subnet)
def delete_subnet(self, context, id):
s = self.get_subnet(context, id)
plugin = self._get_plugin_by_network_id(context,
s['network_id'])
return plugin.delete_subnet(context, id)
def _extend_router_dict(self, context, router):
flavor = self._get_flavor_by_router_id(context, router['id'])
router[FLAVOR_ROUTER] = flavor
def create_router(self, context, router):
r = router['router']
flavor = r.get(FLAVOR_ROUTER)
if str(flavor) not in self.l3_plugins:
flavor = self.default_l3_flavor
plugin = self._get_l3_plugin(flavor)
with context.session.begin(subtransactions=True):
r_in_db = plugin.create_router(context, router)
LOG.debug(_("Created router: %(router_id)s with flavor "
"%(flavor)s"),
{'router_id': r_in_db['id'], 'flavor': flavor})
meta_db_v2.add_router_flavor_binding(context.session,
flavor, str(r_in_db['id']))
LOG.debug(_("Created router: %s"), r_in_db['id'])
self._extend_router_dict(context, r_in_db)
return r_in_db
def update_router(self, context, id, router):
flavor = meta_db_v2.get_flavor_by_router(context.session, id)
plugin = self._get_l3_plugin(flavor)
return plugin.update_router(context, id, router)
def delete_router(self, context, id):
flavor = meta_db_v2.get_flavor_by_router(context.session, id)
plugin = self._get_l3_plugin(flavor)
return plugin.delete_router(context, id)
def get_router(self, context, id, fields=None):
flavor = meta_db_v2.get_flavor_by_router(context.session, id)
plugin = self._get_l3_plugin(flavor)
router = plugin.get_router(context, id, fields)
if not fields or FLAVOR_ROUTER in fields:
self._extend_router_dict(context, router)
return router
def get_routers_with_flavor(self, context, filters=None,
fields=None):
collection = self._model_query(context, l3_db.Router)
r_model = RouterFlavor
collection = collection.join(r_model,
l3_db.Router.id == r_model.router_id)
if filters:
for key, value in filters.iteritems():
if key == FLAVOR_ROUTER:
column = RouterFlavor.flavor
else:
column = getattr(l3_db.Router, key, None)
if column:
collection = collection.filter(column.in_(value))
return [self._make_router_dict(c, fields) for c in collection]
def get_routers(self, context, filters=None, fields=None):
routers = self.get_routers_with_flavor(context, filters,
None)
return [self.get_router(context, router['id'],
fields)
for router in routers]
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2018-2020 The Ion Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as SignatureHash().
This file is modified from python-bitcoinlib.
"""
from .mininode import CTransaction, CTxOut, sha256, hash256
from binascii import hexlify
import hashlib
import sys
bchr = chr
bord = ord
if sys.version > '3':
long = int
bchr = lambda x: bytes([x])
bord = lambda x: x
import struct
from .bignum import bn2vch
MAX_SCRIPT_SIZE = 10000
MAX_SCRIPT_ELEMENT_SIZE = 520
MAX_SCRIPT_OPCODES = 201
OPCODE_NAMES = {}
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
_opcode_instances = []
class CScriptOp(int):
"""A single script opcode"""
__slots__ = []
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + bchr(len(d)) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
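    # Example of the size-dependent encoding above (a sketch with assumed
    # payloads; lengths below are in bytes):
    #   encode_op_pushdata(b'\x01' * 10)   -> b'\x0a' + data          (direct push)
    #   encode_op_pushdata(b'\x01' * 100)  -> b'\x4c\x64' + data      (OP_PUSHDATA1)
    #   encode_op_pushdata(b'\x01' * 1000) -> b'\x4d\xe8\x03' + data  (OP_PUSHDATA2)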
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n-1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1+1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
# Populate opcode instance table
for n in range(0xff+1):
CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
VALID_OPCODES = {
OP_1NEGATE,
OP_RESERVED,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_13,
OP_14,
OP_15,
OP_16,
OP_NOP,
OP_VER,
OP_IF,
OP_NOTIF,
OP_VERIF,
OP_VERNOTIF,
OP_ELSE,
OP_ENDIF,
OP_VERIFY,
OP_RETURN,
OP_TOALTSTACK,
OP_FROMALTSTACK,
OP_2DROP,
OP_2DUP,
OP_3DUP,
OP_2OVER,
OP_2ROT,
OP_2SWAP,
OP_IFDUP,
OP_DEPTH,
OP_DROP,
OP_DUP,
OP_NIP,
OP_OVER,
OP_PICK,
OP_ROLL,
OP_ROT,
OP_SWAP,
OP_TUCK,
OP_CAT,
OP_SUBSTR,
OP_LEFT,
OP_RIGHT,
OP_SIZE,
OP_INVERT,
OP_AND,
OP_OR,
OP_XOR,
OP_EQUAL,
OP_EQUALVERIFY,
OP_RESERVED1,
OP_RESERVED2,
OP_1ADD,
OP_1SUB,
OP_2MUL,
OP_2DIV,
OP_NEGATE,
OP_ABS,
OP_NOT,
OP_0NOTEQUAL,
OP_ADD,
OP_SUB,
OP_MUL,
OP_DIV,
OP_MOD,
OP_LSHIFT,
OP_RSHIFT,
OP_BOOLAND,
OP_BOOLOR,
OP_NUMEQUAL,
OP_NUMEQUALVERIFY,
OP_NUMNOTEQUAL,
OP_LESSTHAN,
OP_GREATERTHAN,
OP_LESSTHANOREQUAL,
OP_GREATERTHANOREQUAL,
OP_MIN,
OP_MAX,
OP_WITHIN,
OP_RIPEMD160,
OP_SHA1,
OP_SHA256,
OP_HASH160,
OP_HASH256,
OP_CODESEPARATOR,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_NOP1,
OP_CHECKLOCKTIMEVERIFY,
OP_CHECKSEQUENCEVERIFY,
OP_NOP4,
OP_NOP5,
OP_NOP6,
OP_NOP7,
OP_NOP8,
OP_NOP9,
OP_NOP10,
OP_SMALLINTEGER,
OP_PUBKEYS,
OP_PUBKEYHASH,
OP_PUBKEY,
}
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
OPCODES_BY_NAME = {
'OP_0' : OP_0,
'OP_PUSHDATA1' : OP_PUSHDATA1,
'OP_PUSHDATA2' : OP_PUSHDATA2,
'OP_PUSHDATA4' : OP_PUSHDATA4,
'OP_1NEGATE' : OP_1NEGATE,
'OP_RESERVED' : OP_RESERVED,
'OP_1' : OP_1,
'OP_2' : OP_2,
'OP_3' : OP_3,
'OP_4' : OP_4,
'OP_5' : OP_5,
'OP_6' : OP_6,
'OP_7' : OP_7,
'OP_8' : OP_8,
'OP_9' : OP_9,
'OP_10' : OP_10,
'OP_11' : OP_11,
'OP_12' : OP_12,
'OP_13' : OP_13,
'OP_14' : OP_14,
'OP_15' : OP_15,
'OP_16' : OP_16,
'OP_NOP' : OP_NOP,
'OP_VER' : OP_VER,
'OP_IF' : OP_IF,
'OP_NOTIF' : OP_NOTIF,
'OP_VERIF' : OP_VERIF,
'OP_VERNOTIF' : OP_VERNOTIF,
'OP_ELSE' : OP_ELSE,
'OP_ENDIF' : OP_ENDIF,
'OP_VERIFY' : OP_VERIFY,
'OP_RETURN' : OP_RETURN,
'OP_TOALTSTACK' : OP_TOALTSTACK,
'OP_FROMALTSTACK' : OP_FROMALTSTACK,
'OP_2DROP' : OP_2DROP,
'OP_2DUP' : OP_2DUP,
'OP_3DUP' : OP_3DUP,
'OP_2OVER' : OP_2OVER,
'OP_2ROT' : OP_2ROT,
'OP_2SWAP' : OP_2SWAP,
'OP_IFDUP' : OP_IFDUP,
'OP_DEPTH' : OP_DEPTH,
'OP_DROP' : OP_DROP,
'OP_DUP' : OP_DUP,
'OP_NIP' : OP_NIP,
'OP_OVER' : OP_OVER,
'OP_PICK' : OP_PICK,
'OP_ROLL' : OP_ROLL,
'OP_ROT' : OP_ROT,
'OP_SWAP' : OP_SWAP,
'OP_TUCK' : OP_TUCK,
'OP_CAT' : OP_CAT,
'OP_SUBSTR' : OP_SUBSTR,
'OP_LEFT' : OP_LEFT,
'OP_RIGHT' : OP_RIGHT,
'OP_SIZE' : OP_SIZE,
'OP_INVERT' : OP_INVERT,
'OP_AND' : OP_AND,
'OP_OR' : OP_OR,
'OP_XOR' : OP_XOR,
'OP_EQUAL' : OP_EQUAL,
'OP_EQUALVERIFY' : OP_EQUALVERIFY,
'OP_RESERVED1' : OP_RESERVED1,
'OP_RESERVED2' : OP_RESERVED2,
'OP_1ADD' : OP_1ADD,
'OP_1SUB' : OP_1SUB,
'OP_2MUL' : OP_2MUL,
'OP_2DIV' : OP_2DIV,
'OP_NEGATE' : OP_NEGATE,
'OP_ABS' : OP_ABS,
'OP_NOT' : OP_NOT,
'OP_0NOTEQUAL' : OP_0NOTEQUAL,
'OP_ADD' : OP_ADD,
'OP_SUB' : OP_SUB,
'OP_MUL' : OP_MUL,
'OP_DIV' : OP_DIV,
'OP_MOD' : OP_MOD,
'OP_LSHIFT' : OP_LSHIFT,
'OP_RSHIFT' : OP_RSHIFT,
'OP_BOOLAND' : OP_BOOLAND,
'OP_BOOLOR' : OP_BOOLOR,
'OP_NUMEQUAL' : OP_NUMEQUAL,
'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY,
'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL,
'OP_LESSTHAN' : OP_LESSTHAN,
'OP_GREATERTHAN' : OP_GREATERTHAN,
'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL,
'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL,
'OP_MIN' : OP_MIN,
'OP_MAX' : OP_MAX,
'OP_WITHIN' : OP_WITHIN,
'OP_RIPEMD160' : OP_RIPEMD160,
'OP_SHA1' : OP_SHA1,
'OP_SHA256' : OP_SHA256,
'OP_HASH160' : OP_HASH160,
'OP_HASH256' : OP_HASH256,
'OP_CODESEPARATOR' : OP_CODESEPARATOR,
'OP_CHECKSIG' : OP_CHECKSIG,
'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY,
'OP_CHECKMULTISIG' : OP_CHECKMULTISIG,
'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY,
'OP_NOP1' : OP_NOP1,
'OP_CHECKLOCKTIMEVERIFY' : OP_CHECKLOCKTIMEVERIFY,
'OP_CHECKSEQUENCEVERIFY' : OP_CHECKSEQUENCEVERIFY,
'OP_NOP4' : OP_NOP4,
'OP_NOP5' : OP_NOP5,
'OP_NOP6' : OP_NOP6,
'OP_NOP7' : OP_NOP7,
'OP_NOP8' : OP_NOP8,
'OP_NOP9' : OP_NOP9,
'OP_NOP10' : OP_NOP10,
'OP_SMALLINTEGER' : OP_SMALLINTEGER,
'OP_PUBKEYS' : OP_PUBKEYS,
'OP_PUBKEYHASH' : OP_PUBKEYHASH,
'OP_PUBKEY' : OP_PUBKEY,
}
class CScriptInvalidError(Exception):
"""Base class for CScript exceptions"""
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
"""Invalid pushdata due to truncation"""
def __init__(self, msg, data):
self.data = data
super(CScriptTruncatedPushDataError, self).__init__(msg)
# This is used, e.g., for block heights in coinbase scripts (BIP34)
class CScriptNum(object):
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
r.append(absvalue & 0xff)
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes(bchr(len(r)) + r)
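# Worked examples of the minimal number encoding above (a sketch, not from the
# original file); the leading byte is the push length prepended by encode():
#   CScriptNum.encode(CScriptNum(0))   == b''              (empty push)
#   CScriptNum.encode(CScriptNum(1))   == b'\x01\x01'
#   CScriptNum.encode(CScriptNum(-1))  == b'\x01\x81'
#   CScriptNum.encode(CScriptNum(128)) == b'\x02\x80\x00'  (padding byte keeps sign)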
class CScript(bytes):
"""Serialized script
A bytes subclass, so you can use this directly whenever bytes are accepted.
Note that this means that indexing does *not* work - you'll get an index by
byte rather than opcode. This format was chosen for efficiency so that the
general case would not require creating a lot of little CScriptOP objects.
iter(script) however does iterate by opcode.
"""
@classmethod
def __coerce_instance(cls, other):
# Coerce other into bytes
if isinstance(other, CScriptOp):
other = bchr(other)
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bchr(CScriptOp(OP_0))
else:
other = CScriptNum.encode(other)
elif isinstance(other, int):
if 0 <= other <= 16:
other = bytes(bchr(CScriptOp.encode_op_n(other)))
elif other == -1:
other = bytes(bchr(OP_1NEGATE))
else:
other = CScriptOp.encode_op_pushdata(bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
# Do the coercion outside of the try block so that errors in it are
# noticed.
other = self.__coerce_instance(other)
try:
# bytes.__add__ always returns bytes instances unfortunately
return CScript(super(CScript, self).__add__(other))
except TypeError:
raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
def join(self, iterable):
# join makes no sense for a CScript()
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super(CScript, cls).__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
Yields tuples of (opcode, data, sop_idx) so that the different possible
PUSHDATA encodings can be accurately distinguished, as well as
        determining the exact opcode byte indexes (sop_idx).
"""
i = 0
while i < len(self):
sop_idx = i
opcode = bord(self[i])
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = bord(self[i])
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
i += 4
else:
assert False # shouldn't happen
data = bytes(self[i:i+datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
"""'Cooked' iteration
Returns either a CScriptOP instance, an integer, or bytes, as
appropriate.
See raw_iter() if you need to distinguish the different possible
PUSHDATA encodings.
"""
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
# For Python3 compatibility add b before strings so testcases don't
# need to change
def _repr(o):
if isinstance(o, bytes):
return b"x('%s')" % hexlify(o).decode('ascii')
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
"""Get the SigOp count.
fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
Note that this is consensus-critical.
"""
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
"""Consensus critical, see FindAndDelete() in Satoshi codebase"""
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
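# Behaviour sketch for FindAndDelete (illustrative values, not from the
# original source): occurrences of the serialized `sig` that start on an
# opcode boundary are dropped from the script, e.g.
#   FindAndDelete(CScript([b'aa', b'bb', b'aa']), CScript([b'aa']))
#       -> CScript([b'bb'])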
def SignatureHash(script, txTo, inIdx, hashtype):
"""Consensus-correct SignatureHash
Returns (hash, err) to precisely match the consensus-critical behavior of
the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
"""
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize()
s += struct.pack(b"<I", hashtype)
hash = hash256(s)
return (hash, None)
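# Usage sketch (assumed names, not from the original file): given a spending
# transaction `tx` whose input 0 spends an output locked by `script_pubkey`,
# the digest to be signed under SIGHASH_ALL would be obtained as:
#   (sighash, err) = SignatureHash(script_pubkey, tx, 0, SIGHASH_ALL)
#   assert err is None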
|
|
"""
Unit test for treadmill.appmgr.run
"""
# Disable C0302: Too many lines in module.
# pylint: disable=C0302
import os
import pwd
import shutil
import socket
import stat
import tempfile
import unittest
from collections import namedtuple
# Disable W0611: Unused import
import tests.treadmill_test_deps # pylint: disable=W0611
import mock
import treadmill
from treadmill import appmgr
from treadmill import firewall
from treadmill import utils
from treadmill import fs
from treadmill.appmgr import run as app_run
class AppMgrRunTest(unittest.TestCase):
"""Tests for teadmill.appmgr."""
def setUp(self):
# Access protected module _base_service
# pylint: disable=W0212
self.root = tempfile.mkdtemp()
self.app_env = mock.Mock(
root=self.root,
host_ip='172.31.81.67',
svc_cgroup=mock.Mock(
spec_set=treadmill.services._base_service.ResourceService,
),
svc_localdisk=mock.Mock(
spec_set=treadmill.services._base_service.ResourceService,
),
svc_network=mock.Mock(
spec_set=treadmill.services._base_service.ResourceService,
),
rules=mock.Mock(
spec_set=treadmill.rulefile.RuleMgr,
),
)
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('treadmill.fs.chroot_init', mock.Mock())
@mock.patch('treadmill.fs.create_filesystem', mock.Mock())
@mock.patch('treadmill.fs.test_filesystem', mock.Mock(return_value=False))
@mock.patch('treadmill.fs.make_rootfs', mock.Mock())
@mock.patch('treadmill.fs.mkdir_safe', mock.Mock())
@mock.patch('treadmill.fs.configure_plugins', mock.Mock())
@mock.patch('treadmill.fs.mount_bind', mock.Mock())
@mock.patch('treadmill.fs.mount_filesystem', mock.Mock())
@mock.patch('treadmill.subproc.check_call', mock.Mock())
@mock.patch('treadmill.utils.rootdir',
mock.Mock(return_value='/test_treadmill'))
@mock.patch('shutil.copytree', mock.Mock())
@mock.patch('shutil.copyfile', mock.Mock())
def test__create_root_dir(self):
"""Test creation on the container root directory."""
# Access protected module _create_root_dir
# pylint: disable=W0212
app = utils.to_obj(
{
'proid': 'myproid',
'name': 'myproid.test#0',
'uniqueid': 'ID1234',
'environment': 'dev',
'disk': '100G',
}
)
app_unique_name = appmgr.app_unique_name(app)
container_dir = os.path.join(self.root, 'apps', app_unique_name)
mock_ld_client = self.app_env.svc_localdisk.make_client.return_value
localdisk = {
'block_dev': '/dev/foo',
}
mock_ld_client.wait.return_value = localdisk
treadmill.appmgr.run._create_root_dir(self.app_env,
container_dir,
'/some/root_dir',
app)
treadmill.fs.chroot_init.assert_called_with('/some/root_dir')
treadmill.fs.create_filesystem.assert_called_with('/dev/foo')
        treadmill.fs.mount_filesystem.assert_called_with('/dev/foo',
                                                          '/some/root_dir')
treadmill.fs.make_rootfs.assert_called_with('/some/root_dir',
'myproid')
treadmill.fs.configure_plugins.assert_called_with(
self.root,
'/some/root_dir',
app
)
shutil.copytree.assert_called_with(
os.path.join(self.app_env.root, 'etc'),
'/some/root_dir/.etc'
)
shutil.copyfile.assert_called_with(
'/etc/hosts',
'/some/root_dir/.etc/hosts'
)
treadmill.subproc.check_call.assert_has_calls([
mock.call(
[
'mount', '-n', '--bind',
os.path.join(self.app_env.root, 'etc/resolv.conf'),
'/etc/resolv.conf'
]
)
])
@mock.patch('treadmill.appmgr.manifest.read', mock.Mock())
@mock.patch('treadmill.cgroups.join', mock.Mock())
def test_apply_cgroup_limits(self):
"""Test cgroup creation."""
manifest = {
'name': 'myproid.test#0',
'uniqueid': 'ID1234',
}
treadmill.appmgr.manifest.read.return_value = manifest
mock_cgroup_client = self.app_env.svc_cgroup.make_client.return_value
cgroups = {
'cpu': '/some/path',
'cpuacct': '/some/other/path',
'memory': '/mem/path',
'blkio': '/io/path',
}
mock_cgroup_client.wait.return_value = cgroups
app_dir = os.path.join(self.root, 'apps', 'myproid.test#0')
os.makedirs(app_dir)
app_run.apply_cgroup_limits(
self.app_env,
app_dir,
)
self.app_env.svc_cgroup.make_client.assert_called_with(
os.path.join(app_dir, 'cgroups')
)
mock_cgroup_client.wait.assert_called_with(
'myproid.test-0-0000000ID1234'
)
treadmill.cgroups.join.assert_has_calls(
[
mock.call(ss, path)
for ss, path in cgroups.items()
],
any_order=True
)
@mock.patch('treadmill.cgroups.makepath',
mock.Mock(return_value='/test/cgroup/path'))
@mock.patch('treadmill.fs.mkdir_safe', mock.Mock())
@mock.patch('treadmill.fs.mount_bind', mock.Mock())
def test__share_cgroup_info(self):
"""Test sharing of cgroup information with the container."""
# Access protected module _share_cgroup_info
# pylint: disable=W0212
app = utils.to_obj(
{
'name': 'myproid.test#0',
'uniqueid': 'ID1234',
}
)
treadmill.appmgr.run._share_cgroup_info(app, '/some/root_dir')
# Check that cgroup mountpoint exists inside the container.
treadmill.fs.mkdir_safe.assert_has_calls([
mock.call('/some/root_dir/cgroup/memory')
])
treadmill.fs.mount_bind.assert_has_calls([
mock.call('/some/root_dir', '/cgroup/memory', '/test/cgroup/path')
])
@mock.patch('pwd.getpwnam', mock.Mock(
return_value=namedtuple(
'pwnam',
['pw_uid', 'pw_dir', 'pw_shell']
)(3, '/', '/bin/sh')))
@mock.patch('treadmill.fs.mkdir_safe', mock.Mock())
@mock.patch('treadmill.fs.mount_bind', mock.Mock())
@mock.patch('treadmill.supervisor.create_service', mock.Mock())
@mock.patch('treadmill.utils.create_script', mock.Mock())
@mock.patch('treadmill.utils.touch', mock.Mock())
@mock.patch('treadmill.utils.rootdir',
mock.Mock(return_value='/test_treadmill'))
def test__create_supervision_tree(self):
"""Test creation of the supervision tree."""
# pylint: disable=W0212
treadmill.subproc.EXECUTABLES = {
'chroot': '/bin/ls',
'pid1': '/bin/ls',
}
# Access protected module _create_supervision_tree
# pylint: disable=W0212
app = utils.to_obj(
{
'proid': 'myproid',
'name': 'myproid.test#0',
'uniqueid': 'ID1234',
'environment': 'prod',
'services': [
{
'name': 'command1',
'command': '/path/to/command',
'restart': {
'limit': 3,
'interval': 60,
},
}, {
'name': 'command2',
'command': '/path/to/other/command',
'restart': {
'limit': 3,
'interval': 60,
},
}
],
'system_services': [
{
'name': 'command3',
'command': '/path/to/sbin/command',
'restart': {
'limit': 5,
'interval': 60,
},
}, {
'name': 'command4',
'command': '/path/to/other/sbin/command',
'restart': {
'limit': 5,
'interval': 60,
},
}
],
'vring': {
'cells': ['a', 'b']
},
}
)
base_dir = '/some/dir'
events_dir = '/some/dir/appevents'
treadmill.appmgr.run._create_supervision_tree(
base_dir,
events_dir,
app,
)
treadmill.fs.mkdir_safe.assert_has_calls([
mock.call('/some/dir/root/services'),
mock.call('/some/dir/services'),
mock.call('/some/dir/services/command1/log'),
mock.call('/some/dir/services/command2/log'),
mock.call('/some/dir/services/command3/log'),
mock.call('/some/dir/services/command4/log'),
mock.call('/some/dir/sys/vring.a'),
mock.call('/some/dir/sys/vring.a/log'),
mock.call('/some/dir/sys/vring.b'),
mock.call('/some/dir/sys/vring.b/log'),
mock.call('/some/dir/sys/monitor'),
mock.call('/some/dir/sys/monitor/log'),
mock.call('/some/dir/sys/register'),
mock.call('/some/dir/sys/register/log'),
mock.call('/some/dir/sys/start_container'),
mock.call('/some/dir/sys/start_container/log'),
])
treadmill.fs.mount_bind.assert_called_with(
'/some/dir/root', '/services', '/some/dir/services',
)
pwd.getpwnam.assert_has_calls(
[
mock.call('myproid'),
mock.call('root')
],
any_order=True
)
treadmill.supervisor.create_service.assert_has_calls([
# user services
mock.call('/some/dir/services',
'myproid',
mock.ANY, mock.ANY,
'command1',
'/path/to/command',
as_root=True,
down=True,
envdirs=['/environ/app', '/environ/sys'],
env='prod'),
mock.call('/some/dir/services',
'myproid',
mock.ANY, mock.ANY,
'command2',
'/path/to/other/command',
as_root=True,
down=True,
envdirs=['/environ/app', '/environ/sys'],
env='prod'),
# system services
mock.call('/some/dir/services',
'root',
mock.ANY, mock.ANY,
'command3',
'/path/to/sbin/command',
as_root=True,
down=False,
envdirs=['/environ/sys'],
env='prod'),
mock.call('/some/dir/services',
'root',
mock.ANY, mock.ANY,
'command4',
'/path/to/other/sbin/command',
as_root=True,
down=False,
envdirs=['/environ/sys'],
env='prod')
])
treadmill.utils.create_script.assert_has_calls([
mock.call('/some/dir/services/command1/log/run', 'logger.run'),
mock.call('/some/dir/services/command2/log/run', 'logger.run'),
mock.call('/some/dir/services/command3/log/run', 'logger.run'),
mock.call('/some/dir/services/command4/log/run', 'logger.run'),
mock.call('/some/dir/sys/vring.a/run',
'supervisor.run_sys',
cmd=mock.ANY),
mock.call('/some/dir/sys/vring.a/log/run',
'logger.run'),
mock.call('/some/dir/sys/vring.b/run',
'supervisor.run_sys',
cmd=mock.ANY),
mock.call('/some/dir/sys/vring.b/log/run',
'logger.run'),
mock.call('/some/dir/sys/monitor/run',
'supervisor.run_sys',
cmd=mock.ANY),
mock.call('/some/dir/sys/monitor/log/run',
'logger.run'),
mock.call('/some/dir/sys/register/run',
'supervisor.run_sys',
cmd=mock.ANY),
mock.call('/some/dir/sys/register/log/run',
'logger.run'),
mock.call(
'/some/dir/sys/start_container/run',
'supervisor.run_sys',
cmd=('/bin/ls /some/dir/root /bin/ls '
'-m -p -i s6-svscan /services')
),
mock.call('/some/dir/sys/start_container/log/run',
'logger.run'),
])
treadmill.utils.touch.assert_has_calls([
mock.call('/some/dir/sys/start_container/down'),
])
@mock.patch('socket.socket', mock.Mock(autospec=True))
@mock.patch('treadmill.appmgr.run._allocate_sockets', mock.Mock())
def test__allocate_network_ports(self):
"""Test network port allocation.
"""
# access protected module _allocate_network_ports
# pylint: disable=w0212
treadmill.appmgr.run._allocate_sockets.side_effect = \
lambda _x, _y, _z, count: [socket.socket()] * count
mock_socket = socket.socket.return_value
mock_socket.getsockname.side_effect = [
('unused', 50001),
('unused', 60001),
('unused', 10000),
('unused', 10001),
('unused', 10002),
('unused', 12345),
('unused', 54321),
]
manifest = {
'environment': 'dev',
'endpoints': [
{
'name': 'http',
'port': 8000,
'proto': 'tcp',
}, {
'name': 'ssh',
'port': 0,
'proto': 'tcp',
}, {
'name': 'dns',
'port': 5353,
'proto': 'udp',
}, {
'name': 'port0',
'port': 0,
'proto': 'udp',
}
],
'ephemeral_ports': 3,
}
treadmill.appmgr.run._allocate_network_ports(
'1.2.3.4',
manifest
)
        # In the updated manifest, make sure that real_port is specified from
        # the ephemeral range as returned by getsockname.
self.assertEquals(8000,
manifest['endpoints'][0]['port'])
self.assertEquals(50001,
manifest['endpoints'][0]['real_port'])
self.assertEquals(60001,
manifest['endpoints'][1]['port'])
self.assertEquals(60001,
manifest['endpoints'][1]['real_port'])
self.assertEquals(5353,
manifest['endpoints'][2]['port'])
self.assertEquals(12345,
manifest['endpoints'][2]['real_port'])
self.assertEquals(54321,
manifest['endpoints'][3]['port'])
self.assertEquals(54321,
manifest['endpoints'][3]['real_port'])
self.assertEquals([10000, 10001, 10002],
manifest['ephemeral_ports'])
@mock.patch('treadmill.iptables.add_ip_set', mock.Mock())
@mock.patch('treadmill.newnet.create_newnet', mock.Mock())
def test__unshare_network_simple(self):
"""Tests unshare network sequence.
"""
        # Access protected module _unshare_network
# pylint: disable=W0212
app = utils.to_obj(
{
'name': 'proid.test#0',
'uniqueid': 'ID1234',
'environment': 'dev',
'network': {
'veth': 'id1234.0',
'vip': '192.168.1.1',
'gateway': '192.168.254.254',
},
'host_ip': '172.31.81.67',
'shared_ip': True,
'ephemeral_ports': [],
'endpoints': [
{
'real_port': '5007',
'proto': 'tcp',
'port': '22',
'type': 'infra'
},
{
'real_port': '5013',
'proto': 'udp',
'port': '12345'
}
],
}
)
app_unique_name = appmgr.app_unique_name(app)
appmgr.run._unshare_network(self.app_env, app)
treadmill.iptables.add_ip_set.assert_has_calls([
mock.call(treadmill.iptables.SET_INFRA_SVC,
'192.168.1.1,tcp:22'),
])
self.app_env.rules.create_rule.assert_has_calls(
[
mock.call(rule=firewall.DNATRule('tcp',
'172.31.81.67', '5007',
'192.168.1.1', '22'),
owner=app_unique_name),
mock.call(rule=firewall.DNATRule('udp',
'172.31.81.67', '5013',
'192.168.1.1', '12345'),
owner=app_unique_name)
],
any_order=True
)
treadmill.newnet.create_newnet.assert_called_with(
'id1234.0',
'192.168.1.1',
'192.168.254.254',
'172.31.81.67',
)
@mock.patch('socket.gethostbyname', mock.Mock())
@mock.patch('treadmill.iptables.add_ip_set', mock.Mock())
@mock.patch('treadmill.newnet.create_newnet', mock.Mock())
def test__unshare_network_complex(self):
"""Test unshare network advanced sequence (ephemeral/passthrough)."""
        # Access protected module _unshare_network
# pylint: disable=W0212
app = utils.to_obj(
{
'name': 'myproid.test#0',
'environment': 'dev',
'uniqueid': 'ID1234',
'network': {
'veth': 'id1234.0',
'vip': '192.168.0.2',
'gateway': '192.168.254.254'
},
'shared_ip': False,
'endpoints': [
{
'name': 'ssh',
'port': 54321,
'real_port': 54321,
'type': 'infra',
'proto': 'tcp',
},
{
'name': 'test2',
'port': 54322,
'real_port': 54322,
'proto': 'udp',
}
],
'ephemeral_ports': [
10000,
10001,
10002,
],
'passthrough': [
'xxx',
'yyy',
'zzz',
],
}
)
app_unique_name = appmgr.app_unique_name(app)
hosts_to_ip = {
'xxx': '4.4.4.4',
'yyy': '5.5.5.5',
'zzz': '5.5.5.5',
}
socket.gethostbyname.side_effect = lambda h: hosts_to_ip[h]
self.app_env.rules.get_rules.return_value = set()
treadmill.appmgr.run._unshare_network(
self.app_env,
app
)
self.app_env.rules.create_rule.assert_has_calls(
[
mock.call(rule=firewall.DNATRule('tcp',
'172.31.81.67', 54321,
'192.168.0.2', 54321),
owner=app_unique_name),
mock.call(rule=firewall.DNATRule('udp',
'172.31.81.67', 54322,
'192.168.0.2', 54322),
owner=app_unique_name),
mock.call(rule=firewall.DNATRule('tcp',
'172.31.81.67', 10000,
'192.168.0.2', 10000),
owner=app_unique_name),
mock.call(rule=firewall.DNATRule('tcp',
'172.31.81.67', 10001,
'192.168.0.2', 10001),
owner=app_unique_name),
mock.call(rule=firewall.DNATRule('tcp',
'172.31.81.67', 10002,
'192.168.0.2', 10002),
owner=app_unique_name),
mock.call(rule=firewall.PassThroughRule('4.4.4.4',
'192.168.0.2'),
owner=app_unique_name),
mock.call(rule=firewall.PassThroughRule('5.5.5.5',
'192.168.0.2'),
owner=app_unique_name),
],
any_order=True
)
# Check that infra services + ephemeral ports are in the same set.
treadmill.iptables.add_ip_set.assert_has_calls([
mock.call(treadmill.iptables.SET_INFRA_SVC,
'192.168.0.2,tcp:54321'),
mock.call(treadmill.iptables.SET_INFRA_SVC,
'192.168.0.2,tcp:10000'),
mock.call(treadmill.iptables.SET_INFRA_SVC,
'192.168.0.2,tcp:10001'),
mock.call(treadmill.iptables.SET_INFRA_SVC,
'192.168.0.2,tcp:10002'),
])
treadmill.newnet.create_newnet.assert_called_with(
'id1234.0',
'192.168.0.2',
'192.168.254.254',
None,
)
@mock.patch('shutil.copy', mock.Mock())
@mock.patch('treadmill.appmgr.manifest.read', mock.Mock())
@mock.patch('treadmill.appmgr.run._allocate_network_ports', mock.Mock())
@mock.patch('treadmill.appmgr.run._create_environ_dir', mock.Mock())
@mock.patch('treadmill.appmgr.run._create_root_dir', mock.Mock())
@mock.patch('treadmill.appmgr.run._create_supervision_tree', mock.Mock())
@mock.patch('treadmill.appmgr.run._prepare_ldpreload', mock.Mock())
@mock.patch('treadmill.appmgr.run._share_cgroup_info', mock.Mock())
@mock.patch('treadmill.appmgr.run._unshare_network', mock.Mock())
@mock.patch('treadmill.fs.mount_bind', mock.Mock())
@mock.patch('treadmill.supervisor.exec_root_supervisor', mock.Mock())
@mock.patch('treadmill.subproc.check_call', mock.Mock())
@mock.patch('treadmill.utils.rootdir',
mock.Mock(return_value='/treadmill'))
def test_run(self):
"""Tests appmgr.run sequence, which will result in supervisor exec.
"""
# access protected module _allocate_network_ports
# pylint: disable=w0212
manifest = {
'shared_network': False,
'ephemeral_ports': 3,
'passthrough': [
'xxx',
'yyy',
'zzz'
],
'memory': '100M',
'host_ip': '172.31.81.67',
'uniqueid': 'ID1234',
'services': [
{
'name': 'web_server',
'command': '/bin/true',
'restart': {
'limit': 3,
'interval': 60,
},
}
],
'disk': '100G',
'tickets': True,
'name': 'proid.myapp#0',
'system_services': [],
'environment': 'dev',
'proid': 'foo',
'endpoints': [
{
'name': 'http',
'port': 8000
},
{
'name': 'port0',
'port': 0
},
{
'type': 'infra',
'name': 'ssh',
'port': 0
}
],
'cpu': '100%'
}
treadmill.appmgr.manifest.read.return_value = manifest
app_unique_name = 'proid.myapp-0-0000000ID1234'
app_dir = os.path.join(self.root, 'apps', app_unique_name)
os.makedirs(app_dir)
mock_nwrk_client = self.app_env.svc_network.make_client.return_value
network = {
'vip': '2.2.2.2',
'gateway': '1.1.1.1',
'veth': 'testveth.0',
}
mock_nwrk_client.wait.return_value = network
def _fake_allocate_network_ports(_ip, manifest):
"""Mimick inplace manifest modification in _allocate_network_ports.
"""
manifest['ephemeral_ports'] = ['1', '2', '3']
return mock.DEFAULT
treadmill.appmgr.run._allocate_network_ports.side_effect = \
_fake_allocate_network_ports
mock_watchdog = mock.Mock()
treadmill.subproc.EXECUTABLES['treadmill_bind_preload.so'] = (
'/some/$LIB/treadmill_bind_preload.so')
app_run.run(
self.app_env, app_dir, mock_watchdog, terminated=()
)
# Check that port allocation is correctly called.
        # XXX(boysson): potential mock gotcha: assert_called_with sees the vip
        #               values because the manifest is modified in place, even
        #               though the vips are allocated *after* the call.
manifest['vip'] = {
'ip0': '1.1.1.1',
'ip1': '2.2.2.2',
}
manifest['network'] = network
manifest['ephemeral_ports'] = ['1', '2', '3']
treadmill.appmgr.run._allocate_network_ports.assert_called_with(
'172.31.81.67', manifest,
)
        # Make sure, post modification, that the manifest is readable by others.
st = os.stat(os.path.join(app_dir, 'state.yml'))
self.assertTrue(st.st_mode & stat.S_IRUSR)
self.assertTrue(st.st_mode & stat.S_IRGRP)
self.assertTrue(st.st_mode & stat.S_IROTH)
self.assertTrue(st.st_mode & stat.S_IWUSR)
self.assertFalse(st.st_mode & stat.S_IWGRP)
self.assertFalse(st.st_mode & stat.S_IWOTH)
        # state.yml is what gets copied into the container
shutil.copy.assert_called_with(
os.path.join(app_dir, 'state.yml'),
os.path.join(app_dir, 'root', 'app.yml'),
)
# Network unshare
app = utils.to_obj(manifest)
treadmill.appmgr.run._unshare_network.assert_called_with(
self.app_env, app
)
# Create root dir
treadmill.appmgr.run._create_root_dir.assert_called_with(
self.app_env,
app_dir,
os.path.join(app_dir, 'root'),
app,
)
# XXX(boysson): Missing environ_dir/manifest_dir tests
# Create supervision tree
treadmill.appmgr.run._create_supervision_tree.assert_called_with(
app_dir,
self.app_env.app_events_dir,
app
)
treadmill.appmgr.run._share_cgroup_info.assert_called_with(
app,
os.path.join(app_dir, 'root'),
)
# Ephemeral LDPRELOAD
treadmill.appmgr.run._prepare_ldpreload.assert_called_with(
os.path.join(app_dir, 'root'),
['/some/$LIB/treadmill_bind_preload.so']
)
# Misc bind mounts
treadmill.fs.mount_bind.assert_has_calls([
mock.call(
os.path.join(app_dir, 'root'),
'/etc/resolv.conf',
bind_opt='--bind',
target=os.path.join(app_dir, 'root/.etc/resolv.conf')
),
mock.call(
os.path.join(app_dir, 'root'),
'/etc/hosts',
bind_opt='--bind',
target=os.path.join(app_dir, 'root/.etc/hosts')
),
mock.call(
os.path.join(app_dir, 'root'),
'/etc/ld.so.preload',
bind_opt='--bind',
target=os.path.join(app_dir, 'root/.etc/ld.so.preload')
),
mock.call(
os.path.join(app_dir, 'root'),
'/etc/pam.d/sshd',
bind_opt='--bind',
target=os.path.join(app_dir, 'root/.etc/pam.d/sshd')
),
])
self.assertTrue(mock_watchdog.remove.called)
@mock.patch('shutil.copy', mock.Mock())
@mock.patch('treadmill.appmgr.manifest.read', mock.Mock())
@mock.patch('treadmill.appmgr.run._allocate_network_ports', mock.Mock())
@mock.patch('treadmill.appmgr.run._create_environ_dir', mock.Mock())
@mock.patch('treadmill.appmgr.run._create_root_dir', mock.Mock())
@mock.patch('treadmill.appmgr.run._create_supervision_tree', mock.Mock())
@mock.patch('treadmill.appmgr.run._prepare_ldpreload', mock.Mock())
@mock.patch('treadmill.appmgr.run._share_cgroup_info', mock.Mock())
@mock.patch('treadmill.appmgr.run._unshare_network', mock.Mock())
@mock.patch('treadmill.fs.configure_plugins', mock.Mock())
@mock.patch('treadmill.fs.mount_bind', mock.Mock())
@mock.patch('treadmill.supervisor.exec_root_supervisor', mock.Mock())
@mock.patch('treadmill.subproc.check_call', mock.Mock())
@mock.patch('treadmill.utils.rootdir',
mock.Mock(return_value='/treadmill'))
def test_run_no_ephemeral(self):
"""Tests appmgr.run without ephemeral ports in manifest."""
# Modify app manifest so that it does not contain ephemeral ports,
# make sure that .etc/ld.so.preload is not created.
# access protected module _allocate_network_ports
# pylint: disable=w0212
manifest = {
'shared_network': False,
'disk': '100G',
'name': 'proid.myapp#0',
'passthrough': [
'xxx',
'yyy',
'zzz'
],
'memory': '100M',
'host_ip': '172.31.81.67',
'system_services': [],
'environment': 'dev',
'uniqueid': 'ID1234',
'proid': 'foo',
'services': [
{
'name': 'web_server',
'command': '/bin/true',
'restart': {
'limit': 3,
'interval': 60,
},
}
],
'endpoints': [
{
'name': 'http',
'port': 8000
},
{
'name': 'port0',
'port': 0
},
{
'type': 'infra',
'name': 'ssh',
'port': 0
}
],
'cpu': '100%'
}
treadmill.appmgr.manifest.read.return_value = manifest
app_dir = os.path.join(self.root, 'apps', 'proid.myapp#0')
os.makedirs(app_dir)
mock_nwrk_client = self.app_env.svc_network.make_client.return_value
network = {
'vip': '2.2.2.2',
'gateway': '1.1.1.1',
'veth': 'testveth.0',
}
mock_nwrk_client.wait.return_value = network
rootdir = os.path.join(app_dir, 'root')
def _fake_allocate_network_ports(_ip, manifest):
"""Mimick inplace manifest modification in _allocate_network_ports.
"""
manifest['ephemeral_ports'] = []
return mock.DEFAULT
treadmill.appmgr.run._allocate_network_ports.side_effect = \
_fake_allocate_network_ports
mock_watchdog = mock.Mock()
app_run.run(
self.app_env, app_dir, mock_watchdog, terminated=()
)
self.assertFalse(
os.path.exists(
os.path.join(rootdir, '.etc/ld.so.preload')
)
)
self.assertTrue(mock_watchdog.remove.called)
@mock.patch('shutil.copy', mock.Mock())
@mock.patch('treadmill.appmgr.manifest.read', mock.Mock())
@mock.patch('treadmill.appmgr.run._allocate_network_ports', mock.Mock())
@mock.patch('treadmill.appmgr.run._create_environ_dir', mock.Mock())
@mock.patch('treadmill.appmgr.run._create_root_dir', mock.Mock())
@mock.patch('treadmill.appmgr.run._create_supervision_tree', mock.Mock())
@mock.patch('treadmill.appmgr.run._prepare_ldpreload', mock.Mock())
@mock.patch('treadmill.appmgr.run._share_cgroup_info', mock.Mock())
@mock.patch('treadmill.appmgr.run._unshare_network', mock.Mock())
@mock.patch('treadmill.fs.configure_plugins', mock.Mock())
@mock.patch('treadmill.fs.mount_bind', mock.Mock())
@mock.patch('treadmill.supervisor.exec_root_supervisor', mock.Mock())
@mock.patch('treadmill.subproc.check_call', mock.Mock())
@mock.patch('treadmill.utils.rootdir',
mock.Mock(return_value='/treadmill'))
def test_run_ticket_failure(self):
"""Tests appmgr.run sequence, which will result in supervisor exec.
"""
# access protected module _allocate_network_ports
# pylint: disable=w0212
manifest = {
'shared_network': False,
'disk': '100G',
'name': 'proid.myapp#0',
'memory': '100M',
'environment': 'dev',
'uniqueid': 'ID1234',
'proid': 'foo',
'services': [
{
'name': 'web_server',
'command': '/bin/true',
'restart': {
'limit': 3,
'interval': 60,
},
}
],
'endpoints': [
{
'name': 'http',
'port': 8000
},
{
'name': 'port0',
'port': 0
}
],
'cpu': '100%'
}
treadmill.appmgr.manifest.read.return_value = manifest
app_dir = os.path.join(self.root, 'apps', 'proid.myapp#0')
os.makedirs(app_dir)
mock_nwrk_client = self.app_env.svc_network.make_client.return_value
network = {
'vip': '2.2.2.2',
'gateway': '1.1.1.1',
'veth': 'testveth.0',
}
mock_nwrk_client.wait.return_value = network
def _fake_allocate_network_ports(_ip, manifest):
"""Mimick inplace manifest modification in _allocate_network_ports.
"""
manifest['ephemeral_ports'] = []
return mock.DEFAULT
treadmill.appmgr.run._allocate_network_ports.side_effect = \
_fake_allocate_network_ports
        # Make sure that despite ticket absence no exception is raised.
mock_watchdog = mock.Mock()
app_run.run(
self.app_env, app_dir, mock_watchdog, terminated=()
)
self.assertTrue(mock_watchdog.remove.called)
def test__prepare_ldpreload(self):
"""Test generation of the etc/ldpreload file."""
# access protected module _prepare_ldpreload
# pylint: disable=w0212
appmgr.run._prepare_ldpreload(self.root, ['/foo/1.so', '/foo/2.so'])
newfile = open(os.path.join(self.root,
'.etc', 'ld.so.preload')).readlines()
self.assertEquals('/foo/2.so\n', newfile[-1])
self.assertEquals('/foo/1.so\n', newfile[-2])
@mock.patch('pwd.getpwnam', mock.Mock(
return_value=namedtuple(
'pwnam',
['pw_uid', 'pw_dir', 'pw_shell']
)(3, '/', '/bin/sh')))
@mock.patch('treadmill.fs.mount_bind', mock.Mock())
@mock.patch('treadmill.utils.rootdir', mock.Mock(return_value='/some/dir'))
@mock.patch('treadmill.subproc.resolve', mock.Mock(return_value=''))
def test_sysdir_cleanslate(self):
"""Verifies that sys directories are always clean slate."""
# Disable access to protected member warning.
#
# pylint: disable=W0212
base_dir = os.path.join(self.root, 'some/dir')
events_dir = os.path.join(base_dir, 'appevents')
fs.mkdir_safe(base_dir)
app = utils.to_obj(
{
'proid': 'myproid',
'name': 'myproid.test#0',
'uniqueid': 'ID1234',
'environment': 'prod',
'services': [
{
'name': 'command1',
'command': '/path/to/command',
'restart': {
'limit': 3,
'interval': 60,
},
}, {
'name': 'command2',
'command': '/path/to/other/command',
'restart': {
'limit': 3,
'interval': 60,
},
}
],
'system_services': [
{
'name': 'command3',
'command': '/path/to/sbin/command',
'restart': {
'limit': 3,
'interval': 60,
},
}, {
'name': 'command4',
'command': '/path/to/other/sbin/command',
'restart': {
'limit': 3,
'interval': 60,
},
}
],
'vring': {
'cells': [],
},
}
)
treadmill.appmgr.run._create_supervision_tree(
base_dir,
events_dir,
app,
)
self.assertTrue(os.path.exists(os.path.join(base_dir, 'sys')))
with open(os.path.join(base_dir, 'sys', 'toberemoved'), 'w+') as _f:
pass
self.assertTrue(
os.path.exists(os.path.join(base_dir, 'sys', 'toberemoved')))
treadmill.appmgr.run._create_supervision_tree(
base_dir,
events_dir,
app,
)
self.assertTrue(os.path.exists(os.path.join(base_dir, 'sys')))
self.assertFalse(
os.path.exists(os.path.join(base_dir, 'sys', 'toberemoved')))
if __name__ == '__main__':
unittest.main()
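# Side note (illustrative only, not part of the original tests): mock keeps
# references to call arguments rather than copies, so mutating an argument in
# place *after* the call still satisfies assert_called_with() with the mutated
# value. This is the behaviour the XXX(boysson) note in test_run() relies on.
def _mock_inplace_mutation_note():
    """Minimal sketch of mock's argument-reference behaviour."""
    m = mock.Mock()
    d = {'a': 1}
    m(d)
    d['b'] = 2
    # Passes: the recorded call arguments point at the same (mutated) dict.
    m.assert_called_with({'a': 1, 'b': 2})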
|
|
from collections import defaultdict
import logging
import networkx
from .pathprioritizer import PathPrioritizer
from .errors import AngrAnnotatedCFGError, AngrExitError
from .analyses.cfg.cfg_node import CFGNode
l = logging.getLogger("angr.annocfg")
class AnnotatedCFG(object):
"""
AnnotatedCFG is a control flow graph with statement whitelists and exit whitelists to describe a slice of the
program.
"""
def __init__(self, project, cfg=None, detect_loops=False):
"""
Constructor.
:param project: The angr Project instance
:param cfg: Control flow graph. Only used when path prioritizer is used.
:param detect_loops: Only used when path prioritizer is used.
"""
self._project = project
self._cfg = None
self._target = None
self._run_statement_whitelist = defaultdict(list)
self._exit_taken = defaultdict(list)
self._addr_to_run = {}
self._addr_to_last_stmt_id = {}
self._loops = []
self._path_merge_points = [ ]
self._path_prioritizer = None
if cfg is not None:
self._cfg = cfg
#if target_irsb_addr is not None:
# self._path_prioritizer = PathPrioritizer(self._cfg, self._target)
if self._cfg is not None:
for run in self._cfg.nodes():
self._addr_to_run[self.get_addr(run)] = run
#
# Public methods
#
def from_digraph(self, digraph):
"""
Initialize this AnnotatedCFG object with a networkx.DiGraph consisting of the following
form of nodes:
Tuples like (block address, statement ID)
Those nodes are connected by edges indicating the execution flow.
:param digraph: A networkx.DiGraph object
"""
for n1 in digraph.nodes_iter():
addr1, stmt_idx1 = n1
self.add_statements_to_whitelist(addr1, (stmt_idx1,))
successors = digraph[n1]
for n2 in successors:
addr2, stmt_idx2 = n2
if addr1 != addr2:
# There is a control flow transition from block `addr1` to block `addr2`
self.add_exit_to_whitelist(addr1, addr2)
self.add_statements_to_whitelist(addr2, (stmt_idx2,))
def get_addr(self, run):
if isinstance(run, CFGNode):
return run.addr
elif type(run) in (int, long):
return run
else:
raise AngrAnnotatedCFGError("Unknown type '%s' of the 'run' argument" % type(run))
def add_block_to_whitelist(self, block):
addr = self.get_addr(block)
self._run_statement_whitelist[addr] = True
def add_statements_to_whitelist(self, block, stmt_ids):
addr = self.get_addr(block)
if type(stmt_ids) is bool:
if type(self._run_statement_whitelist[addr]) is list and self._run_statement_whitelist[addr]:
raise Exception("WTF")
self._run_statement_whitelist[addr] = stmt_ids
elif -1 in stmt_ids:
self._run_statement_whitelist[addr] = True
else:
self._run_statement_whitelist[addr].extend(stmt_ids)
self._run_statement_whitelist[addr] = \
sorted(list(set(self._run_statement_whitelist[addr])))
def add_exit_to_whitelist(self, run_from, run_to):
addr_from = self.get_addr(run_from)
addr_to = self.get_addr(run_to)
self._exit_taken[addr_from].append(addr_to)
def set_last_statement(self, block_addr, stmt_id):
self._addr_to_last_stmt_id[block_addr] = stmt_id
def add_loop(self, loop_tuple):
"""
A loop tuple contains a series of IRSB addresses that form a loop. Ideally
it always starts with the first IRSB that we meet during the execution.
"""
self._loops.append(loop_tuple)
def set_path_merge_points(self, points):
self._path_merge_points = points.copy()
def should_take_exit(self, addr_from, addr_to):
if addr_from in self._exit_taken:
return addr_to in self._exit_taken[addr_from]
return False
def should_execute_statement(self, addr, stmt_id):
if self._run_statement_whitelist is None:
return True
elif addr in self._run_statement_whitelist:
r = self._run_statement_whitelist[addr]
if isinstance(r, bool):
return r
else:
return stmt_id in self._run_statement_whitelist[addr]
return False
def get_run(self, addr):
if addr in self._addr_to_run:
return self._addr_to_run[addr]
return None
def get_whitelisted_statements(self, addr):
"""
        :returns: None if every statement of the block is whitelisted (i.e. the
                  whole block should be executed), a list of whitelisted
                  statement IDs otherwise, or an empty list if the address is
                  not in the whitelist at all.
"""
if addr in self._run_statement_whitelist:
if self._run_statement_whitelist[addr] is True:
return None # This is the default value used to say
# we execute all statements in this basic block. A
# little weird...
else:
return self._run_statement_whitelist[addr]
else:
return []
def get_last_statement_index(self, addr):
if addr in self._exit_taken:
return None
if addr in self._addr_to_last_stmt_id:
return self._addr_to_last_stmt_id[addr]
elif addr in self._run_statement_whitelist:
return max(self._run_statement_whitelist[addr])
return None
def get_loops(self):
return self._loops
def get_targets(self, source_addr):
if source_addr in self._exit_taken:
return self._exit_taken[source_addr]
return None
#
# Debugging helpers
#
def dbg_repr(self):
ret_str = ""
ret_str += "IRSBs:\n"
for addr, run in self._addr_to_run.items():
if addr is None:
continue
ret_str += "%#x => %s\n" % (addr, run)
l.debug("statements: ")
for addr, stmts in self._run_statement_whitelist.items():
if addr is None:
continue
ret_str += "Address 0x%08x:\n" % addr
l.debug(stmts)
l.debug("Loops: ")
for loop in self._loops:
s = ""
for addr in loop:
s += "0x%08x -> " % addr
ret_str += s + "\n"
return ret_str
def dbg_print_irsb(self, irsb_addr, project=None):
"""
Pretty-print an IRSB with whitelist information
"""
if project is None:
project = self._project
if project is None:
raise Exception("Dict addr_to_run is empty. " + \
"Give me a project, and I'll recreate the IRSBs for you.")
else:
vex_block = project.factory.block(irsb_addr).vex
statements = vex_block.statements
whitelist = self.get_whitelisted_statements(irsb_addr)
for i in range(0, len(statements)):
if whitelist is True or i in whitelist:
line = "+"
else:
line = "-"
line += "[% 3d] " % i
# We cannot get data returned by pp(). WTF?
print line,
statements[i].pp()
#
    # Helper methods for path prioritization
#
def keep_path(self, path):
"""
Given a path, returns True if the path should be kept, False if it should be cut.
"""
if len(path.addr_trace) < 2:
return True
return self.should_take_exit(path.addr_trace[-2], path.addr_trace[-1])
def filter_path(self, path):
"""
Used for debugging.
:param path: A Path instance
:return: True/False
"""
return True
def merge_points(self, path):
addr = path.addr
if addr in self._path_merge_points:
return {self._path_merge_points[addr]}
else:
return set()
def path_priority(self, path):
"""
Given a path, returns the path priority. A lower number means a higher priority.
"""
return self._path_prioritizer.get_priority(path)
def successor_func(self, path):
"""
        Callback routine that takes in a path and returns all of its feasible successors for the path group. This callback routine
should be passed to the keyword argument "successor_func" of PathGroup.step().
:param path: A Path instance.
:return: A list of all feasible Path successors.
"""
whitelist = self.get_whitelisted_statements(path.addr)
last_stmt = self.get_last_statement_index(path.addr)
# pass in those arguments
successors = path.step(
stmt_whitelist=whitelist,
last_stmt=None
)
# further filter successors based on the annotated CFG
taken_successors = [ ]
for suc in successors:
try:
taken = self.should_take_exit(path.addr, suc.addr)
except AngrExitError:
l.debug("Got an unknown exit that AnnotatedCFG does not know about: %#x -> %#x", path.addr, suc.addr)
continue
if taken:
taken_successors.append(suc)
return taken_successors
#
# Overridden methods
#
def __getstate__(self):
state = {}
state['_run_statement_whitelist'] = self._run_statement_whitelist
state['_exit_taken'] = self._exit_taken
# state['_addr_to_run'] = self._addr_to_run
state['_addr_to_last_stmt_id'] = self._addr_to_last_stmt_id
state['_loops'] = self._loops
state['_path_merge_points'] = self._path_merge_points
state['_path_prioritizer'] = self._path_prioritizer
state['_cfg'] = None
state['_project'] = None
state['_addr_to_run'] = None
return state
#
# Private methods
#
def _detect_loops(self):
temp_graph = networkx.DiGraph()
for source, target_list in self._cfg._edge_map.items():
for target in target_list:
temp_graph.add_edge(source, target)
ctr = 0
for loop_lst in networkx.simple_cycles(temp_graph):
l.debug("A loop is found. %d", ctr)
ctr += 1
loop = (tuple([x[-1] for x in loop_lst]))
print " => ".join(["0x%08x" % x for x in loop])
self.add_loop(loop)
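# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# networkx 1.x DiGraph (matching the nodes_iter() API used by from_digraph()
# above) whose nodes are (block address, statement index) tuples, and passes
# project=None because the constructor merely stores that reference.
def _annocfg_usage_sketch():
    g = networkx.DiGraph()
    g.add_edge((0x400000, 0), (0x400000, 3))   # flow within one block
    g.add_edge((0x400000, 3), (0x400010, 1))   # transition to the next block
    acfg = AnnotatedCFG(project=None)
    acfg.from_digraph(g)
    assert acfg.should_execute_statement(0x400000, 3)
    assert acfg.should_take_exit(0x400000, 0x400010)
    assert acfg.get_whitelisted_statements(0x400010) == [1]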
|
|
import os
import pytest
from django.core.cache import caches
from django.db.models import Q
from django.db.models.functions import Upper
from django.test import SimpleTestCase, TestCase, override_settings
from django_perf_rec import TestCaseMixin, get_perf_path, get_record_name, record
from tests.testapp.models import Author
from tests.utils import pretend_not_under_pytest, run_query, temporary_path
FILE_DIR = os.path.dirname(__file__)
class RecordTests(TestCase):
def test_single_db_query(self):
with record():
run_query("default", "SELECT 1337")
def test_single_db_query_model(self):
with record():
list(Author.objects.all())
@override_settings(PERF_REC={"HIDE_COLUMNS": False})
def test_single_db_query_model_with_columns(self):
with record():
list(Author.objects.all())
def test_multiple_db_queries(self):
with record():
run_query("default", "SELECT 1337")
run_query("default", "SELECT 4949")
def test_non_deterministic_QuerySet_annotate(self):
with record():
list(Author.objects.annotate(x=Upper("name"), y=Upper("name")))
def test_non_deterministic_QuerySet_extra(self):
with record():
list(Author.objects.extra(select={"x": "1", "y": "1"}))
def test_non_deterministic_Q_query(self):
with record():
list(Author.objects.filter(Q(name="foo", age=1)))
def test_single_cache_op(self):
with record():
caches["default"].get("foo")
def test_multiple_cache_ops(self):
with record():
caches["default"].set("foo", "bar")
caches["second"].get_many(["foo", "bar"])
caches["default"].delete("foo")
def test_multiple_calls_in_same_function_are_different_records(self):
with record():
caches["default"].get("foo")
with record():
caches["default"].get("bar")
def test_custom_name(self):
with record(record_name="custom"):
caches["default"].get("foo")
def test_custom_name_multiple_calls(self):
with record(record_name="custom"):
caches["default"].get("foo")
with pytest.raises(AssertionError) as excinfo:
with record(record_name="custom"):
caches["default"].get("bar")
assert "Performance record did not match" in str(excinfo.value)
def test_diff(self):
with pretend_not_under_pytest():
with record(record_name="test_diff"):
caches["default"].get("foo")
with pytest.raises(AssertionError) as excinfo:
with record(record_name="test_diff"):
caches["default"].get("bar")
msg = str(excinfo.value)
assert "- cache|get: foo\n" in msg
assert "+ cache|get: bar\n" in msg
def test_path_pointing_to_filename(self):
with temporary_path("custom.perf.yml"):
with record(path="custom.perf.yml"):
caches["default"].get("foo")
assert os.path.exists("custom.perf.yml")
def test_path_pointing_to_filename_record_twice(self):
with temporary_path("custom.perf.yml"):
with record(path="custom.perf.yml"):
caches["default"].get("foo")
with record(path="custom.perf.yml"):
caches["default"].get("foo")
def test_path_pointing_to_dir(self):
temp_dir = os.path.join(FILE_DIR, "perf_files/")
with temporary_path(temp_dir):
with record(path="perf_files/"):
caches["default"].get("foo")
full_path = os.path.join(FILE_DIR, "perf_files", "test_api.perf.yml")
assert os.path.exists(full_path)
def test_custom_nested_path(self):
temp_dir = os.path.join(FILE_DIR, "perf_files/")
with temporary_path(temp_dir):
with record(path="perf_files/api/"):
caches["default"].get("foo")
full_path = os.path.join(FILE_DIR, "perf_files", "api", "test_api.perf.yml")
assert os.path.exists(full_path)
@override_settings(PERF_REC={"MODE": "once"})
def test_mode_once(self):
temp_dir = os.path.join(FILE_DIR, "perf_files/")
with temporary_path(temp_dir):
with record(path="perf_files/api/"):
caches["default"].get("foo")
full_path = os.path.join(FILE_DIR, "perf_files", "api", "test_api.perf.yml")
assert os.path.exists(full_path)
@override_settings(PERF_REC={"MODE": "none"})
def test_mode_none(self):
temp_dir = os.path.join(FILE_DIR, "perf_files/")
with temporary_path(temp_dir):
with pytest.raises(AssertionError) as excinfo:
with record(path="perf_files/api/"):
caches["default"].get("foo")
assert "Original performance record does not exist" in str(excinfo.value)
full_path = os.path.join(FILE_DIR, "perf_files", "api", "test_api.perf.yml")
assert not os.path.exists(full_path)
@override_settings(PERF_REC={"MODE": "all"})
def test_mode_all(self):
temp_dir = os.path.join(FILE_DIR, "perf_files/")
with temporary_path(temp_dir):
with pytest.raises(AssertionError) as excinfo:
with record(path="perf_files/api/"):
caches["default"].get("foo")
assert "Original performance record did not exist" in str(excinfo.value)
full_path = os.path.join(FILE_DIR, "perf_files", "api", "test_api.perf.yml")
assert os.path.exists(full_path)
def test_delete_on_cascade_called_twice(self):
arthur = Author.objects.create(name="Arthur", age=42)
with record():
arthur.delete()
class GetPerfPathTests(SimpleTestCase):
def test_py_file(self):
assert get_perf_path("foo.py") == "foo.perf.yml"
def test_pyc_file(self):
assert get_perf_path("foo.pyc") == "foo.perf.yml"
def test_unknown_file(self):
assert get_perf_path("foo.plob") == "foo.plob.perf.yml"
class GetRecordNameTests(SimpleTestCase):
def test_class_and_test(self):
assert (
get_record_name(class_name="FooTests", test_name="test_bar")
== "FooTests.test_bar"
)
def test_just_test(self):
assert get_record_name(test_name="test_baz") == "test_baz"
def test_multiple_calls(self):
assert get_record_name(test_name="test_qux") == "test_qux"
assert get_record_name(test_name="test_qux") == "test_qux.2"
def test_multiple_calls_from_different_files(self):
assert get_record_name(test_name="test_qux", file_name="foo.py") == "test_qux"
assert get_record_name(test_name="test_qux", file_name="foo2.py") == "test_qux"
assert get_record_name(test_name="test_qux", file_name="foo.py") == "test_qux"
assert get_record_name(test_name="test_qux", file_name="foo.py") == "test_qux.2"
class TestCaseMixinTests(TestCaseMixin, TestCase):
def test_record_performance(self):
with self.record_performance():
caches["default"].get("foo")
def test_record_performance_record_name(self):
with self.record_performance(record_name="other"):
caches["default"].get("foo")
def test_record_performance_file_name(self):
perf_name = __file__.replace(".py", ".file_name.perf.yml")
with self.record_performance(path=perf_name):
caches["default"].get("foo")
|
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""This file contains the widget for displaying the properties panel for a
selected alias in the list
Classes defined in this file:
QAliasInspector
QAliasDetailsWidget
QValuesListEditor
QListEditDialog
QListEditItemDelegate
"""
from __future__ import division
import copy
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import pyqtSignal, pyqtSlot
from vistrails.core.mashup.alias import Alias
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.system import get_vistrails_basic_pkg_id
from vistrails.gui.modules.utils import get_widget_class
from vistrails.gui.modules.constant_configuration import StandardConstantWidget
from vistrails.gui.theme import CurrentTheme
from vistrails.gui.utils import show_warning
################################################################################
class QAliasInspector(QtGui.QScrollArea):
"""
QAliasInspector is a widget to display the details of an alias.
"""
#signals
aliasChanged = pyqtSignal(Alias)
def __init__(self, alias_list, parent=None):
QtGui.QScrollArea.__init__(self,parent)
self.setAcceptDrops(False)
self.setWidgetResizable(True)
self.vWidget = QAliasDetailsWidget(alias_list)
self.setWidget(self.vWidget)
#connecting signals
self.vWidget.aliasChanged.connect(self.aliasChanged)
def updateContents(self, alias_item=None, controller=None):
self.vWidget.updateContents(alias_item, controller)
################################################################################
class QAliasDetailsWidget(QtGui.QWidget):
#signals
aliasChanged = pyqtSignal(Alias)
def __init__(self, table, parent=None):
QtGui.QWidget.__init__(self,parent)
self.alias = None
self.table = table
self.createWidgets()
self.updateContents()
def createWidgets(self):
self.main_layout = QtGui.QVBoxLayout()
self.label = QtGui.QLabel("Alias Details")
self.main_layout.addWidget(self.label)
self.name_label = QtGui.QLabel("Name")
self.name_edit = QtGui.QLineEdit()
l1 = QtGui.QVBoxLayout()
l1.setContentsMargins(0, 11, 0, 0)
l1.setSpacing(3)
l1.addWidget(self.name_label)
l1.addWidget(self.name_edit)
self.order_label = QtGui.QLabel("Order")
self.order_spinbox = QtGui.QSpinBox()
self.order_spinbox.setRange(0,self.table.topLevelItemCount()-1)
if self.alias:
self.order_spinbox.setValue(self.alias.component.pos)
l2 = QtGui.QVBoxLayout()
l2.setContentsMargins(0, 11, 0, 0)
l2.setSpacing(3)
l2.addWidget(self.order_label)
l2.addWidget(self.order_spinbox)
l3 = QtGui.QHBoxLayout()
l3.addLayout(l1)
l3.addLayout(l2)
self.main_layout.addLayout(l3)
#Display Widget
self.dw_groupbox = QtGui.QGroupBox()
self.dw_groupbox.setFlat(True)
self.dw_label = QtGui.QLabel("Display Widget")
self.dw_combobox = QtGui.QComboBox()
self.dw_combobox.addItem("combobox")
self.dw_combobox.addItem("slider")
self.dw_combobox.addItem("numericstepper")
self.dw_layout = QtGui.QVBoxLayout()
self.dw_layout.setContentsMargins(0, 11, 0, 0)
self.dw_slider_layout = QtGui.QHBoxLayout()
self.dw_minval_label = QtGui.QLabel("Min Val")
self.dw_maxval_label = QtGui.QLabel("Max Val")
self.dw_stepsize_label = QtGui.QLabel("Step Size")
self.dw_minval_edit = QtGui.QLineEdit()
self.dw_maxval_edit = QtGui.QLineEdit()
self.dw_stepsize_edit = QtGui.QLineEdit()
l = QtGui.QVBoxLayout()
l.setMargin(0)
l.setSpacing(0)
l.addWidget(self.dw_minval_label)
l.addWidget(self.dw_minval_edit)
self.dw_slider_layout.addLayout(l)
l = QtGui.QVBoxLayout()
l.setMargin(0)
l.setSpacing(0)
l.addWidget(self.dw_maxval_label)
l.addWidget(self.dw_maxval_edit)
self.dw_slider_layout.addLayout(l)
l = QtGui.QVBoxLayout()
l.setMargin(0)
l.setSpacing(0)
l.addWidget(self.dw_stepsize_label)
l.addWidget(self.dw_stepsize_edit)
self.dw_slider_layout.addLayout(l)
self.dw_seq_toggle = QtGui.QCheckBox("Loop")
self.dw_seq_toggle.setToolTip("Enable option to loop through all steps")
self.dw_slider_layout.addWidget(self.dw_seq_toggle)
self.dw_layout.addWidget(self.dw_label)
self.dw_layout.addWidget(self.dw_combobox)
self.dw_layout.addLayout(self.dw_slider_layout)
self.dw_groupbox.setLayout(self.dw_layout)
self.toggle_dw_combobox(0)
#Default Value
self.dv_groupbox = QtGui.QGroupBox()
self.dv_groupbox.setFlat(True)
self.dv_label = QtGui.QLabel("Default Value")
self.dv_layout = QtGui.QVBoxLayout()
self.dv_layout.setContentsMargins(0, 11, 0, 0)
self.dv_layout.addWidget(self.dv_label)
self.dv_groupbox.setLayout(self.dv_layout)
self.dv_widget = None
#Values List
self.vl_groupbox = QtGui.QGroupBox()
self.vl_groupbox.setFlat(True)
self.vl_label = QtGui.QLabel("Values List")
self.vl_layout = QtGui.QVBoxLayout()
self.vl_layout.setContentsMargins(0, 11, 0, 0)
self.vl_layout.addWidget(self.vl_label)
self.vl_editor = None
self.vl_groupbox.setLayout(self.vl_layout)
self.main_layout.addWidget(self.dw_groupbox)
self.main_layout.addWidget(self.dv_groupbox)
self.main_layout.addWidget(self.vl_groupbox)
self.main_layout.addStretch(1)
self.deleteButton = QtGui.QPushButton("Delete Alias")
self.deleteButton.clicked.connect(self.table.removeCurrentAlias)
self.deleteButton.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed))
self.main_layout.addWidget(self.deleteButton)
self.setLayout(self.main_layout)
#connect signals
self.plugSignals()
def plugSignals(self):
self.dw_combobox.currentIndexChanged.connect(self.toggle_dw_combobox)
self.name_edit.editingFinished.connect(self.nameChanged)
self.order_spinbox.valueChanged.connect(self.orderChanged)
self.dw_minval_edit.editingFinished.connect(self.minvalChanged)
self.dw_stepsize_edit.editingFinished.connect(self.stepsizeChanged)
self.dw_seq_toggle.clicked.connect(self.seqToggled)
self.dw_maxval_edit.editingFinished.connect(self.maxvalChanged)
def unplugSignals(self):
self.dw_combobox.currentIndexChanged.disconnect(self.toggle_dw_combobox)
self.name_edit.editingFinished.disconnect(self.nameChanged)
self.order_spinbox.valueChanged.disconnect(self.orderChanged)
self.dw_minval_edit.editingFinished.disconnect(self.minvalChanged)
self.dw_stepsize_edit.editingFinished.disconnect(self.stepsizeChanged)
self.dw_maxval_edit.editingFinished.disconnect(self.maxvalChanged)
def valuesListChanged(self):
self.aliasChanged.emit(self.alias)
@pyqtSlot()
def minvalChanged(self):
if self.alias:
old_minval = self.alias.component.minVal
new_minval = str(self.dw_minval_edit.text())
if old_minval == new_minval:
return
self.alias.component.minVal = new_minval
self.aliasChanged.emit(self.alias)
@pyqtSlot()
def maxvalChanged(self):
if self.alias:
old_maxval = self.alias.component.maxVal
new_maxval = str(self.dw_maxval_edit.text())
if old_maxval == new_maxval:
return
self.alias.component.maxVal = new_maxval
self.aliasChanged.emit(self.alias)
@pyqtSlot()
def stepsizeChanged(self):
if self.alias:
old_stepsize = self.alias.component.stepSize
new_stepsize = str(self.dw_stepsize_edit.text())
if old_stepsize == new_stepsize:
return
self.alias.component.stepSize = new_stepsize
self.aliasChanged.emit(self.alias)
@pyqtSlot()
def seqToggled(self):
if self.alias:
old_seq = self.alias.component.seq
new_seq = self.dw_seq_toggle.isChecked()
if old_seq == new_seq:
return
self.alias.component.seq = new_seq
self.aliasChanged.emit(self.alias)
@pyqtSlot()
def nameChanged(self):
old_alias = self.alias.name
new_alias = str(self.name_edit.text())
if old_alias == new_alias:
return
if new_alias in self.table.aliases.keys():
show_warning("Mashup",
"Label name %s already exists. "
"Please type a different name." % new_alias)
self.name_edit.setText(old_alias)
self.name_edit.setFocus()
elif new_alias == '':
show_warning("Mashup",
"Variables with empty name are not allowed. "
"Please type a unique name.")
self.name_edit.setText(old_alias)
self.name_edit.setFocus()
else:
self.table.aliases[new_alias] = self.table.aliases[old_alias]
#self.table.alias_cache[new_alias] = self.table.alias_cache[old_alias]
del self.table.aliases[old_alias]
#del self.table.alias_cache[old_alias]
self.alias.name = new_alias
self.aliasChanged.emit(self.alias)
@pyqtSlot(int)
def orderChanged(self, neworder):
if self.alias.component.pos == neworder:
return
oldorder = self.alias.component.pos
self.alias.component.pos = neworder
self.table.moveItemToNewPos(oldorder, neworder)
@pyqtSlot(int)
def toggle_dw_combobox(self, index):
if index == 0:
self.show_dw_contents(False)
elif index in [1,2]:
self.show_dw_contents(True)
if self.alias and self.alias.component.type == "Integer":
self.set_int_validators()
elif self.alias and self.alias.component.type == "Float":
self.set_float_validators()
# show loop option for stepper
self.dw_seq_toggle.setVisible(index == 1)
if self.alias:
self.alias.component.widget = str(self.dw_combobox.currentText())
self.aliasChanged.emit(self.alias)
def set_int_validators(self):
validator = QtGui.QIntValidator(self)
self.dw_minval_edit.setValidator(validator)
self.dw_maxval_edit.setValidator(validator)
self.dw_stepsize_edit.setValidator(validator)
def set_float_validators(self):
validator = QtGui.QDoubleValidator(self)
self.dw_minval_edit.setValidator(validator)
self.dw_maxval_edit.setValidator(validator)
self.dw_stepsize_edit.setValidator(validator)
def show_dw_contents(self, on=True):
self.dw_minval_label.setVisible(on)
self.dw_minval_edit.setVisible(on)
self.dw_maxval_label.setVisible(on)
self.dw_maxval_edit.setVisible(on)
self.dw_stepsize_label.setVisible(on)
self.dw_stepsize_edit.setVisible(on)
self.dw_seq_toggle.setVisible(on)
def populate_dw_combobox(self):
self.dw_combobox.currentIndexChanged.disconnect(self.toggle_dw_combobox)
self.dw_combobox.clear()
if self.alias is not None:
self.dw_combobox.addItem("combobox")
if self.alias.component.type in ["Float", "Integer"]:
self.dw_combobox.addItem("slider")
self.dw_combobox.addItem("numericstepper")
self.dw_combobox.currentIndexChanged.connect(self.toggle_dw_combobox)
def updateContents(self, alias=None, controller=None):
self.alias = copy.copy(alias)
self.controller = controller
self.populate_dw_combobox()
self.unplugSignals()
if alias is not None and controller is not None:
self.name_edit.setText(self.alias.name)
#print "widget:", self.alias.component.widget
wtype = self.alias.component.widget
if wtype == 'text':
wtype = "combobox"
index = self.dw_combobox.findText(wtype)
if index < 0:
index = 0
self.dw_combobox.setCurrentIndex(index)
self.order_spinbox.setRange(0,self.table.topLevelItemCount()-1)
self.order_spinbox.setValue(self.alias.component.pos)
self.dw_minval_edit.setText(self.alias.component.minVal)
self.dw_maxval_edit.setText(self.alias.component.maxVal)
self.dw_stepsize_edit.setText(self.alias.component.stepSize)
self.dw_seq_toggle.setChecked(self.alias.component.seq)
if self.dw_combobox.currentIndex() == 0:
self.show_dw_contents(False)
else:
self.show_dw_contents(True)
# show loop option for stepper
self.dw_seq_toggle.setVisible(index == 1)
if self.dv_widget:
self.dv_layout.removeWidget(self.dv_widget)
self.disconnect(self.dv_widget,
QtCore.SIGNAL("contentsChanged"),
self.widgetContentsChanged)
self.dv_widget.deleteLater()
self.dv_widget = QAliasDetailsWidget.createAliasWidget(self.alias, self.controller, self)
self.dv_layout.addWidget(self.dv_widget)
self.connect(self.dv_widget,
QtCore.SIGNAL("contentsChanged"),
self.widgetContentsChanged)
if self.vl_editor:
self.vl_layout.removeWidget(self.vl_editor)
self.disconnect(self.vl_editor,
QtCore.SIGNAL("valuesChanged"),
self.valuesListChanged)
self.vl_editor.deleteLater()
self.vl_editor = None
self.vl_editor = QValuesListEditor(self.alias,self.controller)
self.vl_layout.addWidget(self.vl_editor)
#capturing widget changes to update alias
self.connect(self.vl_editor,
QtCore.SIGNAL("valuesChanged"),
self.valuesListChanged)
self.setEnabled(True)
else:
self.name_edit.setText("")
if self.dv_widget:
self.dv_layout.removeWidget(self.dv_widget)
self.disconnect(self.dv_widget,
QtCore.SIGNAL("contentsChanged"),
self.widgetContentsChanged)
self.dv_widget.deleteLater()
self.dv_widget = None
if self.vl_editor:
self.vl_layout.removeWidget(self.vl_editor)
self.disconnect(self.vl_editor,
QtCore.SIGNAL("valuesChanged"),
self.valuesListChanged)
self.vl_editor.deleteLater()
self.vl_editor = None
self.setEnabled(False)
self.plugSignals()
@staticmethod
def createAliasWidget(alias, controller, parent=None):
v = controller.vtController.vistrail
p = v.db_get_object(alias.component.vttype, alias.component.vtid)
if p.identifier == '':
idn = get_vistrails_basic_pkg_id()
else:
idn = p.identifier
reg = get_module_registry()
p_descriptor = reg.get_descriptor_by_name(idn, p.type, p.namespace)
widget_type = get_widget_class(p_descriptor)
p.strValue = alias.component.val
return widget_type(p, parent)
def widgetContentsChanged(self, info):
self.alias.component.val = info[0].contents()
if self.alias.component.val not in self.alias.component.valueList:
self.alias.component.valueList.append(self.alias.component.val)
self.alias.component.valueList.sort()
self.vl_editor.alias_item_updated()
self.aliasChanged.emit(self.alias)
################################################################################
class QValuesListEditor(QtGui.QWidget):
"""
QValuesListEditor is the actual widget allowing users to
enter a list of values
"""
def __init__(self, alias, controller, parent=None):
""" QValuesListEditor(alias_item: AliasTableItem, parent: QWidget)
-> QValuesListEditor
Construct an edit box with a button for bringing up the dialog
"""
QtGui.QWidget.__init__(self, parent)
self._alias = alias
self.type = alias.component.type
self.controller = controller
hLayout = QtGui.QHBoxLayout(self)
hLayout.setMargin(0)
hLayout.setSpacing(0)
self.setLayout(hLayout)
self.listValues = QtGui.QLineEdit()
self.listValues.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Maximum)
self.alias_item_updated()
hLayout.addWidget(self.listValues)
self.connect(self.listValues, QtCore.SIGNAL('editingFinished()'),
self.values_were_edited)
inputButton = QtGui.QToolButton()
inputButton.setText('...')
self.connect(inputButton, QtCore.SIGNAL('clicked()'),
self.editListValues)
hLayout.addWidget(inputButton)
def alias_item_updated(self):
if self._alias.component.type not in ['Float', 'Integer']:
values = []
for v in self._alias.component.valueList:
values.append("'%s'"% v.replace("'", "\'"))
self.listValues.setText("[%s]" % ", ".join(values))
else:
self.listValues.setText('[%s]' % ", ".join(self._alias.component.valueList))
if self._alias.component.type in ['String','Integer','Float']:
self.listValues.setReadOnly(False)
else:
self.listValues.setReadOnly(True)
self.listValues.home(False)
def values_were_edited(self):
"""values_were_edited(): None
Connected to self.listValues.textEdited.
Updates self._alias.valueList.
NB: Allowing the user to edit the LineEdit field directly is
        not a very good idea, because we don't know what the syntactic
        rules are for the translate_to_python() calls in arbitrary
        classes. Right now, I'm assuming that removing the leading and
        trailing brackets and splitting on ',' is enough. (In passing,
        the previous call to eval() was simply broken in a general
        scenario like the one we have now.)
        For example, this will break horribly if the user manually edits
        a list of strings with commas in them. A standalone sketch of
        this parsing appears at the end of this file."""
#print "values_were_edited"
new_text = self.listValues.text()
t = str(new_text)
if len(t) < 2:
self._alias.component.valueList = []
return
if not (t[0] == '[' and t[-1] == ']'):
            self._alias.component.valueList = []
else:
self._alias.component.valueList = t[1:-1].split(',')
if self._alias.component.type not in ['Float', 'Integer']:
for i, val in enumerate(self._alias.component.valueList):
val = val.strip()
if len(val) >= 2 and \
((val[0] == "'" and val[-1] == "'") or
(val[0] == '"' and val[-1] == '"')):
self._alias.component.valueList[i] = val.strip()[1:-1]
def editListValues(self):
""" editListValues() -> None
Show a dialog for editing the values
"""
dialog = QListEditDialog(self._alias, self.controller, None)
if dialog.exec_() == QtGui.QDialog.Accepted:
values = dialog.getList()
#print values
self._alias.component.valueList = copy.copy(values)
self._str_values = [str(v) for v in values]
values2 = values
if self.type not in ['Float', 'Integer']:
values2 = ["'%s'" % v.replace("'", "\'")
for v in values]
self.listValues.setText('[%s]' % ', '.join(values2))
self.listValues.home(False)
self.emit(QtCore.SIGNAL("valuesChanged"))
dialog.deleteLater()
##############################################################################
class QListEditDialog(QtGui.QDialog):
"""
QListEditDialog provides an interface for user to edit a list of
values and export to a string
"""
def __init__(self, alias, controller, parent=None):
""" QListEditDialog(pType: str, strValues: list, parent: QWidget)
-> QListEditDialog
Parse values and setup the table
"""
QtGui.QDialog.__init__(self, parent)
self._alias = alias
vLayout = QtGui.QVBoxLayout()
vLayout.setMargin(0)
vLayout.setSpacing(0)
self.controller = controller
self.setLayout(vLayout)
label = QtGui.QLabel("Please enter values in boxes below. "
"'Add' appends an empty value to the list. "
"And 'Del' removes the selected values.")
label.setMargin(5)
label.setWordWrap(True)
vLayout.addWidget(label)
self.table = QtGui.QTableWidget(0, 1, parent)
self.table.setHorizontalHeaderLabels(['Values'])
self.table.horizontalHeader().setStretchLastSection(True)
self.table.verticalHeader().setMovable(True)
self.delegate = QListEditItemDelegate(alias, controller)
self.table.setItemDelegate(self.delegate)
self.table.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
for v in alias.component.valueList:
self.addRow(v)
self.connect(self.table.verticalHeader(),
QtCore.SIGNAL('sectionMoved(int,int,int)'),
self.rowMoved)
vLayout.addWidget(self.table)
hLayout = QtGui.QHBoxLayout()
vLayout.addLayout(hLayout)
okButton = QtGui.QPushButton('&OK')
okButton.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Maximum)
self.connect(okButton, QtCore.SIGNAL('clicked()'), self.okButtonPressed)
hLayout.addWidget(okButton)
cancelButton = QtGui.QPushButton('&Cancel')
cancelButton.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Maximum)
self.connect(cancelButton, QtCore.SIGNAL('clicked()'), self.reject)
hLayout.addWidget(cancelButton)
addButton = QtGui.QPushButton('&Add')
addButton.setIcon(CurrentTheme.ADD_STRING_ICON)
addButton.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Maximum)
self.connect(addButton, QtCore.SIGNAL('clicked()'), self.addRow)
hLayout.addWidget(addButton)
removeButton = QtGui.QPushButton('&Del')
removeButton.setIcon(QtGui.QIcon(
self.style().standardPixmap(QtGui.QStyle.SP_DialogCancelButton)))
removeButton.setSizePolicy(QtGui.QSizePolicy.Maximum,
QtGui.QSizePolicy.Maximum)
self.connect(removeButton, QtCore.SIGNAL('clicked()'),
self.removeSelection)
hLayout.addWidget(removeButton)
def sizeHint(self):
""" sizeHint() -> QSize
Return the recommended size for the widget
"""
return QtCore.QSize(256, 384)
def okButtonPressed(self):
""" okButtonPressed() -> None
Make sure to commit the editor data before accepting
"""
self.table.itemDelegate().finishEditing()
self.accept()
def getList(self):
""" getList() -> list of str values
Return a list of values
"""
result = []
for i in xrange(self.table.rowCount()):
logicalIndex = self.table.verticalHeader().logicalIndex(i)
value = self.table.cellWidget(logicalIndex, 0).contents()
result.append(str(value))
return result
def rowMoved(self, row, old, new):
""" rowMove(row: int, old: int, new: int) -> None
Renumber the vertical header labels when row moved
"""
vHeader = self.table.verticalHeader()
labels = []
for i in xrange(self.table.rowCount()):
labels.append(str(vHeader.visualIndex(i)+1))
self.table.setVerticalHeaderLabels(labels)
def addRow(self, text=''):
""" addRow(text: str) -> QListStringEdit
Add an extra row to the end of the table
"""
self.table.setRowCount(self.table.rowCount()+1)
alias = copy.copy(self._alias)
alias.component.val = text
widget = \
QAliasDetailsWidget.createAliasWidget(alias, self.controller, None)
if not isinstance(widget, StandardConstantWidget):
item = QtGui.QTableWidgetItem()
else:
item = QtGui.QTableWidgetItem(text)
row = self.table.rowCount()-1
self.table.setItem(row, 0, item)
self.table.setCellWidget(row,
0,
widget)
h = widget.sizeHint().height()
self.table.setRowHeight(row,h)
def removeSelection(self):
""" removeSelection() -> None
Remove selected rows on the table
"""
for item in self.table.selectedItems():
self.table.removeRow(item.row())
##############################################################################
class QListEditItemDelegate(QtGui.QItemDelegate):
"""
QListEditItemDelegate sets up the editor for the QListEditDialog
table
"""
def __init__(self, alias_item, controller, parent=None):
""" QListEditItemDelegate(parent: QWidget) -> QListEditItemDelegate
Store the uncommit editor for commit later
"""
QtGui.QItemDelegate.__init__(self, parent)
self.controller = controller
self.alias_item = alias_item
self.editor = None
def createEditor(self, parent, option, index):
""" createEditor(parent: QWidget,
option: QStyleOptionViewItem,
index: QModelIndex) -> QStringEdit
Return the editor widget for the index
"""
self.editor = QAliasDetailsWidget.createAliasWidget(self.alias_item,
self.controller,
parent)
#print "editor created"
return self.editor
def updateEditorGeometry(self, editor, option, index):
""" updateEditorGeometry(editor: QStringEdit,
option: QStyleOptionViewItem,
index: QModelIndex) -> None
Update the geometry of the editor based on the style option
"""
editor.setGeometry(option.rect)
def setModelData(self, editor, model, index):
""" setModelData(editor: QStringEdit,
model: QAbstractItemModel,
index: QModelIndex) -> None
Set the text of the editor back to the item model
"""
model.setData(index, editor.contents())
self.editor = None
def finishEditing(self):
#print "finishEditing"
if self.editor:
self.emit(QtCore.SIGNAL('commitData(QWidget*)'), self.editor)
##############################################################################
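# Illustrative sketch (not part of the original module) of the naive list
# parsing performed by QValuesListEditor.values_were_edited() above: strip the
# surrounding brackets, split on ',', then strip one layer of quotes for
# non-numeric types. As that docstring warns, this breaks for strings that
# themselves contain commas.
def _parse_values_list_sketch(text, numeric=False):
    t = str(text).strip()
    if len(t) < 2 or t[0] != '[' or t[-1] != ']':
        return []
    values = [v.strip() for v in t[1:-1].split(',')]
    if not numeric:
        values = [v[1:-1]
                  if len(v) >= 2 and v[0] == v[-1] and v[0] in ('"', "'")
                  else v
                  for v in values]
    return values
# _parse_values_list_sketch("['a', 'b']")       -> ['a', 'b']
# _parse_values_list_sketch("[1, 2, 3]", True)  -> ['1', '2', '3']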
|
|
# Point-and-shoot camera for Raspberry Pi w/camera and Adafruit PiTFT.
# This must run as root (sudo python cam.py) due to framebuffer, etc.
#
# Adafruit invests time and resources providing this open source code,
# please support Adafruit and open-source development by purchasing
# products from Adafruit, thanks!
#
# http://www.adafruit.com/products/998 (Raspberry Pi Model B)
# http://www.adafruit.com/products/1367 (Raspberry Pi Camera Board)
# http://www.adafruit.com/products/1601 (PiTFT Mini Kit)
# This can also work with the Model A board and/or the Pi NoIR camera.
#
# Prerequisite tutorials: aside from the basic Raspbian setup and
# enabling the camera in raspi-config, you should configure WiFi (if
# using wireless with the Dropbox upload feature) and read these:
# PiTFT setup (the tactile switch buttons are not required for this
# project, but can be installed if you want them for other things):
# http://learn.adafruit.com/adafruit-pitft-28-inch-resistive-touchscreen-display-raspberry-pi
# Dropbox setup (if using the Dropbox upload feature):
# http://raspi.tv/2013/how-to-use-dropbox-with-raspberry-pi
#
# Written by Phil Burgess / Paint Your Dragon for Adafruit Industries.
# BSD license, all text above must be included in any redistribution.
import atexit
import cPickle as pickle
import errno
import fnmatch
import io
import os
import os.path
import picamera
import pygame
import stat
import threading
import time
import yuv2rgb
from pygame.locals import *
from subprocess import call
# UI classes ---------------------------------------------------------------
# Small resistive touchscreen is best suited to simple tap interactions.
# Importing a big widget library seemed a bit overkill. Instead, a couple
# of rudimentary classes are sufficient for the UI elements:
# Icon is a very simple bitmap class, just associates a name and a pygame
# image (PNG loaded from icons directory) for each.
# There isn't a globally-declared fixed list of Icons. Instead, the list
# is populated at runtime from the contents of the 'icons' directory.
class Icon:
def __init__(self, name):
self.name = name
try:
self.bitmap = pygame.image.load(iconPath + '/' + name + '.png')
except:
pass
# Button is a simple tappable screen region. Each has:
# - bounding rect ((X,Y,W,H) in pixels)
# - optional background color and/or Icon (or None), always centered
# - optional foreground Icon, always centered
# - optional single callback function
# - optional single value passed to callback
# Occasionally Buttons are used as a convenience for positioning Icons
# but the taps are ignored. Stacking order is important; when Buttons
# overlap, lowest/first Button in list takes precedence when processing
# input, and highest/last Button is drawn atop prior Button(s). This is
# used, for example, to center an Icon by creating a passive Button the
# width of the full screen, but with other buttons left or right that
# may take input precedence (e.g. the Effect labels & buttons).
# After Icons are loaded at runtime, a pass is made through the global
# buttons[] list to assign the Icon objects (from names) to each Button.
class Button:
def __init__(self, rect, **kwargs):
self.rect = rect # Bounds
self.color = None # Background fill color, if any
self.iconBg = None # Background Icon (atop color fill)
self.iconFg = None # Foreground Icon (atop background)
self.bg = None # Background Icon name
self.fg = None # Foreground Icon name
self.callback = None # Callback function
self.value = None # Value passed to callback
for key, value in kwargs.iteritems():
if key == 'color': self.color = value
elif key == 'bg' : self.bg = value
elif key == 'fg' : self.fg = value
elif key == 'cb' : self.callback = value
elif key == 'value': self.value = value
def selected(self, pos):
x1 = self.rect[0]
y1 = self.rect[1]
x2 = x1 + self.rect[2] - 1
y2 = y1 + self.rect[3] - 1
if ((pos[0] >= x1) and (pos[0] <= x2) and
(pos[1] >= y1) and (pos[1] <= y2)):
if self.callback:
if self.value is None: self.callback()
else: self.callback(self.value)
return True
return False
def draw(self, screen):
if self.color:
screen.fill(self.color, self.rect)
if self.iconBg:
screen.blit(self.iconBg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconBg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconBg.bitmap.get_height())/2))
if self.iconFg:
screen.blit(self.iconFg.bitmap,
(self.rect[0]+(self.rect[2]-self.iconFg.bitmap.get_width())/2,
self.rect[1]+(self.rect[3]-self.iconFg.bitmap.get_height())/2))
def setBg(self, name):
if name is None:
self.iconBg = None
else:
for i in icons:
if name == i.name:
self.iconBg = i
break
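# Illustrative note (added for clarity; mirrors the buttons[] list below):
# because the first matching Button in a screen's list receives the tap and
# later Buttons are drawn on top of earlier ones, a screen typically lists its
# small callback Buttons first and a passive full-width label Button after
# them, e.g. (from the graphic-effects screen):
#   Button((  0, 70, 80, 52), bg='prev', cb=fxCallback, value=-1),
#   Button((240, 70, 80, 52), bg='next', cb=fxCallback, value= 1),
#   Button((  0, 67,320, 91), bg='fx-none')   # passive label, no callback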
# UI callbacks -------------------------------------------------------------
# These are defined before globals because they're referenced by items in
# the global buttons[] list.
def isoCallback(n): # Pass 1 (next ISO) or -1 (prev ISO)
global isoMode
setIsoMode((isoMode + n) % len(isoData))
def settingCallback(n): # Pass 1 (next setting) or -1 (prev setting)
global screenMode
screenMode += n
if screenMode < 4: screenMode = len(buttons) - 1
elif screenMode >= len(buttons): screenMode = 4
def fxCallback(n): # Pass 1 (next effect) or -1 (prev effect)
global fxMode
setFxMode((fxMode + n) % len(fxData))
def quitCallback(): # Quit confirmation button
saveSettings()
raise SystemExit
def viewCallback(n): # Viewfinder buttons
global loadIdx, scaled, screenMode, screenModePrior, settingMode, storeMode
if n == 0: # Gear icon (settings)
screenMode = settingMode # Switch to last settings mode
elif n == 1: # Play icon (image playback)
if scaled: # Last photo is already memory-resident
loadIdx = saveIdx
screenMode = 0 # Image playback
screenModePrior = -1 # Force screen refresh
else: # Load image
r = imgRange(pathData[storeMode])
if r: showImage(r[1]) # Show last image in directory
else: screenMode = 2 # No images
else: # Rest of screen = shutter
takePicture()
def doneCallback(): # Exit settings
global screenMode, settingMode
if screenMode > 3:
settingMode = screenMode
saveSettings()
screenMode = 3 # Switch back to viewfinder mode
def imageCallback(n): # Pass 1 (next image), -1 (prev image) or 0 (delete)
global screenMode
if n == 0:
screenMode = 1 # Delete confirmation
else:
showNextImage(n)
def deleteCallback(n): # Delete confirmation
global loadIdx, scaled, screenMode, screenModePrior, storeMode
screenMode = 0
screenModePrior = -1
if n is True:
os.remove(pathData[storeMode] + '/IMG_' + '%04d' % loadIdx + '.JPG')
if(imgRange(pathData[storeMode])):
screen.fill(0)
pygame.display.update()
showNextImage(-1)
else: # Last image deleted; go to 'no images' mode
screenMode = 2
scaled = None
loadIdx = -1
def storeModeCallback(n): # Radio buttons on storage settings screen
global storeMode
buttons[4][storeMode + 3].setBg('radio3-0')
storeMode = n
buttons[4][storeMode + 3].setBg('radio3-1')
def sizeModeCallback(n): # Radio buttons on size settings screen
global sizeMode
buttons[5][sizeMode + 3].setBg('radio3-0')
sizeMode = n
buttons[5][sizeMode + 3].setBg('radio3-1')
camera.resolution = sizeData[sizeMode][1]
# camera.crop = sizeData[sizeMode][2]
# Global stuff -------------------------------------------------------------
screenMode = 3 # Current screen mode; default = viewfinder
screenModePrior = -1 # Prior screen mode (for detecting changes)
settingMode = 4 # Last-used settings mode (default = storage)
storeMode = 0 # Storage mode; default = Photos folder
storeModePrior = -1 # Prior storage mode (for detecting changes)
sizeMode = 0 # Image size; default = Large
fxMode = 0 # Image effect; default = Normal
isoMode = 0 # ISO setting; default = Auto
iconPath = 'icons' # Subdirectory containing UI bitmaps (PNG format)
saveIdx = -1 # Image index for saving (-1 = none set yet)
loadIdx = -1 # Image index for loading
scaled = None # pygame Surface w/last-loaded image
# To use Dropbox uploader, must have previously run the dropbox_uploader.sh
# script to set up the app key and such. If this was done as the normal pi
# user, set upconfig to the .dropbox_uploader config file in that account's
# home directory. Alternately, could run the setup script as root and
# delete the upconfig line below.
uploader = '/home/pi/Dropbox-Uploader/dropbox_uploader.sh'
upconfig = '/home/pi/.dropbox_uploader'
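# For reference (not executed here): with the settings above, takePicture()
# below shells out a command of the form
#   /home/pi/Dropbox-Uploader/dropbox_uploader.sh -f /home/pi/.dropbox_uploader \
#       upload /home/pi/Photos/IMG_0001.JPG Photos/IMG_0001.JPG
# The '-f <config>' part is omitted when upconfig is empty or the line above
# is deleted; see the storeMode == 2 branch in takePicture().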
sizeData = [ # Camera parameters for different size settings
# Full res Viewfinder Crop window
[(2592, 1944), (320, 240), (0.0 , 0.0 , 1.0 , 1.0 )], # Large
[(1920, 1080), (320, 180), (0.1296, 0.2222, 0.7408, 0.5556)], # Med
[(1440, 1080), (320, 240), (0.2222, 0.2222, 0.5556, 0.5556)]] # Small
isoData = [ # Values for ISO settings [ISO value, indicator X position]
[ 0, 27], [100, 64], [200, 97], [320, 137],
[400, 164], [500, 197], [640, 244], [800, 297]]
# A fixed list of image effects is used (rather than polling
# camera.IMAGE_EFFECTS) because the latter contains a few elements
# that aren't valid (at least in video_port mode) -- e.g. blackboard,
# whiteboard, posterize (but posterise, British spelling, is OK).
# Others have no visible effect (or might require setting add'l
# camera parameters for which there's no GUI yet) -- e.g. saturation,
# colorbalance, colorpoint.
fxData = [
'none', 'sketch', 'gpen', 'pastel', 'watercolor', 'oilpaint', 'hatch',
'negative', 'colorswap', 'posterise', 'denoise', 'blur', 'film',
'washedout', 'emboss', 'cartoon', 'solarize' ]
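# Hypothetical alternative (sketch only, not used here, and assuming picamera
# exposes camera.IMAGE_EFFECTS as documented): the list could instead be built
# at runtime by polling the camera and dropping the known-problematic entries
# mentioned above, e.g.:
#   bad = ('blackboard', 'whiteboard', 'posterize', 'saturation',
#          'colorbalance', 'colorpoint')
#   fxData = [fx for fx in camera.IMAGE_EFFECTS if fx not in bad]
# The fixed list is kept so the GUI only ever offers effects known to work in
# video_port mode.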
pathData = [
'/home/pi/Photos', # Path for storeMode = 0 (Photos folder)
'/boot/DCIM/CANON999', # Path for storeMode = 1 (Boot partition)
'/home/pi/Photos'] # Path for storeMode = 2 (Dropbox)
icons = [] # This list gets populated at startup
# buttons[] is a list of lists; each top-level list element corresponds
# to one screen mode (e.g. viewfinder, image playback, storage settings),
# and each element within those lists corresponds to one UI button.
# There's a little bit of repetition (e.g. prev/next buttons are
# declared for each settings screen, rather than a single reusable
# set); trying to reuse those few elements just made for an ugly
# tangle of code elsewhere.
buttons = [
# Screen mode 0 is photo playback
[Button(( 0,188,320, 52), bg='done' , cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev' , cb=imageCallback, value=-1),
Button((240, 0, 80, 52), bg='next' , cb=imageCallback, value= 1),
Button(( 88, 70,157,102)), # 'Working' label (when enabled)
Button((148,129, 22, 22)), # Spinner (when enabled)
Button((121, 0, 78, 52), bg='trash', cb=imageCallback, value= 0)],
# Screen mode 1 is delete confirmation
[Button(( 0,35,320, 33), bg='delete'),
Button(( 32,86,120,100), bg='yn', fg='yes',
cb=deleteCallback, value=True),
Button((168,86,120,100), bg='yn', fg='no',
cb=deleteCallback, value=False)],
# Screen mode 2 is 'No Images'
[Button((0, 0,320,240), cb=doneCallback), # Full screen = button
Button((0,188,320, 52), bg='done'), # Fake 'Done' button
Button((0, 53,320, 80), bg='empty')], # 'Empty' message
# Screen mode 3 is viewfinder / snapshot
[Button(( 0,188,156, 52), bg='gear', cb=viewCallback, value=0),
Button((164,188,156, 52), bg='play', cb=viewCallback, value=1),
Button(( 0, 0,320,240) , cb=viewCallback, value=2),
Button(( 88, 51,157,102)), # 'Working' label (when enabled)
Button((148, 110,22, 22))], # Spinner (when enabled)
# Remaining screens are settings modes
# Screen mode 4 is storage settings
[Button(( 0,188,320, 52), bg='done', cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev', cb=settingCallback, value=-1),
Button((240, 0, 80, 52), bg='next', cb=settingCallback, value= 1),
Button(( 2, 60,100,120), bg='radio3-1', fg='store-folder',
cb=storeModeCallback, value=0),
Button((110, 60,100,120), bg='radio3-0', fg='store-boot',
cb=storeModeCallback, value=1),
Button((218, 60,100,120), bg='radio3-0', fg='store-dropbox',
cb=storeModeCallback, value=2),
Button(( 0, 10,320, 35), bg='storage')],
# Screen mode 5 is size settings
[Button(( 0,188,320, 52), bg='done', cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev', cb=settingCallback, value=-1),
Button((240, 0, 80, 52), bg='next', cb=settingCallback, value= 1),
Button(( 2, 60,100,120), bg='radio3-1', fg='size-l',
cb=sizeModeCallback, value=0),
Button((110, 60,100,120), bg='radio3-0', fg='size-m',
cb=sizeModeCallback, value=1),
Button((218, 60,100,120), bg='radio3-0', fg='size-s',
cb=sizeModeCallback, value=2),
Button(( 0, 10,320, 29), bg='size')],
# Screen mode 6 is graphic effect
[Button(( 0,188,320, 52), bg='done', cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev', cb=settingCallback, value=-1),
Button((240, 0, 80, 52), bg='next', cb=settingCallback, value= 1),
Button(( 0, 70, 80, 52), bg='prev', cb=fxCallback , value=-1),
Button((240, 70, 80, 52), bg='next', cb=fxCallback , value= 1),
Button(( 0, 67,320, 91), bg='fx-none'),
Button(( 0, 11,320, 29), bg='fx')],
# Screen mode 7 is ISO
[Button(( 0,188,320, 52), bg='done', cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev', cb=settingCallback, value=-1),
Button((240, 0, 80, 52), bg='next', cb=settingCallback, value= 1),
Button(( 0, 70, 80, 52), bg='prev', cb=isoCallback , value=-1),
Button((240, 70, 80, 52), bg='next', cb=isoCallback , value= 1),
Button(( 0, 79,320, 33), bg='iso-0'),
Button(( 9,134,302, 26), bg='iso-bar'),
Button(( 17,157, 21, 19), bg='iso-arrow'),
Button(( 0, 10,320, 29), bg='iso')],
# Screen mode 8 is quit confirmation
[Button(( 0,188,320, 52), bg='done' , cb=doneCallback),
Button(( 0, 0, 80, 52), bg='prev' , cb=settingCallback, value=-1),
Button((240, 0, 80, 52), bg='next' , cb=settingCallback, value= 1),
Button((110, 60,100,120), bg='quit-ok', cb=quitCallback),
Button(( 0, 10,320, 35), bg='quit')]
]
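# Indexing note (added for clarity): the outer index is the screen mode and the
# inner index is a button's position within that screen. For example,
# buttons[3][0] is the viewfinder's gear button, buttons[6][5] is the 'fx-none'
# label swapped by setFxMode() below, and buttons[7][7] is the ISO arrow whose
# rect setIsoMode() repositions.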
# Assorted utility functions -----------------------------------------------
def setFxMode(n):
global fxMode
fxMode = n
camera.image_effect = fxData[fxMode]
buttons[6][5].setBg('fx-' + fxData[fxMode])
def setIsoMode(n):
global isoMode
isoMode = n
camera.ISO = isoData[isoMode][0]
buttons[7][5].setBg('iso-' + str(isoData[isoMode][0]))
buttons[7][7].rect = ((isoData[isoMode][1] - 10,) +
buttons[7][7].rect[1:])
def saveSettings():
try:
outfile = open('cam.pkl', 'wb')
# Use a dictionary (rather than pickling 'raw' values) so
# the number & order of things can change without breaking.
d = { 'fx' : fxMode,
'iso' : isoMode,
'size' : sizeMode,
'store' : storeMode }
pickle.dump(d, outfile)
outfile.close()
except:
pass
def loadSettings():
try:
infile = open('cam.pkl', 'rb')
d = pickle.load(infile)
infile.close()
if 'fx' in d: setFxMode( d['fx'])
if 'iso' in d: setIsoMode( d['iso'])
if 'size' in d: sizeModeCallback( d['size'])
if 'store' in d: storeModeCallback(d['store'])
except:
pass
# Scan files in a directory, locating JPEGs with names matching the
# software's convention (IMG_XXXX.JPG), returning a tuple with the
# lowest and highest indices (or None if no matching files).
def imgRange(path):
min = 9999
max = 0
try:
for file in os.listdir(path):
if fnmatch.fnmatch(file, 'IMG_[0-9][0-9][0-9][0-9].JPG'):
i = int(file[4:8])
if(i < min): min = i
if(i > max): max = i
finally:
return None if min > max else (min, max)
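# Example (for illustration): if /home/pi/Photos contains IMG_0003.JPG and
# IMG_0041.JPG, imgRange('/home/pi/Photos') returns (3, 41); with no matching
# files, or an unreadable path, it returns None.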
# Busy indicator. To use, run in separate thread, set global 'busy'
# to False when done.
def spinner():
global busy, screenMode, screenModePrior
buttons[screenMode][3].setBg('working')
buttons[screenMode][3].draw(screen)
pygame.display.update()
busy = True
n = 0
while busy is True:
buttons[screenMode][4].setBg('work-' + str(n))
buttons[screenMode][4].draw(screen)
pygame.display.update()
n = (n + 1) % 5
time.sleep(0.15)
buttons[screenMode][3].setBg(None)
buttons[screenMode][4].setBg(None)
screenModePrior = -1 # Force refresh
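# Typical usage (as done in takePicture(), showNextImage() and showImage()):
#   t = threading.Thread(target=spinner)
#   t.start()
#   ...do the slow work...
#   busy = False   # tells spinner() to stop animating
#   t.join()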
def takePicture():
global busy, gid, loadIdx, saveIdx, scaled, sizeMode, storeMode, storeModePrior, uid
if not os.path.isdir(pathData[storeMode]):
try:
os.makedirs(pathData[storeMode])
# Set new directory ownership to pi user, mode to 755
os.chown(pathData[storeMode], uid, gid)
os.chmod(pathData[storeMode],
stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
except OSError as e:
# errno = 2 if can't create folder
print errno.errorcode[e.errno]
return
# If this is the first time accessing this directory,
# scan for the max image index, start at next pos.
if storeMode != storeModePrior:
r = imgRange(pathData[storeMode])
if r is None:
saveIdx = 1
else:
saveIdx = r[1] + 1
if saveIdx > 9999: saveIdx = 0
storeModePrior = storeMode
# Scan for next available image slot
while True:
filename = pathData[storeMode] + '/IMG_' + '%04d' % saveIdx + '.JPG'
if not os.path.isfile(filename): break
saveIdx += 1
if saveIdx > 9999: saveIdx = 0
t = threading.Thread(target=spinner)
t.start()
scaled = None
camera.resolution = sizeData[sizeMode][0]
camera.crop = sizeData[sizeMode][2]
try:
camera.capture(filename, use_video_port=False, format='jpeg',
thumbnail=None)
# Set image file ownership to pi user, mode to 644
# os.chown(filename, uid, gid) # Not working, why?
os.chmod(filename,
stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
img = pygame.image.load(filename)
scaled = pygame.transform.scale(img, sizeData[sizeMode][1])
if storeMode == 2: # Dropbox
if upconfig:
cmd = uploader + ' -f ' + upconfig + ' upload ' + filename + ' Photos/' + os.path.basename(filename)
else:
cmd = uploader + ' upload ' + filename + ' Photos/' + os.path.basename(filename)
call ([cmd], shell=True)
finally:
# Add error handling/indicator (disk full, etc.)
camera.resolution = sizeData[sizeMode][1]
camera.crop = (0.0, 0.0, 1.0, 1.0)
busy = False
t.join()
if scaled:
if scaled.get_height() < 240: # Letterbox
screen.fill(0)
screen.blit(scaled,
((320 - scaled.get_width() ) / 2,
(240 - scaled.get_height()) / 2))
pygame.display.update()
time.sleep(2.5)
loadIdx = saveIdx
def showNextImage(direction):
global busy, loadIdx
t = threading.Thread(target=spinner)
t.start()
n = loadIdx
while True:
n += direction
if(n > 9999): n = 0
elif(n < 0): n = 9999
if os.path.exists(pathData[storeMode]+'/IMG_'+'%04d'%n+'.JPG'):
showImage(n)
break
busy = False
t.join()
def showImage(n):
global busy, loadIdx, scaled, screenMode, screenModePrior, sizeMode, storeMode
t = threading.Thread(target=spinner)
t.start()
img = pygame.image.load(
pathData[storeMode] + '/IMG_' + '%04d' % n + '.JPG')
scaled = pygame.transform.scale(img, sizeData[sizeMode][1])
loadIdx = n
busy = False
t.join()
screenMode = 0 # Photo playback
screenModePrior = -1 # Force screen refresh
# Initialization -----------------------------------------------------------
# Init framebuffer/touchscreen environment variables
os.putenv('SDL_VIDEODRIVER', 'fbcon')
os.putenv('SDL_FBDEV' , '/dev/fb1')
os.putenv('SDL_MOUSEDRV' , 'TSLIB')
os.putenv('SDL_MOUSEDEV' , '/dev/input/touchscreen')
# Get user & group IDs for file & folder creation
# (Want these to be 'pi' or other user, not root)
s = os.getenv("SUDO_UID")
uid = int(s) if s else os.getuid()
s = os.getenv("SUDO_GID")
gid = int(s) if s else os.getgid()
# Buffers for viewfinder data
rgb = bytearray(320 * 240 * 3)
yuv = bytearray(320 * 240 * 3 / 2)
# Init pygame and screen
pygame.init()
pygame.mouse.set_visible(False)
screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN)
# Init camera and set up default values
camera = picamera.PiCamera()
atexit.register(camera.close)
camera.resolution = sizeData[sizeMode][1]
#camera.crop = sizeData[sizeMode][2]
camera.crop = (0.0, 0.0, 1.0, 1.0)
# Leave raw format at default YUV, don't touch, don't set to RGB!
# Load all icons at startup.
for file in os.listdir(iconPath):
if fnmatch.fnmatch(file, '*.png'):
icons.append(Icon(file.split('.')[0]))
# Assign Icons to Buttons, now that they're loaded
for s in buttons: # For each screenful of buttons...
for b in s: # For each button on screen...
for i in icons: # For each icon...
if b.bg == i.name: # Compare names; match?
b.iconBg = i # Assign Icon to Button
b.bg = None # Name no longer used; allow garbage collection
if b.fg == i.name:
b.iconFg = i
b.fg = None
loadSettings() # Must come last; fiddles with Button/Icon states
# Main loop ----------------------------------------------------------------
while(True):
# Process touchscreen input
while True:
for event in pygame.event.get():
if(event.type == MOUSEBUTTONDOWN):
pos = pygame.mouse.get_pos()
for b in buttons[screenMode]:
if b.selected(pos): break
# If in viewfinder or settings modes, stop processing touchscreen
# and refresh the display to show the live preview. In other modes
# (image playback, etc.), stop and refresh the screen only when
# screenMode changes.
if screenMode >= 3 or screenMode != screenModePrior: break
# Refresh display
if screenMode >= 3: # Viewfinder or settings modes
stream = io.BytesIO() # Capture into in-memory stream
camera.capture(stream, use_video_port=True, format='raw')
stream.seek(0)
stream.readinto(yuv) # stream -> YUV buffer
stream.close()
yuv2rgb.convert(yuv, rgb, sizeData[sizeMode][1][0],
sizeData[sizeMode][1][1])
img = pygame.image.frombuffer(rgb[0:
(sizeData[sizeMode][1][0] * sizeData[sizeMode][1][1] * 3)],
sizeData[sizeMode][1], 'RGB')
elif screenMode < 2: # Playback mode or delete confirmation
img = scaled # Show last-loaded image
else: # 'No Photos' mode
img = None # You get nothing, good day sir
if img is None or img.get_height() < 240: # Letterbox, clear background
screen.fill(0)
if img:
screen.blit(img,
((320 - img.get_width() ) / 2,
(240 - img.get_height()) / 2))
# Overlay buttons on display and update
for i,b in enumerate(buttons[screenMode]):
b.draw(screen)
pygame.display.update()
screenModePrior = screenMode
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import uuid
import mock
import testscenarios
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import importutils
from oslo_utils import timeutils
from sqlalchemy.orm import query
from neutron.common import constants
from neutron.common import topics
from neutron import context as q_context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.db import db_base_plugin_v2 as db_v2
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_hamode_db
from neutron.db import l3_hascheduler_db
from neutron.extensions import l3agentscheduler as l3agent
from neutron import manager
from neutron.scheduler import l3_agent_scheduler
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
# The 'load_tests' assignment below is required for the following reason
# (as documented in testscenarios):
"""Multiply tests depending on their 'scenarios' attribute.
This can be assigned to 'load_tests' in any test module to make this
automatically work across tests in the module.
"""
load_tests = testscenarios.load_tests_apply_scenarios
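# Illustrative sketch (not part of this module): a test class opts in by
# defining a 'scenarios' attribute, and the load_tests hook above then runs
# each test method once per scenario, e.g.:
#   class MyTestCase(base.BaseTestCase):
#       scenarios = [
#           ('chance',
#            {'scheduler_cls': l3_agent_scheduler.ChanceScheduler}),
#           ('least_routers',
#            {'scheduler_cls': l3_agent_scheduler.LeastRoutersScheduler}),
#       ]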
HOST_DVR = 'my_l3_host_dvr'
DVR_L3_AGENT = {
'binary': 'neutron-l3-agent',
'host': HOST_DVR,
'topic': topics.L3_AGENT,
'configurations': {'agent_mode': 'dvr'},
'agent_type': constants.AGENT_TYPE_L3,
'start_flag': True
}
HOST_DVR_SNAT = 'my_l3_host_dvr_snat'
DVR_SNAT_L3_AGENT = {
'binary': 'neutron-l3-agent',
'host': HOST_DVR_SNAT,
'topic': topics.L3_AGENT,
'configurations': {'agent_mode': 'dvr_snat'},
'agent_type': constants.AGENT_TYPE_L3,
'start_flag': True
}
class FakeL3Scheduler(l3_agent_scheduler.L3Scheduler):
def schedule(self):
pass
def _choose_router_agent(self):
pass
def _choose_router_agents_for_ha(self):
pass
class FakePortDB(object):
def __init__(self, port_list):
self._port_list = port_list
def _get_query_answer(self, port_list, filters):
answers = []
for port in port_list:
matched = True
for key, search_values in filters.items():
port_value = port.get(key, None)
if not port_value:
matched = False
break
if isinstance(port_value, list):
sub_answers = self._get_query_answer(port_value,
search_values)
matched = len(sub_answers) > 0
else:
matched = port_value in search_values
if not matched:
break
if matched:
answers.append(port)
return answers
def get_port(self, context, port_id):
for port in self._port_list:
if port['id'] == port_id:
if port['tenant_id'] == context.tenant_id or context.is_admin:
return port
break
return None
def get_ports(self, context, filters=None):
query_filters = dict()
if filters:
query_filters.update(filters)
if not context.is_admin:
query_filters['tenant_id'] = [context.tenant_id]
result = self._get_query_answer(self._port_list, query_filters)
return result
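# Example behaviour (for clarity): given
#   db = FakePortDB([{'id': 'p1', 'tenant_id': 't1'}])
# db.get_ports(ctx, filters={'id': ['p1']}) returns that port when
# ctx.is_admin is True or ctx.tenant_id == 't1' (a non-admin context has its
# tenant_id added to the filters), while get_port() applies the same
# tenant/admin check when looking up a single port id.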
class L3SchedulerBaseTestCase(base.BaseTestCase):
def setUp(self):
super(L3SchedulerBaseTestCase, self).setUp()
self.scheduler = FakeL3Scheduler()
self.plugin = mock.Mock()
def test_auto_schedule_routers(self):
self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY]
with contextlib.nested(
mock.patch.object(self.scheduler, '_get_routers_to_schedule'),
mock.patch.object(self.scheduler, '_get_routers_can_schedule')
) as (gs, gr):
result = self.scheduler.auto_schedule_routers(
self.plugin, mock.ANY, mock.ANY, mock.ANY)
self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
self.assertTrue(result)
self.assertTrue(gs.called)
self.assertTrue(gr.called)
def test_auto_schedule_routers_no_agents(self):
self.plugin.get_enabled_agent_on_host.return_value = None
result = self.scheduler.auto_schedule_routers(
self.plugin, mock.ANY, mock.ANY, mock.ANY)
self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
self.assertFalse(result)
def test_auto_schedule_routers_no_unscheduled_routers(self):
type(self.plugin).supported_extension_aliases = (
mock.PropertyMock(return_value=[]))
with mock.patch.object(self.scheduler,
'_get_routers_to_schedule') as mock_routers:
mock_routers.return_value = []
result = self.scheduler.auto_schedule_routers(
self.plugin, mock.ANY, mock.ANY, mock.ANY)
self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
self.assertFalse(result)
def test_auto_schedule_routers_no_target_routers(self):
self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY]
with contextlib.nested(
mock.patch.object(self.scheduler, '_get_routers_to_schedule'),
mock.patch.object(self.scheduler, '_get_routers_can_schedule')
) as (mock_unscheduled_routers, mock_target_routers):
mock_unscheduled_routers.return_value = mock.ANY
mock_target_routers.return_value = None
result = self.scheduler.auto_schedule_routers(
self.plugin, mock.ANY, mock.ANY, mock.ANY)
self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
self.assertFalse(result)
def test__get_routers_to_schedule_with_router_ids(self):
router_ids = ['foo_router_1', 'foo_router_2']
expected_routers = [
{'id': 'foo_router1'}, {'id': 'foo_router_2'}
]
self.plugin.get_routers.return_value = expected_routers
with mock.patch.object(self.scheduler,
'_filter_unscheduled_routers') as mock_filter:
mock_filter.return_value = expected_routers
unscheduled_routers = self.scheduler._get_routers_to_schedule(
mock.ANY, self.plugin, router_ids)
mock_filter.assert_called_once_with(
mock.ANY, self.plugin, expected_routers)
self.assertEqual(expected_routers, unscheduled_routers)
def test__get_routers_to_schedule_without_router_ids(self):
expected_routers = [
{'id': 'foo_router1'}, {'id': 'foo_router_2'}
]
with mock.patch.object(self.scheduler,
'_get_unscheduled_routers') as mock_get:
mock_get.return_value = expected_routers
unscheduled_routers = self.scheduler._get_routers_to_schedule(
mock.ANY, self.plugin)
mock_get.assert_called_once_with(mock.ANY, self.plugin)
self.assertEqual(expected_routers, unscheduled_routers)
def test__get_routers_to_schedule_exclude_distributed(self):
routers = [
{'id': 'foo_router1', 'distributed': True}, {'id': 'foo_router_2'}
]
expected_routers = [{'id': 'foo_router_2'}]
with mock.patch.object(self.scheduler,
'_get_unscheduled_routers') as mock_get:
mock_get.return_value = routers
unscheduled_routers = self.scheduler._get_routers_to_schedule(
mock.ANY, self.plugin,
router_ids=None, exclude_distributed=True)
mock_get.assert_called_once_with(mock.ANY, self.plugin)
self.assertEqual(expected_routers, unscheduled_routers)
def _test__get_routers_can_schedule(self, routers, agent, target_routers):
self.plugin.get_l3_agent_candidates.return_value = agent
result = self.scheduler._get_routers_can_schedule(
mock.ANY, self.plugin, routers, mock.ANY)
self.assertEqual(target_routers, result)
def _test__filter_unscheduled_routers(self, routers, agents, expected):
self.plugin.get_l3_agents_hosting_routers.return_value = agents
unscheduled_routers = self.scheduler._filter_unscheduled_routers(
mock.ANY, self.plugin, routers)
self.assertEqual(expected, unscheduled_routers)
def test__filter_unscheduled_routers_already_scheduled(self):
self._test__filter_unscheduled_routers(
[{'id': 'foo_router1'}, {'id': 'foo_router_2'}],
[{'id': 'foo_agent_id'}], [])
def test__filter_unscheduled_routers_non_scheduled(self):
self._test__filter_unscheduled_routers(
[{'id': 'foo_router1'}, {'id': 'foo_router_2'}],
None, [{'id': 'foo_router1'}, {'id': 'foo_router_2'}])
def test__get_routers_can_schedule_with_compat_agent(self):
routers = [{'id': 'foo_router'}]
self._test__get_routers_can_schedule(routers, mock.ANY, routers)
def test__get_routers_can_schedule_with_no_compat_agent(self):
routers = [{'id': 'foo_router'}]
self._test__get_routers_can_schedule(routers, None, [])
def test__bind_routers_centralized(self):
routers = [{'id': 'foo_router'}]
with mock.patch.object(self.scheduler, 'bind_router') as mock_bind:
self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, mock.ANY)
mock_bind.assert_called_once_with(mock.ANY, 'foo_router', mock.ANY)
def _test__bind_routers_ha(self, has_binding):
routers = [{'id': 'foo_router', 'ha': True, 'tenant_id': '42'}]
agent = agents_db.Agent(id='foo_agent')
with contextlib.nested(
mock.patch.object(self.scheduler, '_router_has_binding',
return_value=has_binding),
mock.patch.object(self.scheduler, '_create_ha_router_binding')
) as (
mock_has_binding, mock_bind):
self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, agent)
mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router',
'foo_agent')
self.assertEqual(not has_binding, mock_bind.called)
def test__bind_routers_ha_has_binding(self):
self._test__bind_routers_ha(has_binding=True)
def test__bind_routers_ha_no_binding(self):
self._test__bind_routers_ha(has_binding=False)
class L3SchedulerBaseMixin(object):
def _register_l3_agent(self, host, agent_mode='legacy', plugin=None):
if not plugin:
plugin = self.plugin
agent = {
'binary': 'neutron-l3-agent',
'host': host,
'topic': topics.L3_AGENT,
'configurations': {'agent_mode': agent_mode},
'agent_type': constants.AGENT_TYPE_L3,
'start_flag': True
}
callback = agents_db.AgentExtRpcCallback()
callback.report_state(self.adminContext,
agent_state={'agent_state': agent},
time=timeutils.strtime())
agent_db = plugin.get_agents_db(self.adminContext,
filters={'host': [agent['host']]})
return agent_db[0]
def _register_l3_agents(self, plugin=None):
self.agent1 = self._register_l3_agent('host_1', plugin=plugin)
self.agent_id1 = self.agent1.id
self.agent2 = self._register_l3_agent('host_2', plugin=plugin)
self.agent_id2 = self.agent2.id
def _register_l3_dvr_agents(self):
callback = agents_db.AgentExtRpcCallback()
callback.report_state(self.adminContext,
agent_state={'agent_state': DVR_L3_AGENT},
time=timeutils.strtime())
agent_db = self.plugin.get_agents_db(self.adminContext,
filters={'host': [HOST_DVR]})
self.l3_dvr_agent = agent_db[0]
self.l3_dvr_agent_id = agent_db[0].id
callback.report_state(self.adminContext,
agent_state={'agent_state': DVR_SNAT_L3_AGENT},
time=timeutils.strtime())
agent_db = self.plugin.get_agents_db(self.adminContext,
filters={'host': [HOST_DVR_SNAT]})
self.l3_dvr_snat_id = agent_db[0].id
self.l3_dvr_snat_agent = agent_db[0]
def _set_l3_agent_admin_state(self, context, agent_id, state=True):
update = {'agent': {'admin_state_up': state}}
self.plugin.update_agent(context, agent_id, update)
def _set_l3_agent_dead(self, agent_id):
update = {
'agent': {
'heartbeat_timestamp':
timeutils.utcnow() - datetime.timedelta(hours=1)}}
self.plugin.update_agent(self.adminContext, agent_id, update)
@contextlib.contextmanager
def router_with_ext_gw(self, name='router1', admin_state_up=True,
fmt=None, tenant_id=str(uuid.uuid4()),
external_gateway_info=None,
subnet=None, set_context=False,
**kwargs):
router = self._make_router(fmt or self.fmt, tenant_id, name,
admin_state_up, external_gateway_info,
set_context, **kwargs)
self._add_external_gateway_to_router(
router['router']['id'],
subnet['subnet']['network_id'])
yield router
self._remove_external_gateway_from_router(
router['router']['id'], subnet['subnet']['network_id'])
self._delete('routers', router['router']['id'])
class L3SchedulerTestBaseMixin(object):
def _test_add_router_to_l3_agent(self,
distributed=False,
already_scheduled=False,
external_gw=None):
agent_id = self.agent_id1
agent = self.agent1
if distributed:
self._register_l3_dvr_agents()
agent_id = self.l3_dvr_snat_id
agent = self.l3_dvr_snat_agent
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r1')
router['router']['distributed'] = distributed
router['router']['external_gateway_info'] = external_gw
if already_scheduled:
self._test_schedule_bind_router(agent, router)
with contextlib.nested(
mock.patch.object(self, "validate_agent_router_combination"),
mock.patch.object(self, "create_router_to_agent_binding"),
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=router['router'])
) as (valid, auto_s, gr):
self.add_router_to_l3_agent(self.adminContext, agent_id,
router['router']['id'])
self.assertNotEqual(already_scheduled, auto_s.called)
def test__unbind_router_removes_binding(self):
agent_id = self.agent_id1
agent = self.agent1
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r1')
self._test_schedule_bind_router(agent, router)
self._unbind_router(self.adminContext,
router['router']['id'],
agent_id)
bindings = self._get_l3_bindings_hosting_routers(
self.adminContext, [router['router']['id']])
self.assertEqual(0, len(bindings))
def _create_router_for_l3_agent_dvr_test(self,
distributed=False,
external_gw=None):
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r1')
router['router']['distributed'] = distributed
router['router']['external_gateway_info'] = external_gw
return router
def _prepare_l3_agent_dvr_move_exceptions(self,
distributed=False,
external_gw=None,
agent_id=None,
expected_exception=None):
router = self._create_router_for_l3_agent_dvr_test(
distributed=distributed, external_gw=external_gw)
with contextlib.nested(
mock.patch.object(self, "create_router_to_agent_binding"),
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=router['router'])):
self.assertRaises(expected_exception,
self.add_router_to_l3_agent,
self.adminContext, agent_id,
router['router']['id'])
def test_add_router_to_l3_agent_mismatch_error_dvr_to_legacy(self):
self._register_l3_agents()
self._prepare_l3_agent_dvr_move_exceptions(
distributed=True,
agent_id=self.agent_id1,
expected_exception=l3agent.RouterL3AgentMismatch)
def test_add_router_to_l3_agent_mismatch_error_legacy_to_dvr(self):
self._register_l3_dvr_agents()
self._prepare_l3_agent_dvr_move_exceptions(
agent_id=self.l3_dvr_agent_id,
expected_exception=l3agent.RouterL3AgentMismatch)
def test_add_router_to_l3_agent_mismatch_error_dvr_to_dvr(self):
self._register_l3_dvr_agents()
self._prepare_l3_agent_dvr_move_exceptions(
distributed=True,
agent_id=self.l3_dvr_agent_id,
expected_exception=l3agent.DVRL3CannotAssignToDvrAgent)
def test_add_router_to_l3_agent_dvr_to_snat(self):
external_gw_info = {
"network_id": str(uuid.uuid4()),
"enable_snat": True
}
self._register_l3_dvr_agents()
agent_id = self.l3_dvr_snat_id
agent = self.l3_dvr_snat_agent
router = self._create_router_for_l3_agent_dvr_test(
distributed=True,
external_gw=external_gw_info)
with contextlib.nested(
mock.patch.object(self, "validate_agent_router_combination"),
mock.patch.object(self, "create_router_to_agent_binding"),
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=router['router'])
) as (valid_agent_rtr, rtr_agent_binding, get_rtr):
self.add_router_to_l3_agent(self.adminContext, agent_id,
router['router']['id'])
rtr_agent_binding.assert_called_once_with(
self.adminContext, agent, router['router'])
def test_add_router_to_l3_agent(self):
self._test_add_router_to_l3_agent()
def test_add_distributed_router_to_l3_agent(self):
external_gw_info = {
"network_id": str(uuid.uuid4()),
"enable_snat": True
}
self._test_add_router_to_l3_agent(distributed=True,
external_gw=external_gw_info)
def test_add_router_to_l3_agent_already_scheduled(self):
self._test_add_router_to_l3_agent(already_scheduled=True)
def test_add_distributed_router_to_l3_agent_already_scheduled(self):
external_gw_info = {
"network_id": str(uuid.uuid4()),
"enable_snat": True
}
self._test_add_router_to_l3_agent(distributed=True,
already_scheduled=True,
external_gw=external_gw_info)
def _prepare_schedule_dvr_tests(self):
scheduler = l3_agent_scheduler.ChanceScheduler()
agent = agents_db.Agent()
agent.admin_state_up = True
agent.heartbeat_timestamp = timeutils.utcnow()
plugin = mock.Mock()
plugin.get_l3_agents_hosting_routers.return_value = []
plugin.get_l3_agents.return_value = [agent]
plugin.get_l3_agent_candidates.return_value = [agent]
return scheduler, agent, plugin
def test_schedule_dvr_router_without_snatbinding_and_no_gw(self):
scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
sync_router = {
'id': 'foo_router_id',
'distributed': True
}
plugin.get_router.return_value = sync_router
with contextlib.nested(
mock.patch.object(scheduler, 'bind_router'),
mock.patch.object(
plugin, 'get_snat_bindings', return_value=False)
):
scheduler._schedule_router(
plugin, self.adminContext, 'foo_router_id', None)
expected_calls = [
mock.call.get_router(mock.ANY, 'foo_router_id'),
mock.call.get_l3_agents_hosting_routers(
mock.ANY, ['foo_router_id'], admin_state_up=True),
mock.call.get_l3_agents(mock.ANY, active=True),
mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]),
]
plugin.assert_has_calls(expected_calls)
def test_schedule_dvr_router_with_snatbinding_no_gw(self):
scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
sync_router = {'id': 'foo_router_id',
'distributed': True}
plugin.get_router.return_value = sync_router
with mock.patch.object(plugin, 'get_snat_bindings', return_value=True):
scheduler._schedule_router(
plugin, self.adminContext, 'foo_router_id', None)
expected_calls = [
mock.call.get_router(mock.ANY, 'foo_router_id'),
mock.call.unbind_snat_servicenode(mock.ANY, 'foo_router_id'),
]
plugin.assert_has_calls(expected_calls)
def test_schedule_router_distributed(self):
scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
sync_router = {
'id': 'foo_router_id',
'distributed': True,
'external_gateway_info': {
'network_id': str(uuid.uuid4()),
'enable_snat': True
}
}
plugin.get_router.return_value = sync_router
with mock.patch.object(
plugin, 'get_snat_bindings', return_value=False):
scheduler._schedule_router(
plugin, self.adminContext, 'foo_router_id', None)
expected_calls = [
mock.call.get_router(mock.ANY, 'foo_router_id'),
mock.call.schedule_snat_router(
mock.ANY, 'foo_router_id', sync_router),
]
plugin.assert_has_calls(expected_calls)
def _test_schedule_bind_router(self, agent, router):
ctx = self.adminContext
session = ctx.session
db = l3_agentschedulers_db.RouterL3AgentBinding
scheduler = l3_agent_scheduler.ChanceScheduler()
rid = router['router']['id']
scheduler.bind_router(ctx, rid, agent)
results = (session.query(db).filter_by(router_id=rid).all())
self.assertTrue(len(results) > 0)
self.assertIn(agent.id, [bind.l3_agent_id for bind in results])
def test_bind_new_router(self):
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r1')
with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
self._test_schedule_bind_router(self.agent1, router)
self.assertEqual(1, flog.call_count)
args, kwargs = flog.call_args
self.assertIn('is scheduled', args[0])
def test_bind_absent_router(self):
scheduler = l3_agent_scheduler.ChanceScheduler()
# checking that bind_router() is not throwing
# when supplied with router_id of non-existing router
scheduler.bind_router(self.adminContext, "dummyID", self.agent1)
def test_bind_existing_router(self):
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
self._test_schedule_bind_router(self.agent1, router)
with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
self._test_schedule_bind_router(self.agent1, router)
self.assertEqual(1, flog.call_count)
args, kwargs = flog.call_args
self.assertIn('has already been scheduled', args[0])
def _check_get_l3_agent_candidates(
self, router, agent_list, exp_host, count=1):
candidates = self.get_l3_agent_candidates(self.adminContext,
router, agent_list)
self.assertEqual(len(candidates), count)
if count:
self.assertEqual(candidates[0]['host'], exp_host)
def test_get_l3_agent_candidates_legacy(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
agent_list = [self.agent1, self.l3_dvr_agent]
# test legacy agent_mode case: only legacy agent should be candidate
router['distributed'] = False
exp_host = 'host_1'
self._check_get_l3_agent_candidates(router, agent_list, exp_host)
def test_get_l3_agent_candidates_dvr(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
agent_list = [self.agent1, self.l3_dvr_agent]
# test dvr agent_mode case only dvr agent should be candidate
router['distributed'] = True
exp_host = DVR_L3_AGENT.get('host')
self.check_ports_exist_on_l3agent = mock.Mock(return_value=True)
self._check_get_l3_agent_candidates(router, agent_list, exp_host)
def test_get_l3_agent_candidates_dvr_no_vms(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
agent_list = [self.agent1, self.l3_dvr_agent]
exp_host = DVR_L3_AGENT.get('host')
router['distributed'] = True
# Test no VMs present case
self.check_ports_exist_on_l3agent = mock.Mock(return_value=False)
self._check_get_l3_agent_candidates(
router, agent_list, exp_host, count=0)
def test_get_l3_agent_candidates_dvr_snat(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
router['distributed'] = True
agent_list = [self.l3_dvr_snat_agent]
exp_host = DVR_SNAT_L3_AGENT.get('host')
self.check_ports_exist_on_l3agent = mock.Mock(return_value=True)
self._check_get_l3_agent_candidates(router, agent_list, exp_host)
def test_get_l3_agent_candidates_dvr_snat_no_vms(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
router['distributed'] = True
agent_list = [self.l3_dvr_snat_agent]
exp_host = DVR_SNAT_L3_AGENT.get('host')
self.check_ports_exist_on_l3agent = mock.Mock(return_value=False)
# Test no VMs present case
self.check_ports_exist_on_l3agent.return_value = False
self._check_get_l3_agent_candidates(
router, agent_list, exp_host, count=0)
def test_get_l3_agent_candidates_centralized(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
# check centralized test case
router['distributed'] = False
exp_host = DVR_SNAT_L3_AGENT.get('host')
agent_list = [self.l3_dvr_snat_agent]
self._check_get_l3_agent_candidates(router, agent_list, exp_host)
def _prepare_check_ports_exist_tests(self):
l3_agent = agents_db.Agent()
l3_agent.admin_state_up = True
l3_agent.host = 'host_1'
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
self.plugin.get_ports = mock.Mock(return_value=[])
self.get_subnet_ids_on_router = mock.Mock(return_value=[])
return l3_agent, router
def test_check_ports_exist_on_l3agent_no_subnets(self):
l3_agent, router = self._prepare_check_ports_exist_tests()
# no subnets
val = self.check_ports_exist_on_l3agent(self.adminContext,
l3_agent, router['id'])
self.assertFalse(val)
def test_check_ports_exist_on_l3agent_if_no_subnets_then_return(self):
l3_agent, router = self._prepare_check_ports_exist_tests()
with mock.patch.object(manager.NeutronManager,
'get_plugin') as getp:
getp.return_value = self.plugin
# no subnets and operation is remove_router_interface,
# so return immediately without calling get_ports
self.check_ports_exist_on_l3agent(self.adminContext,
l3_agent, router['id'])
self.assertFalse(self.plugin.get_ports.called)
def test_check_ports_exist_on_l3agent_no_subnet_match(self):
l3_agent, router = self._prepare_check_ports_exist_tests()
# no matching subnet
self.plugin.get_subnet_ids_on_router = mock.Mock(
return_value=[str(uuid.uuid4())])
val = self.check_ports_exist_on_l3agent(self.adminContext,
l3_agent, router['id'])
self.assertFalse(val)
def test_check_ports_exist_on_l3agent_subnet_match(self):
l3_agent, router = self._prepare_check_ports_exist_tests()
# matching subnet
port = {'subnet_id': str(uuid.uuid4()),
'binding:host_id': 'host_1',
'device_owner': 'compute:',
'id': 1234}
self.plugin.get_ports.return_value = [port]
self.get_subnet_ids_on_router = mock.Mock(
return_value=[port['subnet_id']])
val = self.check_ports_exist_on_l3agent(self.adminContext,
l3_agent, router['id'])
self.assertTrue(val)
def test_get_l3_agents_hosting_routers(self):
agent = self._register_l3_agent('host_6')
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r1')
ctx = self.adminContext
router_id = router['router']['id']
self.plugin.router_scheduler.bind_router(ctx, router_id, agent)
agents = self.get_l3_agents_hosting_routers(ctx,
[router_id])
self.assertEqual([agent.id], [agt.id for agt in agents])
agents = self.get_l3_agents_hosting_routers(ctx,
[router_id],
admin_state_up=True)
self.assertEqual([agent.id], [agt.id for agt in agents])
self._set_l3_agent_admin_state(ctx, agent.id, False)
agents = self.get_l3_agents_hosting_routers(ctx,
[router_id])
self.assertEqual([agent.id], [agt.id for agt in agents])
agents = self.get_l3_agents_hosting_routers(ctx,
[router_id],
admin_state_up=True)
self.assertEqual([], agents)
class L3SchedulerTestCase(l3_agentschedulers_db.L3AgentSchedulerDbMixin,
l3_db.L3_NAT_db_mixin,
common_db_mixin.CommonDbMixin,
test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
test_l3.L3NatTestCaseMixin,
L3SchedulerBaseMixin,
L3SchedulerTestBaseMixin):
def setUp(self):
self.mock_rescheduling = False
ext_mgr = test_l3.L3TestExtensionManager()
plugin_str = ('neutron.tests.unit.extensions.test_l3.'
'TestL3NatIntAgentSchedulingPlugin')
super(L3SchedulerTestCase, self).setUp(plugin=plugin_str,
ext_mgr=ext_mgr)
self.adminContext = q_context.get_admin_context()
self.plugin = manager.NeutronManager.get_plugin()
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
)
self._register_l3_agents()
class L3AgentChanceSchedulerTestCase(L3SchedulerTestCase):
def test_random_scheduling(self):
random_patch = mock.patch('random.choice')
random_mock = random_patch.start()
def side_effect(seq):
return seq[0]
random_mock.side_effect = side_effect
with self.subnet() as subnet:
self._set_net_external(subnet['subnet']['network_id'])
with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r1['router']['id']],
admin_state_up=True)
self.assertEqual(len(agents), 1)
self.assertEqual(random_mock.call_count, 1)
with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r2['router']['id']],
admin_state_up=True)
self.assertEqual(len(agents), 1)
self.assertEqual(random_mock.call_count, 2)
random_patch.stop()
def test_scheduler_auto_schedule_when_agent_added(self):
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id1, False)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, False)
with self.subnet() as subnet:
self._set_net_external(subnet['subnet']['network_id'])
with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r1['router']['id']],
admin_state_up=True)
self.assertEqual(0, len(agents))
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id1, True)
self.plugin.auto_schedule_routers(self.adminContext,
'host_1',
[r1['router']['id']])
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r1['router']['id']],
admin_state_up=True)
self.assertEqual('host_1', agents[0]['host'])
class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCase):
def setUp(self):
super(L3AgentLeastRoutersSchedulerTestCase, self).setUp()
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
)
def test_scheduler(self):
# disable one agent to force the scheduling to the only one.
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, False)
with self.subnet() as subnet:
self._set_net_external(subnet['subnet']['network_id'])
with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r1['router']['id']],
admin_state_up=True)
self.assertEqual(len(agents), 1)
agent_id1 = agents[0]['id']
with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r2['router']['id']],
admin_state_up=True)
self.assertEqual(len(agents), 1)
agent_id2 = agents[0]['id']
self.assertEqual(agent_id1, agent_id2)
# re-enable the second agent to see whether the next router
# spawned will be on this one.
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, True)
with self.router_with_ext_gw(name='r3',
subnet=subnet) as r3:
agents = self.get_l3_agents_hosting_routers(
self.adminContext, [r3['router']['id']],
admin_state_up=True)
self.assertEqual(len(agents), 1)
agent_id3 = agents[0]['id']
self.assertNotEqual(agent_id1, agent_id3)
class L3DvrScheduler(l3_db.L3_NAT_db_mixin,
l3_dvrscheduler_db.L3_DVRsch_db_mixin):
pass
class L3DvrSchedulerTestCase(testlib_api.SqlTestCase):
def setUp(self):
plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
self.setup_coreplugin(plugin)
super(L3DvrSchedulerTestCase, self).setUp()
self.adminContext = q_context.get_admin_context()
self.dut = L3DvrScheduler()
def test__notify_port_delete(self):
plugin = manager.NeutronManager.get_plugin()
l3plugin = mock.Mock()
l3plugin.supported_extension_aliases = [
'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS
]
with mock.patch.object(manager.NeutronManager,
'get_service_plugins',
return_value={'L3_ROUTER_NAT': l3plugin}):
kwargs = {
'context': self.adminContext,
'port': mock.ANY,
'removed_routers': [
{'agent_id': 'foo_agent', 'router_id': 'foo_id'},
],
}
l3_dvrscheduler_db._notify_port_delete(
'port', 'after_delete', plugin, **kwargs)
l3plugin.dvr_vmarp_table_update.assert_called_once_with(
self.adminContext, mock.ANY, 'del')
l3plugin.remove_router_from_l3_agent.assert_called_once_with(
self.adminContext, 'foo_agent', 'foo_id')
def test_dvr_update_router_addvm(self):
port = {
'device_id': 'abcd',
'device_owner': 'compute:nova',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.3'
}
]
}
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': 'network:router_interface_distributed',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with contextlib.nested(
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port]),
mock.patch('neutron.manager.NeutronManager.get_service_plugins',
return_value=mock.Mock()),
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=r1),
mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
'.L3AgentNotifyAPI')):
self.dut.dvr_update_router_addvm(self.adminContext, port)
def test_get_dvr_routers_by_portid(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': 'network:router_interface_distributed',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with contextlib.nested(
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_port', return_value=dvr_port),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port])):
router_id = self.dut.get_dvr_routers_by_portid(self.adminContext,
dvr_port['id'])
self.assertEqual(router_id.pop(), r1['id'])
def test_get_subnet_ids_on_router(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': 'network:router_interface_distributed',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with contextlib.nested(
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port])):
sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
r1['id'])
self.assertEqual(sub_ids.pop(),
dvr_port.get('fixed_ips').pop(0).get('subnet_id'))
def test_check_ports_active_on_host_and_subnet(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'status': 'ACTIVE',
'binding:host_id': 'thisHost',
'device_owner': 'compute:nova',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with contextlib.nested(
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port]),
mock.patch('neutron.manager.NeutronManager.get_service_plugins',
return_value=mock.Mock()),
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=r1),
mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
'.L3AgentNotifyAPI')):
sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
r1['id'])
result = self.dut.check_ports_active_on_host_and_subnet(
self.adminContext,
'thisHost', 'dvr_port1',
sub_ids)
self.assertFalse(result)
def _test_dvr_serviced_port_exists_on_subnet(self, port):
with mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_ports', return_value=[port]):
result = self.dut.check_ports_active_on_host_and_subnet(
self.adminContext,
'thisHost',
'dvr1-intf-id',
'my-subnet-id')
self.assertTrue(result)
def test_dvr_serviced_vip_port_exists_on_subnet(self):
vip_port = {
'id': 'lbaas-vip-port1',
'device_id': 'vip-pool-id',
'status': 'ACTIVE',
'binding:host_id': 'thisHost',
'device_owner': constants.DEVICE_OWNER_LOADBALANCER,
'fixed_ips': [
{
'subnet_id': 'my-subnet-id',
'ip_address': '10.10.10.1'
}
]
}
self._test_dvr_serviced_port_exists_on_subnet(port=vip_port)
def _create_port(self, port_name, tenant_id, host, subnet_id, ip_address,
status='ACTIVE',
device_owner='compute:nova'):
return {
'id': port_name + '-port-id',
'tenant_id': tenant_id,
'device_id': port_name,
'device_owner': device_owner,
'status': status,
'binding:host_id': host,
'fixed_ips': [
{
'subnet_id': subnet_id,
'ip_address': ip_address
}
]
}
def test_dvr_deletens_if_no_port_no_routers(self):
# Delete a vm port, the port subnet has no router interface.
vm_tenant_id = 'tenant-1'
my_context = q_context.Context('user-1', vm_tenant_id, is_admin=False)
vm_port_host = 'compute-node-1'
vm_port = self._create_port(
'deleted-vm', vm_tenant_id, vm_port_host,
'shared-subnet', '10.10.10.3',
status='INACTIVE')
vm_port_id = vm_port['id']
fakePortDB = FakePortDB([vm_port])
with contextlib.nested(
mock.patch.object(my_context, 'elevated',
return_value=self.adminContext),
mock.patch('neutron.plugins.ml2.db.'
'get_port_binding_host', return_value=vm_port_host),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_ports', side_effect=fakePortDB.get_ports),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_port', return_value=vm_port)) as (
_, mock_get_port_binding_host, _, _):
routers = self.dut.dvr_deletens_if_no_port(my_context, vm_port_id)
self.assertEqual([], routers)
mock_get_port_binding_host.assert_called_once_with(
self.adminContext.session, vm_port_id)
def test_dvr_deletens_if_no_ports_no_removeable_routers(self):
# A VM port is deleted, but the router can't be unscheduled from the
# compute node because there is another VM port present.
vm_tenant_id = 'tenant-1'
my_context = q_context.Context('user-1', vm_tenant_id, is_admin=False)
shared_subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
vm_port_host = 'compute-node-1'
dvr_port = self._create_port(
'dvr-router', 'admin-tenant', vm_port_host,
shared_subnet_id, '10.10.10.1',
device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
deleted_vm_port = self._create_port(
'deleted-vm', vm_tenant_id, vm_port_host,
shared_subnet_id, '10.10.10.3',
status='INACTIVE')
deleted_vm_port_id = deleted_vm_port['id']
running_vm_port = self._create_port(
'running-vn', 'tenant-2', vm_port_host,
shared_subnet_id, '10.10.10.33')
fakePortDB = FakePortDB([running_vm_port, deleted_vm_port, dvr_port])
vm_port_binding = {
'port_id': deleted_vm_port_id,
'host': vm_port_host
}
with contextlib.nested(
mock.patch.object(my_context, 'elevated',
return_value=self.adminContext),
mock.patch('neutron.plugins.ml2.db.get_port_binding_host',
return_value=vm_port_host),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_port', side_effect=fakePortDB.get_port),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_ports', side_effect=fakePortDB.get_ports),
mock.patch('neutron.plugins.ml2.db.get_dvr_port_binding_by_host',
return_value=vm_port_binding)) as (_,
mock_get_port_binding_host, _,
mock_get_ports,
mock_get_dvr_port_binding_by_host):
routers = self.dut.dvr_deletens_if_no_port(
my_context, deleted_vm_port_id)
self.assertEqual([], routers)
mock_get_port_binding_host.assert_called_once_with(
self.adminContext.session, deleted_vm_port_id)
self.assertTrue(mock_get_ports.called)
self.assertFalse(mock_get_dvr_port_binding_by_host.called)
def _test_dvr_deletens_if_no_ports_delete_routers(self,
vm_tenant,
router_tenant):
class FakeAgent(object):
def __init__(self, id, host, agent_type):
self.id = id
self.host = host
self.agent_type = agent_type
my_context = q_context.Context('user-1', vm_tenant, is_admin=False)
shared_subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
vm_port_host = 'compute-node-1'
router_id = 'dvr-router'
dvr_port = self._create_port(
router_id, router_tenant, vm_port_host,
shared_subnet_id, '10.10.10.1',
device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
dvr_port_id = dvr_port['id']
deleted_vm_port = self._create_port(
'deleted-vm', vm_tenant, vm_port_host,
shared_subnet_id, '10.10.10.3',
status='INACTIVE')
deleted_vm_port_id = deleted_vm_port['id']
running_vm_port = self._create_port(
'running-vn', vm_tenant, 'compute-node-2',
shared_subnet_id, '10.10.10.33')
fakePortDB = FakePortDB([running_vm_port, dvr_port, deleted_vm_port])
dvr_port_binding = {
'port_id': dvr_port_id, 'host': vm_port_host
}
agent_id = 'l3-agent-on-compute-node-1'
l3_agent_on_vm_host = FakeAgent(agent_id,
vm_port_host,
constants.AGENT_TYPE_L3)
with contextlib.nested(
mock.patch.object(my_context, 'elevated',
return_value=self.adminContext),
mock.patch('neutron.plugins.ml2.db.get_port_binding_host',
return_value=vm_port_host),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_port', side_effect=fakePortDB.get_port),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_ports', side_effect=fakePortDB.get_ports),
mock.patch('neutron.plugins.ml2.db.get_dvr_port_binding_by_host',
return_value=dvr_port_binding),
mock.patch('neutron.db.agents_db.AgentDbMixin.'
'_get_agent_by_type_and_host',
return_value=l3_agent_on_vm_host)) as (_,
mock_get_port_binding_host, _,
mock_get_ports,
mock_get_dvr_port_binding_by_host,
mock__get_agent_by_type_and_host):
routers = self.dut.dvr_deletens_if_no_port(
my_context, deleted_vm_port_id)
expected_router = {
'router_id': router_id,
'host': vm_port_host,
'agent_id': agent_id
}
self.assertEqual([expected_router], routers)
mock_get_port_binding_host.assert_called_once_with(
self.adminContext.session, deleted_vm_port_id)
self.assertTrue(mock_get_ports.called)
mock_get_dvr_port_binding_by_host.assert_called_once_with(
my_context.session, dvr_port_id, vm_port_host)
def test_dvr_deletens_if_no_ports_delete_admin_routers(self):
        # test that a router created by the admin is unscheduled from the
        # compute node when the last VM using it is removed
self._test_dvr_deletens_if_no_ports_delete_routers(
'tenant-1', 'admin-tenant')
def test_dvr_deletens_if_no_ports_delete_tenant_routers(self):
        # test that a tenant's private router is unscheduled from the
        # compute node when the last VM using it is removed
self._test_dvr_deletens_if_no_ports_delete_routers(
'tenant-1', 'tenant-1')
def test_dvr_serviced_dhcp_port_exists_on_subnet(self):
dhcp_port = {
'id': 'dhcp-port1',
'device_id': 'dhcp-net-id',
'status': 'ACTIVE',
'binding:host_id': 'thisHost',
'device_owner': constants.DEVICE_OWNER_DHCP,
'fixed_ips': [
{
'subnet_id': 'my-subnet-id',
'ip_address': '10.10.10.2'
}
]
}
self._test_dvr_serviced_port_exists_on_subnet(port=dhcp_port)
def _prepare_schedule_snat_tests(self):
agent = agents_db.Agent()
agent.admin_state_up = True
agent.heartbeat_timestamp = timeutils.utcnow()
router = {
'id': 'foo_router_id',
'distributed': True,
'external_gateway_info': {
'network_id': str(uuid.uuid4()),
'enable_snat': True
}
}
return agent, router
def test_schedule_snat_router_duplicate_entry(self):
self._prepare_schedule_snat_tests()
with contextlib.nested(
mock.patch.object(self.dut, 'get_l3_agents'),
mock.patch.object(self.dut, 'get_snat_candidates'),
mock.patch.object(self.dut, 'bind_snat_servicenode',
side_effect=db_exc.DBDuplicateEntry()),
mock.patch.object(self.dut, 'bind_dvr_router_servicenode')
        ) as (mock_gl3, mock_snat_candidates, mock_bind_snat, mock_bind_dvr):
self.dut.schedule_snat_router(self.adminContext, 'foo', 'bar')
self.assertTrue(mock_bind_snat.called)
self.assertFalse(mock_bind_dvr.called)
def test_schedule_snat_router_return_value(self):
agent, router = self._prepare_schedule_snat_tests()
with contextlib.nested(
mock.patch.object(self.dut, 'get_l3_agents'),
mock.patch.object(self.dut, 'get_snat_candidates'),
mock.patch.object(self.dut, 'bind_snat_servicenode'),
mock.patch.object(self.dut, 'bind_dvr_router_servicenode')
        ) as (mock_gl3, mock_snat_candidates, mock_bind_snat, mock_bind_dvr):
            mock_snat_candidates.return_value = [agent]
mock_bind_snat.return_value = [agent]
mock_bind_dvr.return_value = [agent]
chosen_agent = self.dut.schedule_snat_router(
self.adminContext, 'foo_router_id', router)
self.assertEqual(chosen_agent, [agent])
def test_schedule_router_unbind_snat_servicenode_negativetest(self):
router = {
'id': 'foo_router_id',
'distributed': True
}
with contextlib.nested(
mock.patch.object(self.dut, 'get_router'),
mock.patch.object(self.dut, 'get_snat_bindings'),
mock.patch.object(self.dut, 'unbind_snat_servicenode')
) as (mock_rd, mock_snat_bind, mock_unbind):
mock_rd.return_value = router
mock_snat_bind.return_value = False
self.dut.schedule_snat_router(
self.adminContext, 'foo_router_id', router)
self.assertFalse(mock_unbind.called)
def test_schedule_snat_router_with_snat_candidates(self):
agent, router = self._prepare_schedule_snat_tests()
with contextlib.nested(
mock.patch.object(query.Query, 'first'),
mock.patch.object(self.dut, 'get_l3_agents'),
mock.patch.object(self.dut, 'get_snat_candidates'),
mock.patch.object(self.dut, 'get_router'),
mock.patch.object(self.dut, 'bind_dvr_router_servicenode'),
mock.patch.object(self.dut, 'bind_snat_servicenode')) as (
mock_query, mock_agents,
mock_candidates, mock_rd, mock_dvr, mock_bind):
mock_rd.return_value = router
mock_query.return_value = []
mock_agents.return_value = [agent]
mock_candidates.return_value = [agent]
self.dut.schedule_snat_router(
self.adminContext, 'foo_router_id', mock.ANY)
mock_bind.assert_called_once_with(
self.adminContext, 'foo_router_id', [agent])
def test_unbind_snat_servicenode(self):
router_id = 'foo_router_id'
core_plugin = mock.PropertyMock()
type(self.dut)._core_plugin = core_plugin
(self.dut._core_plugin.get_ports_on_host_by_subnet.
return_value) = []
core_plugin.reset_mock()
l3_notifier = mock.PropertyMock()
type(self.dut).l3_rpc_notifier = l3_notifier
binding = l3_dvrscheduler_db.CentralizedSnatL3AgentBinding(
router_id=router_id, l3_agent_id='foo_l3_agent_id',
l3_agent=agents_db.Agent())
with contextlib.nested(
mock.patch.object(query.Query, 'one'),
mock.patch.object(self.adminContext.session, 'delete'),
mock.patch.object(query.Query, 'delete'),
mock.patch.object(self.dut, 'get_subnet_ids_on_router')) as (
mock_query, mock_session, mock_delete, mock_get_subnets):
mock_query.return_value = binding
mock_get_subnets.return_value = ['foo_subnet_id']
self.dut.unbind_snat_servicenode(self.adminContext, router_id)
mock_get_subnets.assert_called_with(self.adminContext, router_id)
self.assertTrue(mock_session.call_count)
self.assertTrue(mock_delete.call_count)
core_plugin.assert_called_once_with()
l3_notifier.assert_called_once_with()
class L3HAPlugin(db_v2.NeutronDbPluginV2,
l3_hamode_db.L3_HA_NAT_db_mixin,
l3_hascheduler_db.L3_HA_scheduler_db_mixin):
supported_extension_aliases = ["l3-ha"]
class L3HATestCaseMixin(testlib_api.SqlTestCase,
L3SchedulerBaseMixin):
def setUp(self):
super(L3HATestCaseMixin, self).setUp()
self.adminContext = q_context.get_admin_context()
self.plugin = L3HAPlugin()
self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
'_notify_ha_interfaces_updated').start()
cfg.CONF.set_override('max_l3_agents_per_router', 0)
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
)
self._register_l3_agents()
def _create_ha_router(self, ha=True, tenant_id='tenant1'):
self.adminContext.tenant_id = tenant_id
router = {'name': 'router1', 'admin_state_up': True}
if ha is not None:
router['ha'] = ha
return self.plugin.create_router(self.adminContext,
{'router': router})
class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin):
def _register_l3_agents(self, plugin=None):
super(L3_HA_scheduler_db_mixinTestCase,
self)._register_l3_agents(plugin=plugin)
self.agent3 = self._register_l3_agent('host_3', plugin=plugin)
self.agent_id3 = self.agent3.id
self.agent4 = self._register_l3_agent('host_4', plugin=plugin)
self.agent_id4 = self.agent4.id
def test_get_ha_routers_l3_agents_count(self):
router1 = self._create_ha_router()
router2 = self._create_ha_router()
router3 = self._create_ha_router(ha=False)
self.plugin.schedule_router(self.adminContext, router1['id'])
self.plugin.schedule_router(self.adminContext, router2['id'])
self.plugin.schedule_router(self.adminContext, router3['id'])
result = self.plugin.get_ha_routers_l3_agents_count(
self.adminContext).all()
self.assertEqual(2, len(result))
self.assertIn((router1['id'], router1['tenant_id'], 4), result)
self.assertIn((router2['id'], router2['tenant_id'], 4), result)
self.assertNotIn((router3['id'], router3['tenant_id'], mock.ANY),
result)
def test_get_ordered_l3_agents_by_num_routers(self):
router1 = self._create_ha_router()
router2 = self._create_ha_router()
router3 = self._create_ha_router(ha=False)
router4 = self._create_ha_router(ha=False)
# Agent 1 will host 0 routers, agent 2 will host 1, agent 3 will
# host 2, and agent 4 will host 3.
self.plugin.schedule_router(self.adminContext, router1['id'],
candidates=[self.agent2, self.agent4])
self.plugin.schedule_router(self.adminContext, router2['id'],
candidates=[self.agent3, self.agent4])
self.plugin.schedule_router(self.adminContext, router3['id'],
candidates=[self.agent3])
self.plugin.schedule_router(self.adminContext, router4['id'],
candidates=[self.agent4])
agent_ids = [self.agent_id1, self.agent_id2, self.agent_id3,
self.agent_id4]
result = self.plugin.get_l3_agents_ordered_by_num_routers(
self.adminContext, agent_ids)
self.assertEqual(agent_ids, [record['id'] for record in result])
class L3AgentSchedulerDbMixinTestCase(L3HATestCaseMixin):
def test_reschedule_ha_routers_from_down_agents(self):
router = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, router['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
self._set_l3_agent_dead(self.agent_id1)
with mock.patch.object(self.plugin, 'reschedule_router') as reschedule:
self.plugin.reschedule_routers_from_down_agents()
self.assertFalse(reschedule.called)
def test_list_l3_agents_hosting_ha_router(self):
router = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, router['id'])
agents = self.plugin.list_l3_agents_hosting_router(
self.adminContext, router['id'])['agents']
for agent in agents:
self.assertEqual('standby', agent['ha_state'])
self.plugin.update_routers_states(
self.adminContext, {router['id']: 'active'}, self.agent1.host)
agents = self.plugin.list_l3_agents_hosting_router(
self.adminContext, router['id'])['agents']
for agent in agents:
expected_state = ('active' if agent['host'] == self.agent1.host
else 'standby')
self.assertEqual(expected_state, agent['ha_state'])
def test_list_l3_agents_hosting_legacy_router(self):
router = self._create_ha_router(ha=False)
self.plugin.schedule_router(self.adminContext, router['id'])
agents = self.plugin.list_l3_agents_hosting_router(
self.adminContext, router['id'])['agents']
for agent in agents:
self.assertIsNone(agent['ha_state'])
def test_get_agents_dict_for_router_unscheduled_returns_empty_list(self):
self.assertEqual({'agents': []},
self.plugin._get_agents_dict_for_router([]))
class L3HAChanceSchedulerTestCase(L3HATestCaseMixin):
def test_scheduler_with_ha_enabled(self):
router = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, router['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
for agent in agents:
sync_data = self.plugin.get_ha_sync_data_for_host(
self.adminContext, router_ids=[router['id']],
host=agent.host)
self.assertEqual(1, len(sync_data))
interface = sync_data[0][constants.HA_INTERFACE_KEY]
self.assertIsNotNone(interface)
def test_auto_schedule(self):
router = self._create_ha_router()
self.plugin.auto_schedule_routers(
self.adminContext, self.agent1.host, None)
self.plugin.auto_schedule_routers(
self.adminContext, self.agent2.host, None)
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']])
self.assertEqual(2, len(agents))
def test_auto_schedule_specific_router_when_agent_added(self):
self._auto_schedule_when_agent_added(True)
def test_auto_schedule_all_routers_when_agent_added(self):
self._auto_schedule_when_agent_added(False)
def _auto_schedule_when_agent_added(self, specific_router):
router = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, router['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
agent_ids = [agent['id'] for agent in agents]
self.assertIn(self.agent_id1, agent_ids)
self.assertIn(self.agent_id2, agent_ids)
agent = self._register_l3_agent('host_3')
self.agent_id3 = agent.id
routers_to_auto_schedule = [router['id']] if specific_router else []
self.plugin.auto_schedule_routers(self.adminContext,
'host_3',
routers_to_auto_schedule)
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [router['id']],
admin_state_up=True)
self.assertEqual(3, len(agents))
# Simulate agent restart to make sure we don't try to re-bind
self.plugin.auto_schedule_routers(self.adminContext,
'host_3',
routers_to_auto_schedule)
def test_scheduler_with_ha_enabled_not_enough_agent(self):
r1 = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, r1['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, False)
r2 = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, r2['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r2['id']],
admin_state_up=True)
self.assertEqual(0, len(agents))
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id2, True)
class L3HALeastRoutersSchedulerTestCase(L3HATestCaseMixin):
def _register_l3_agents(self, plugin=None):
super(L3HALeastRoutersSchedulerTestCase,
self)._register_l3_agents(plugin=plugin)
agent = self._register_l3_agent('host_3', plugin=plugin)
self.agent_id3 = agent.id
agent = self._register_l3_agent('host_4', plugin=plugin)
self.agent_id4 = agent.id
def setUp(self):
super(L3HALeastRoutersSchedulerTestCase, self).setUp()
self.plugin.router_scheduler = importutils.import_object(
'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
)
def test_scheduler(self):
cfg.CONF.set_override('max_l3_agents_per_router', 2)
        # disable the third and fourth agents to make sure the router
        # will be scheduled on the first two
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id3, False)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id4, False)
r1 = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, r1['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r1['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
agent_ids = [agent['id'] for agent in agents]
self.assertIn(self.agent_id1, agent_ids)
self.assertIn(self.agent_id2, agent_ids)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id3, True)
self._set_l3_agent_admin_state(self.adminContext,
self.agent_id4, True)
r2 = self._create_ha_router()
self.plugin.schedule_router(self.adminContext, r2['id'])
agents = self.plugin.get_l3_agents_hosting_routers(
self.adminContext, [r2['id']],
admin_state_up=True)
self.assertEqual(2, len(agents))
agent_ids = [agent['id'] for agent in agents]
self.assertIn(self.agent_id3, agent_ids)
self.assertIn(self.agent_id4, agent_ids)
class TestGetL3AgentsWithAgentModeFilter(testlib_api.SqlTestCase,
L3SchedulerBaseMixin):
"""Test cases to test get_l3_agents.
This class tests the L3AgentSchedulerDbMixin.get_l3_agents()
for the 'agent_mode' filter with various values.
5 l3 agents are registered in the order - legacy, dvr_snat, dvr, fake_mode
and legacy
"""
scenarios = [
('no filter',
dict(agent_modes=[],
expected_agent_modes=['legacy', 'dvr_snat', 'dvr',
'fake_mode', 'legacy'])),
('legacy',
dict(agent_modes=['legacy'],
expected_agent_modes=['legacy', 'legacy'])),
('dvr_snat',
dict(agent_modes=['dvr_snat'],
expected_agent_modes=['dvr_snat'])),
        ('dvr',
dict(agent_modes=['dvr'],
expected_agent_modes=['dvr'])),
('legacy and dvr snat',
dict(agent_modes=['legacy', 'dvr_snat', 'legacy'],
expected_agent_modes=['legacy', 'dvr_snat', 'legacy'])),
('legacy and dvr',
dict(agent_modes=['legacy', 'dvr'],
expected_agent_modes=['legacy', 'dvr', 'legacy'])),
('dvr_snat and dvr',
dict(agent_modes=['dvr_snat', 'dvr'],
expected_agent_modes=['dvr_snat', 'dvr'])),
('legacy, dvr_snat and dvr',
dict(agent_modes=['legacy', 'dvr_snat', 'dvr'],
expected_agent_modes=['legacy', 'dvr_snat', 'dvr',
'legacy'])),
('invalid',
dict(agent_modes=['invalid'],
expected_agent_modes=[])),
]
def setUp(self):
super(TestGetL3AgentsWithAgentModeFilter, self).setUp()
self.plugin = L3HAPlugin()
self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
self.adminContext = q_context.get_admin_context()
hosts = ['host_1', 'host_2', 'host_3', 'host_4', 'host_5']
agent_modes = ['legacy', 'dvr_snat', 'dvr', 'fake_mode', 'legacy']
for host, agent_mode in zip(hosts, agent_modes):
self._register_l3_agent(host, agent_mode, self.plugin)
def _get_agent_mode(self, agent):
agent_conf = self.plugin.get_configuration_dict(agent)
return agent_conf.get('agent_mode', 'None')
def test_get_l3_agents(self):
l3_agents = self.plugin.get_l3_agents(
self.adminContext, filters={'agent_modes': self.agent_modes})
self.assertEqual(len(self.expected_agent_modes), len(l3_agents))
returned_agent_modes = [self._get_agent_mode(agent)
for agent in l3_agents]
self.assertEqual(self.expected_agent_modes, returned_agent_modes)
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing images.
"""
from django.conf import settings
from django.core import validators
from django.forms import ValidationError
from django.forms.widgets import HiddenInput
from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
import six
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard import policy
IMAGE_BACKEND_SETTINGS = getattr(settings, 'OPENSTACK_IMAGE_BACKEND', {})
IMAGE_FORMAT_CHOICES = IMAGE_BACKEND_SETTINGS.get('image_formats', [])
class ImageURLField(forms.URLField):
default_validators = [validators.URLValidator(schemes=["http", "https"])]
def create_image_metadata(data):
"""Use the given dict of image form data to generate the metadata used for
creating the image in glance.
"""
    # Glance does not currently do much with container_format. For the three
    # Amazon image types it requires container_format to match disk_format;
    # otherwise it simply treats the image as 'bare'. So we derive the value
    # here rather than asking the user for information we can already
    # determine.
disk_format = data['disk_format']
if disk_format in ('ami', 'aki', 'ari',):
container_format = disk_format
elif disk_format == 'docker':
# To support docker containers we allow the user to specify
# 'docker' as the format. In that case we really want to use
# 'raw' as the disk format and 'docker' as the container format.
disk_format = 'raw'
container_format = 'docker'
elif disk_format == 'ova':
# If the user wishes to upload an OVA using Horizon, then
# 'ova' must be the container format and 'vmdk' must be the disk
# format.
container_format = 'ova'
disk_format = 'vmdk'
else:
container_format = 'bare'
meta = {'protected': data['protected'],
'disk_format': disk_format,
'container_format': container_format,
'min_disk': (data['minimum_disk'] or 0),
'min_ram': (data['minimum_ram'] or 0),
'name': data['name']}
is_public = data.get('is_public', data.get('public', False))
properties = {}
    # NOTE(tsufiev): the handling of empty non-base attributes (AKA metadata)
    # changed in V2: empty metadata is kept in image properties, while in V1
    # it was omitted. Skip an empty description (which is metadata) to keep
    # the behavior consistent between V1 and V2.
if data.get('description'):
properties['description'] = data['description']
if data.get('kernel'):
properties['kernel_id'] = data['kernel']
if data.get('ramdisk'):
properties['ramdisk_id'] = data['ramdisk']
if data.get('architecture'):
properties['architecture'] = data['architecture']
if api.glance.VERSIONS.active < 2:
meta.update({'is_public': is_public, 'properties': properties})
else:
meta['visibility'] = 'public' if is_public else 'private'
meta.update(properties)
return meta
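# --- Illustrative sketch (not wired into any view): roughly how the helper
# above maps a hypothetical form submission to glance metadata. The values
# below are made up for demonstration; only the keys mirror the form fields
# defined further down in this module.
def _example_create_image_metadata():
    sample_form_data = {
        'name': 'demo-image',
        'description': 'example only',
        'disk_format': 'docker',  # becomes disk_format='raw', container_format='docker'
        'protected': False,
        'is_public': True,
        'minimum_disk': 1,
        'minimum_ram': 0,
    }
    return create_image_metadata(sample_form_data)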
if api.glance.get_image_upload_mode() == 'direct':
FileField = forms.ExternalFileField
CreateParent = six.with_metaclass(forms.ExternalUploadMeta,
forms.SelfHandlingForm)
else:
FileField = forms.FileField
CreateParent = forms.SelfHandlingForm
class CreateImageForm(CreateParent):
name = forms.CharField(max_length=255, label=_("Name"))
description = forms.CharField(
max_length=255,
widget=forms.Textarea(attrs={'rows': 4}),
label=_("Description"),
required=False)
source_type = forms.ChoiceField(
label=_('Image Source'),
required=False,
choices=[('url', _('Image Location')),
('file', _('Image File'))],
widget=forms.ThemableSelectWidget(attrs={
'class': 'switchable',
'data-slug': 'source'}))
image_url_attrs = {
'class': 'switched',
'data-switch-on': 'source',
'data-source-url': _('Image Location'),
'ng-model': 'ctrl.copyFrom',
'ng-change': 'ctrl.selectImageFormat(ctrl.copyFrom)',
'placeholder': 'http://example.com/image.img'
}
image_url = ImageURLField(label=_("Image Location"),
help_text=_("An external (HTTP/HTTPS) URL to "
"load the image from."),
widget=forms.TextInput(attrs=image_url_attrs),
required=False)
image_attrs = {
'class': 'switched',
'data-switch-on': 'source',
'data-source-file': _('Image File'),
'ng-model': 'ctrl.imageFile',
'ng-change': 'ctrl.selectImageFormat(ctrl.imageFile.name)',
'image-file-on-change': None
}
image_file = FileField(label=_("Image File"),
help_text=_("A local image to upload."),
widget=forms.FileInput(attrs=image_attrs),
required=False)
kernel = forms.ChoiceField(
label=_('Kernel'),
required=False,
widget=forms.ThemableSelectWidget(
transform=lambda x: "%s (%s)" % (
x.name, defaultfilters.filesizeformat(x.size))))
ramdisk = forms.ChoiceField(
label=_('Ramdisk'),
required=False,
widget=forms.ThemableSelectWidget(
transform=lambda x: "%s (%s)" % (
x.name, defaultfilters.filesizeformat(x.size))))
disk_format = forms.ChoiceField(label=_('Format'),
choices=[],
widget=forms.ThemableSelectWidget(attrs={
'class': 'switchable',
'ng-model': 'ctrl.diskFormat'}))
architecture = forms.CharField(
max_length=255,
label=_("Architecture"),
help_text=_('CPU architecture of the image.'),
required=False)
minimum_disk = forms.IntegerField(
label=_("Minimum Disk (GB)"),
min_value=0,
help_text=_('The minimum disk size required to boot the image. '
'If unspecified, this value defaults to 0 (no minimum).'),
required=False)
minimum_ram = forms.IntegerField(
label=_("Minimum RAM (MB)"),
min_value=0,
help_text=_('The minimum memory size required to boot the image. '
'If unspecified, this value defaults to 0 (no minimum).'),
required=False)
is_copying = forms.BooleanField(
label=_("Copy Data"), initial=True, required=False,
help_text=_('Specify this option to copy image data to the image '
'service. If unspecified, image data will be used in its '
'current location.'),
widget=forms.CheckboxInput(attrs={
'class': 'switched',
'data-source-url': _('Image Location'),
'data-switch-on': 'source'}))
is_public = forms.BooleanField(
label=_("Public"),
help_text=_('Make the image visible across projects.'),
required=False)
protected = forms.BooleanField(
label=_("Protected"),
help_text=_('Prevent the deletion of the image.'),
required=False)
def __init__(self, request, *args, **kwargs):
super(CreateImageForm, self).__init__(request, *args, **kwargs)
if (api.glance.get_image_upload_mode() == 'off' or
not policy.check((("image", "upload_image"),), request)):
self._hide_file_source_type()
if not policy.check((("image", "set_image_location"),), request):
self._hide_url_source_type()
# GlanceV2 feature removals
if api.glance.VERSIONS.active >= 2:
# NOTE: GlanceV2 doesn't support copy-from feature, sorry!
self._hide_is_copying()
if not getattr(settings, 'IMAGES_ALLOW_LOCATION', False):
self._hide_url_source_type()
if (api.glance.get_image_upload_mode() == 'off' or not
policy.check((("image", "upload_image"),), request)):
# Neither setting a location nor uploading image data is
# allowed, so throw an error.
msg = _('The current Horizon settings indicate no valid '
'image creation methods are available. Providing '
'an image location and/or uploading from the '
'local file system must be allowed to support '
'image creation.')
messages.error(request, msg)
raise ValidationError(msg)
if not policy.check((("image", "publicize_image"),), request):
self._hide_is_public()
self.fields['disk_format'].choices = IMAGE_FORMAT_CHOICES
try:
kernel_images = api.glance.image_list_detailed(
request, filters={'disk_format': 'aki'})[0]
except Exception:
kernel_images = []
msg = _('Unable to retrieve image list.')
messages.error(request, msg)
if kernel_images:
choices = [('', _("Choose an image"))]
for image in kernel_images:
choices.append((image.id, image))
self.fields['kernel'].choices = choices
else:
del self.fields['kernel']
try:
ramdisk_images = api.glance.image_list_detailed(
request, filters={'disk_format': 'ari'})[0]
except Exception:
ramdisk_images = []
msg = _('Unable to retrieve image list.')
messages.error(request, msg)
if ramdisk_images:
choices = [('', _("Choose an image"))]
for image in ramdisk_images:
choices.append((image.id, image))
self.fields['ramdisk'].choices = choices
else:
del self.fields['ramdisk']
def _hide_file_source_type(self):
self.fields['image_file'].widget = HiddenInput()
source_type = self.fields['source_type']
source_type.choices = [choice for choice in source_type.choices
if choice[0] != 'file']
if len(source_type.choices) == 1:
source_type.widget = HiddenInput()
def _hide_url_source_type(self):
self.fields['image_url'].widget = HiddenInput()
source_type = self.fields['source_type']
source_type.choices = [choice for choice in source_type.choices
if choice[0] != 'url']
if len(source_type.choices) == 1:
source_type.widget = HiddenInput()
def _hide_is_public(self):
self.fields['is_public'].widget = HiddenInput()
self.fields['is_public'].initial = False
def _hide_is_copying(self):
self.fields['is_copying'].widget = HiddenInput()
self.fields['is_copying'].initial = False
def clean(self):
data = super(CreateImageForm, self).clean()
# The image_file key can be missing based on particular upload
# conditions. Code defensively for it here...
source_type = data.get('source_type', None)
image_file = data.get('image_file', None)
image_url = data.get('image_url', None)
if not image_url and not image_file:
msg = _("An image file or an external location must be specified.")
if source_type == 'file':
raise ValidationError({'image_file': [msg, ]})
else:
raise ValidationError({'image_url': [msg, ]})
else:
return data
def handle(self, request, data):
meta = create_image_metadata(data)
# Add image source file or URL to metadata
if (api.glance.get_image_upload_mode() != 'off' and
policy.check((("image", "upload_image"),), request) and
data.get('image_file', None)):
meta['data'] = data['image_file']
elif data.get('is_copying'):
meta['copy_from'] = data['image_url']
else:
meta['location'] = data['image_url']
try:
image = api.glance.image_create(request, **meta)
messages.info(request,
_('Your image %s has been queued for creation.') %
meta['name'])
return image
except Exception as e:
msg = _('Unable to create new image')
# TODO(nikunj2512): Fix this once it is fixed in glance client
if hasattr(e, 'code') and e.code == 400:
if "Invalid disk format" in e.details:
msg = _('Unable to create new image: Invalid disk format '
'%s for image.') % meta['disk_format']
elif "Image name too long" in e.details:
msg = _('Unable to create new image: Image name too long.')
elif "not supported" in e.details:
msg = _('Unable to create new image: URL scheme not '
'supported.')
exceptions.handle(request, msg)
return False
class UpdateImageForm(forms.SelfHandlingForm):
image_id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(max_length=255, label=_("Name"))
description = forms.CharField(
max_length=255,
widget=forms.Textarea(attrs={'rows': 4}),
label=_("Description"),
required=False)
kernel = forms.CharField(
max_length=36,
label=_("Kernel ID"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
ramdisk = forms.CharField(
max_length=36,
label=_("Ramdisk ID"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
architecture = forms.CharField(
label=_("Architecture"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
disk_format = forms.ThemableChoiceField(
label=_("Format"),
)
minimum_disk = forms.IntegerField(label=_("Minimum Disk (GB)"),
min_value=0,
help_text=_('The minimum disk size'
' required to boot the'
' image. If unspecified,'
' this value defaults to'
' 0 (no minimum).'),
required=False)
minimum_ram = forms.IntegerField(label=_("Minimum RAM (MB)"),
min_value=0,
help_text=_('The minimum memory size'
' required to boot the'
' image. If unspecified,'
' this value defaults to'
' 0 (no minimum).'),
required=False)
public = forms.BooleanField(label=_("Public"), required=False)
protected = forms.BooleanField(label=_("Protected"), required=False)
def __init__(self, request, *args, **kwargs):
super(UpdateImageForm, self).__init__(request, *args, **kwargs)
self.fields['disk_format'].choices = [(value, name) for value,
name in IMAGE_FORMAT_CHOICES
if value]
if not policy.check((("image", "publicize_image"),), request):
self.fields['public'].widget = forms.CheckboxInput(
attrs={'readonly': 'readonly', 'disabled': 'disabled'})
self.fields['public'].help_text = _(
'Non admin users are not allowed to make images public.')
def handle(self, request, data):
image_id = data['image_id']
error_updating = _('Unable to update image "%s".')
meta = create_image_metadata(data)
try:
image = api.glance.image_update(request, image_id, **meta)
messages.success(request, _('Image was successfully updated.'))
return image
except Exception:
exceptions.handle(request, error_updating % image_id)
|
|
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tool to test sigma rules.
This tool can be used to verify your rules before running an analyzer.
It also does not require you to have a full-blown Timesketch instance.
By default this tool will show only the rules that cause problems.
Example way of running the tool:
$ PYTHONPATH=. python3 test_tools/sigma_verify_rules.py --config_file
data/sigma_config.yaml --debug data/sigma/rules/windows/
--move data/sigma/rules/problematic/
"""
import logging
import os
import argparse
import sys
import pandas as pd
from timesketch.lib import sigma_util # pylint: disable=no-name-in-module
logger = logging.getLogger('timesketch.test_tool.sigma-verify')
logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO'))
def get_sigma_blocklist(blocklist_path='./data/sigma_blocklist.csv'):
"""Get a dataframe of sigma rules to ignore.
This includes filenames, paths, ids.
Args:
blocklist_path(str): Path to a blocklist file.
The default value is './data/sigma_blocklist.csv'
Returns:
Pandas dataframe with blocklist
Raises:
        ValueError: If the sigma blocklist file is not readable.
"""
if blocklist_path is None or blocklist_path == '':
blocklist_path = './data/sigma_blocklist.csv'
if not blocklist_path:
raise ValueError('No blocklist_file_path set via param or config file')
if not os.path.isfile(blocklist_path):
raise ValueError(
'Unable to open file: [{0:s}], it does not exist.'.format(
blocklist_path))
if not os.access(blocklist_path, os.R_OK):
raise ValueError(
'Unable to open file: [{0:s}], cannot open it for '
'read, please check permissions.'.format(blocklist_path))
return pd.read_csv(blocklist_path)
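# --- Illustrative sketch (not called by the tool itself): how the blocklist
# helper above is typically consumed. The CSV path is the module default and
# the 'path' column is the one run_verifier() below matches rule paths against.
def _example_blocklist_paths():
    blocklist_frame = get_sigma_blocklist()
    # Unique rule paths that should be skipped during verification.
    return list(blocklist_frame['path'].unique())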
def run_verifier(rules_path, config_file_path, blocklist_path=None):
    """Run a sigma parsing test on a directory and return results from the run.
Args:
rules_path (str): Path to the Sigma rules.
config_file_path (str): Path to a config file with Sigma mapping data.
blocklist_path (str): Optional path to a blocklist file.
The default value is none.
Raises:
        IOError: if no config file path is given, or if the rules path or
            the config file path does not exist.
Returns:
a tuple of lists:
- sigma_verified_rules with rules that can be added
- sigma_rules_with_problems with rules that should not be added
"""
if not config_file_path:
raise IOError('No config_file_path given')
if not os.path.isdir(rules_path):
raise IOError('Rules not found at path: {0:s}'.format(
rules_path))
if not os.path.isfile(config_file_path):
raise IOError('Config file path not found at path: {0:s}'.format(
config_file_path))
sigma_config = sigma_util.get_sigma_config_file(
config_file=config_file_path)
return_verified_rules = []
return_rules_with_problems = []
ignore = get_sigma_blocklist(blocklist_path)
ignore_list = list(ignore['path'].unique())
for dirpath, dirnames, files in os.walk(rules_path):
if 'deprecated' in [x.lower() for x in dirnames]:
dirnames.remove('deprecated')
for rule_filename in files:
if rule_filename.lower().endswith('.yml'):
# if a sub dir is found, do not try to parse it.
if os.path.isdir(os.path.join(dirpath, rule_filename)):
continue
rule_file_path = os.path.join(dirpath, rule_filename)
block_because_csv = False
if any(x in rule_file_path for x in ignore_list):
return_rules_with_problems.append(rule_file_path)
block_because_csv = True
if block_because_csv:
continue
try:
parsed_rule = sigma_util.get_sigma_rule(
rule_file_path, sigma_config)
print(parsed_rule)
                # This broad except is intentional: the purpose of this
                # function is to catch unknown exceptions from rule parsing
                # and document them.
except Exception: # pylint: disable=broad-except
                    logger.debug('Rule parsing error', exc_info=True)
                    return_rules_with_problems.append(rule_file_path)
                    # Skip to the next rule so it is not also counted as
                    # verified (parsed_rule may be stale or undefined here).
                    continue
if parsed_rule:
return_verified_rules.append(rule_file_path)
else:
return_rules_with_problems.append(rule_file_path)
return return_verified_rules, return_rules_with_problems
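# --- Illustrative sketch (not part of the CLI below): calling run_verifier()
# directly from Python. The paths are placeholders; adjust them to your local
# checkout.
def _example_run_verifier():
    verified, problematic = run_verifier(
        rules_path='data/sigma/rules/windows/',
        config_file_path='data/sigma_config.yaml')
    print('{0:d} rules verified, {1:d} rules with problems'.format(
        len(verified), len(problematic)))
    return verified, problematic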
def move_problematic_rule(filepath, move_to_path, reason=None):
"""Moves a problematic rule to a subfolder so it is not used again
Args:
filepath: path to the sigma rule that caused problems
move_to_path: path to move the problematic rules to
reason: optional reason why file is moved
"""
logging.info('Moving the rule: {0:s} to {1:s}'.format(
filepath, move_to_path))
try:
os.makedirs(move_to_path, exist_ok=True)
debug_path = os.path.join(move_to_path, 'debug.log')
        with open(debug_path, 'a') as file_object:
            file_object.write(f'{filepath}\n{reason}\n\n')
base_path = os.path.basename(filepath)
logging.info('Moving the rule: {0:s} to {1:s}'.format(
filepath, f'{move_to_path}{base_path}'))
os.rename(filepath, os.path.join(move_to_path, base_path))
except OSError:
logger.error('OS Error - rule not moved', exc_info=True)
if __name__ == '__main__':
description = (
        'Mock a sigma parser run. This tool is intended for developers '
'of sigma rules as well as Timesketch server admins. '
'The tool can also be used for automatic testing to make sure the '
'rules are still working as intended.')
epilog = (
'Remember to feed the tool with proper rule data.'
)
arguments = argparse.ArgumentParser(
description=description, allow_abbrev=True)
arguments.add_argument(
'--config_file', '--file', dest='config_file_path', action='store',
default='', type=str, metavar='PATH_TO_TEST_FILE', help=(
'Path to the file containing the config data to feed sigma '
))
arguments.add_argument(
'--blocklist_file', dest='blocklist_file_path', action='store',
default='', type=str, metavar='PATH_TO_BLOCK_FILE', help=(
'Path to the file containing the blocklist '
))
arguments.add_argument(
'rules_path', action='store', default='', type=str,
metavar='PATH_TO_RULES', help='Path to the rules to test.')
arguments.add_argument(
'--debug', action='store_true', help='print debug messages ')
arguments.add_argument(
'--info', action='store_true', help='print info messages ')
arguments.add_argument(
'--move', dest='move_to_path', action='store',
default='', type=str, help=(
'Move problematic rules to this path'
))
try:
options = arguments.parse_args()
except UnicodeEncodeError:
print(arguments.format_help())
sys.exit(1)
if options.debug:
logger.setLevel(logging.DEBUG)
if options.info:
logger.setLevel(logging.INFO)
if not os.path.isfile(options.config_file_path):
print('Config file not found.')
sys.exit(1)
if not os.path.isdir(options.rules_path):
print('The path to the rules does not exist ({0:s})'.format(
options.rules_path))
sys.exit(1)
if len(options.blocklist_file_path) > 0:
if not os.path.isfile(options.blocklist_file_path):
print('Blocklist file not found.')
sys.exit(1)
sigma_verified_rules, sigma_rules_with_problems = run_verifier(
rules_path=options.rules_path,
config_file_path=options.config_file_path,
blocklist_path=options.blocklist_file_path)
if len(sigma_rules_with_problems) > 0:
print('### Do NOT import below.###')
for badrule in sigma_rules_with_problems:
if options.move_to_path:
move_problematic_rule(
badrule, options.move_to_path,
'sigma_verify_rules.py found an issue')
print(badrule)
if len(sigma_verified_rules) > 0:
logging.info('### You can import the following rules ###')
for goodrule in sigma_verified_rules:
logging.info(goodrule)
|
|
import sublime
import sublime_plugin
import fnmatch
import os
import shutil
import sys
import threading
import time
try:
from . import tools
except ValueError:
from package_sync_helpers import tools
class Queue(object):
current = None
pool = []
def __init__(self):
pass
def start(self):
        # If the current thread is still running, check back later
if self.current and self.current["thread"].is_alive():
sublime.set_timeout(lambda: self.start(), 500)
else:
# Reset current thread, since it ended
self.current = None
# Check for elements in pool
if self.pool:
self.current = self.pool.pop(0)
self.current["thread"].start()
                # Attempt to start the next queued thread later
sublime.set_timeout(lambda: self.start(), 500)
def has(self, key):
        pool = self.pool + ([self.current] if self.current else [])
return any([item for item in pool if item["key"] == key])
def add(self, thread, key=None):
self.pool += [{"key": key if key else thread.name, "thread": thread}]
self.start()
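# --- Illustrative sketch (not executed on load): how the Queue above is
# typically combined with the Sync thread defined below. "online_sync" is a
# hypothetical queue key used to avoid scheduling the same sync twice.
def _example_enqueue_full_sync():
    queue = Queue()
    if not queue.has("online_sync"):
        # A full pull+push sync; Queue.start() spins until the thread ends.
        queue.add(Sync(mode=["pull", "push"]), key="online_sync")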
class Sync(threading.Thread):
def __init__(self, mode=["pull", "push"], override=False, item=None):
psync_settings = tools.get_psync_settings()
self.psync_settings = psync_settings
self.mode = mode
self.item = item
self.override = override
threading.Thread.__init__(self)
def run(self):
sync_interval = self.psync_settings.get("online_sync_interval", 1)
# Stop watcher and wait for the poll
tools.pause_watcher(
local="pull" in self.mode, remote="push" in self.mode)
# If no item pull and push all
if not self.item:
tools.log("PackageSync: Complete sync started.", force=True)
# Fetch all items from the remote location
if "pull" in self.mode:
self.pull_all()
# Push all items to the remote location
if "push" in self.mode:
self.push_all()
tools.log("PackageSync: Complete sync done.", force=True)
else:
# Pull the selected item
if "pull" in self.mode:
self.pull(self.item)
# Push the selected item
if "push" in self.mode:
self.push(self.item)
# Restart watcher again
tools.pause_watcher(
False, local="pull" in self.mode, remote="push" in self.mode)
def find_files(self, path):
tools.log("PackageSync: find_files started for %s" % path)
include_files = self.psync_settings["include_files"]
ignore_files = self.psync_settings["ignore_files"]
ignore_dirs = self.psync_settings["ignore_dirs"]
# tools.log("PackageSync: path %s" % path)
# tools.log("PackageSync: include_files %s" % include_files)
# tools.log("PackageSync: ignore_files %s" % ignore_files)
# tools.log("PackageSync: ignore_dirs %s" % ignore_dirs)
resources = {}
for root, dirs, files in os.walk(path):
            # Prune ignored directories in place so os.walk skips them
            dirs[:] = [d for d in dirs if d not in ignore_dirs]
for file in files:
absolute_path = os.path.join(root, file)
relative_path = os.path.relpath(absolute_path, path)
include_matches = [
fnmatch.fnmatch(relative_path, p) for p in include_files]
ignore_matches = [
fnmatch.fnmatch(relative_path, p) for p in ignore_files]
if any(ignore_matches) or not any(include_matches):
continue
resources[relative_path] = {"version": os.path.getmtime(
absolute_path), "path": absolute_path, "dir": os.path.dirname(relative_path)}
return resources
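    # Illustrative sketch of the mapping find_files() returns (values made up):
    #
    #   {
    #       "Preferences.sublime-settings": {
    #           "version": 1400000000.0,   # os.path.getmtime() of the file
    #           "path": "/.../Packages/User/Preferences.sublime-settings",
    #           "dir": ""                  # directory relative to the scanned root
    #       },
    #       ...
    #   }
    #
    # pull_all()/push_all() below compare these "version" timestamps between
    # the local and remote trees to build their diffs.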
def pull_all(self):
tools.log("PackageSync: pull_all started with override = %s" %
self.override)
local_dir = os.path.join(sublime.packages_path(), "User")
remote_dir = self.psync_settings["online_sync_folder"]
local_data = self.find_files(local_dir)
remote_data = self.find_files(remote_dir)
# Get data of last sync
last_run_data = tools.load_last_run_data()
last_run_data_local = last_run_data.get("last_run_data_local", {})
last_run_data_remote = last_run_data.get("last_run_data_remote", {})
deleted_local_data = [
key for key in last_run_data_local if key not in local_data]
deleted_remote_data = [
key for key in last_run_data_remote if key not in remote_data]
# tools.log("PackageSync: local_data: %s" % local_data)
# tools.log("PackageSync: remote_data: %s" % remote_data)
# tools.log("PackageSync: deleted_local_data: %s" % deleted_local_data)
# tools.log("PackageSync: deleted_remote_data: %s" % deleted_remote_data)
diff = [{"type": "d", "key": key}
for key in last_run_data_remote if key not in remote_data]
for key, value in remote_data.items():
if key in deleted_local_data:
pass
elif key not in local_data:
diff += [dict({"type": "c", "key": key}, **value)]
elif int(value["version"]) > int(local_data[key]["version"]) or self.override:
diff += [dict({"type": "m", "key": key}, **value)]
for item in diff:
self.pull(item)
# Set data for next last sync
tools.save_last_run_data(
last_run_data_local=self.find_files(local_dir),
last_run_data_remote=self.find_files(remote_dir))
def pull(self, item):
tools.log("PackageSync: pull started for %s" % item)
local_dir = os.path.join(sublime.packages_path(), "User")
remote_dir = self.psync_settings.get("sync_folder")
# Get data of last sync
last_run_data = tools.load_last_run_data()
last_run_data_local = last_run_data.get("last_run_data_local", {})
last_run_data_remote = last_run_data.get("last_run_data_remote", {})
# Make target file path and directory
target = os.path.join(local_dir, item["key"])
target_dir = os.path.dirname(target)
        # TODO -- Added for error mitigation, although theoretically this
        # should not be needed. Verify why the error is occurring for these
        # variables.
try:
previous_installed_packages
installed_packages
except NameError:
previous_installed_packages = []
installed_packages = []
# Skip if file was just pushed
try:
if item["type"] == "c" or item["type"] == "m":
# Check for an updated Package Control setting file and backup
# old file
if item["key"] == "Package Control.sublime-settings":
previous_installed_packages = tools.load_installed_packages(
target)
installed_packages = tools.load_installed_packages(
item["path"])
# Check if the watcher detects a file again
if last_run_data_local[item["key"]]["version"] == item["version"]:
# tools.log("PackageSync: Already pulled")
return
except:
pass
# If a file was created
if item["type"] == "c":
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
shutil.copy2(item["path"], target)
tools.log("PackageSync: Created %s" % target)
#
last_run_data_local[item["key"]] = {
"path": target, "dir": item["dir"], "version": item["version"]}
last_run_data_remote[item["key"]] = {
"path": item["path"], "dir": item["dir"], "version": item["version"]}
        # If a file was deleted
elif item["type"] == "d":
if os.path.isfile(target):
os.remove(target)
tools.log("PackageSync: Deleted %s" % target)
try:
del last_run_data_local[item["key"]]
del last_run_data_remote[item["key"]]
except:
pass
            # Check if the directory is empty and remove it if so (cosmetic cleanup)
if os.path.isdir(target_dir) and not os.listdir(target_dir):
os.rmdir(target_dir)
# If a file was modified
elif item["type"] == "m":
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
shutil.copy2(item["path"], target)
tools.log("PackageSync: Updated %s" % target)
#
last_run_data_local[item["key"]] = {
"path": target, "dir": item["dir"], "version": item["version"]}
last_run_data_remote[item["key"]] = {
"path": item["path"], "dir": item["dir"], "version": item["version"]}
# Set data for next last sync
tools.save_last_run_data(
last_run_data_local=last_run_data_local,
last_run_data_remote=last_run_data_remote)
if item["type"] != "d" and item["key"] == "Package Control.sublime-settings":
# Handle Package Control
self.pull_package_control(
last_run_data, previous_installed_packages, installed_packages)
def pull_package_control(self, last_run_data, previous_installed_packages, installed_packages):
# Save items to remove
to_install = [
item for item in installed_packages if item not in previous_installed_packages]
to_remove = [
item for item in previous_installed_packages if item not in installed_packages]
        tools.log("PackageSync: install: %s" % to_install)
        tools.log("PackageSync: remove: %s" % to_remove)
# Check for old remove_packages
packages_to_remove = last_run_data.get("packages_to_remove", [])
packages_to_remove += [item for item in to_remove if item !=
"Package Control" and item not in packages_to_remove]
        tools.log("PackageSync: packages_to_remove %s" % packages_to_remove)
if packages_to_remove:
removed_packages = tools.remove_packages(packages_to_remove)
else:
removed_packages = []
# Check if new packages are available and run package cleanup to
# install missing packages
if to_install:
            sublime.set_timeout(tools.install_new_packages, 1000)
tools.save_last_run_data(
packages_to_remove=[item for item in packages_to_remove if item not in removed_packages])
def push_all(self):
tools.log("PackageSync: push_all started with override = %s" %
self.override)
local_dir = os.path.join(sublime.packages_path(), "User")
remote_dir = self.psync_settings.get("online_sync_folder")
local_data = self.find_files(local_dir)
remote_data = self.find_files(remote_dir)
# Get data of last sync
last_run_data = tools.load_last_run_data()
last_run_data_local = last_run_data.get("last_run_data_local", {})
last_run_data_remote = last_run_data.get("last_run_data_remote", {})
deleted_local_data = [
key for key in last_run_data_local if key not in local_data]
deleted_remote_data = [
key for key in last_run_data_remote if key not in remote_data]
# tools.log("PackageSync: local_data: %s" % local_data)
# tools.log("PackageSync: remote_data: %s" % remote_data)
# tools.log("PackageSync: deleted_local_data: %s" % deleted_local_data)
# tools.log("PackageSync: deleted_remote_data: %s" % deleted_remote_data)
diff = [{"type": "d", "key": key}
for key in last_run_data_local if key not in local_data]
for key, value in local_data.items():
if key in deleted_remote_data:
pass
elif key not in remote_data:
diff += [dict({"type": "c", "key": key}, **value)]
elif int(value["version"]) > int(remote_data[key]["version"]) or self.override:
diff += [dict({"type": "m", "key": key}, **value)]
for item in diff:
self.push(item)
# Set data for next last sync
tools.save_last_run_data(
last_run_data_local=self.find_files(local_dir),
last_run_data_remote=self.find_files(remote_dir))
def push(self, item):
tools.log("PackageSync: push started for %s" % item)
local_dir = os.path.join(sublime.packages_path(), "User")
remote_dir = self.psync_settings.get("online_sync_folder")
# Get data of last sync
last_run_data = tools.load_last_run_data()
last_run_data_local = last_run_data.get("last_run_data_local", {})
last_run_data_remote = last_run_data.get("last_run_data_remote", {})
# Skip if file was just copied
try:
if item["type"] == "c" or item["type"] == "m":
if last_run_data_remote[item["key"]]["version"] == item["version"]:
tools.log("PackageSync: Already pushed")
return
except:
pass
# Make target file path and dir
target = os.path.join(remote_dir, item["key"])
target_dir = os.path.dirname(target)
if item["type"] == "c":
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
shutil.copy2(item["path"], target)
tools.log("PackageSync: Created %s" % target)
#
last_run_data_local[item["key"]] = {
"path": item["path"], "dir": item["dir"], "version": item["version"]}
last_run_data_remote[item["key"]] = {
"path": target, "dir": item["dir"], "version": item["version"]}
elif item["type"] == "d":
if os.path.isfile(target):
os.remove(target)
tools.log("PackageSync: Deleted %s" % target)
try:
del last_run_data_local[item["key"]]
del last_run_data_remote[item["key"]]
except:
pass
            # Check if the directory is empty and remove it if so
if os.path.isdir(target_dir) and not os.listdir(target_dir):
os.rmdir(target_dir)
elif item["type"] == "m":
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
shutil.copy2(item["path"], target)
tools.log("PackageSync: Updated %s" % target)
#
last_run_data_local[item["key"]] = {
"path": item["path"], "dir": item["dir"], "version": item["version"]}
last_run_data_remote[item["key"]] = {
"path": target, "dir": item["dir"], "version": item["version"]}
# Set data for next last sync
tools.save_last_run_data(
last_run_data_local=last_run_data_local,
last_run_data_remote=last_run_data_remote)
|
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A lightweight wrapper around MySQLdb.
Originally part of the Tornado framework. The tornado.database module
is slated for removal in Tornado 3.0, and it is now available separately
as torndb.
"""
from __future__ import absolute_import, division, with_statement
import copy
import itertools
import logging
import os
import time
try:
import MySQLdb.constants
import MySQLdb.converters
import MySQLdb.cursors
except ImportError:
# If MySQLdb isn't available this module won't actually be useable,
# but we want it to at least be importable on readthedocs.org,
# which has limitations on third-party modules.
if 'READTHEDOCS' in os.environ:
MySQLdb = None
else:
raise
version = "0.3"
version_info = (0, 3, 0, 0)
class Connection(object):
"""A lightweight wrapper around MySQLdb DB-API connections.
The main value we provide is wrapping rows in a dict/object so that
columns can be accessed by name. Typical usage::
db = torndb.Connection("localhost", "mydatabase")
for article in db.query("SELECT * FROM articles"):
print article.title
Cursors are hidden by the implementation, but other than that, the methods
are very similar to the DB-API.
    We explicitly set the timezone to UTC and assume the character encoding is
    UTF-8 (this can be changed) on all connections to avoid time zone and
    encoding errors.
    The sql_mode parameter defaults to "TRADITIONAL", which "gives an error
    instead of a warning"
    (http://dev.mysql.com/doc/refman/5.0/en/server-sql-mode.html). However, it
    can be set to any other mode, including blank (None), thereby explicitly
    clearing the SQL mode.
"""
def __init__(self, host, database, user=None, password=None,
max_idle_time=7 * 3600, connect_timeout=0,
time_zone="+0:00", charset = "utf8", sql_mode="TRADITIONAL"):
self.host = host
self.database = database
self.max_idle_time = float(max_idle_time)
args = dict(conv=CONVERSIONS, use_unicode=True, charset=charset,
db=database, init_command=('SET time_zone = "%s"' % time_zone),
connect_timeout=connect_timeout, sql_mode=sql_mode)
if user is not None:
args["user"] = user
if password is not None:
args["passwd"] = password
# We accept a path to a MySQL socket file or a host(:port) string
if "/" in host:
args["unix_socket"] = host
else:
self.socket = None
pair = host.split(":")
if len(pair) == 2:
args["host"] = pair[0]
args["port"] = int(pair[1])
else:
args["host"] = host
args["port"] = 3306
self._db = None
self._db_args = args
self._last_use_time = time.time()
try:
self.reconnect()
except Exception:
logging.error("Cannot connect to MySQL on %s", self.host,
exc_info=True)
def __del__(self):
self.close()
def close(self):
"""Closes this database connection."""
if getattr(self, "_db", None) is not None:
self._db.close()
self._db = None
def reconnect(self):
"""Closes the existing database connection and re-opens it."""
self.close()
self._db = MySQLdb.connect(**self._db_args)
self._db.autocommit(True)
def iter(self, query, *parameters, **kwparameters):
"""Returns an iterator for the given query and parameters."""
self._ensure_connected()
cursor = MySQLdb.cursors.SSCursor(self._db)
try:
self._execute(cursor, query, parameters, kwparameters)
column_names = [d[0] for d in cursor.description]
for row in cursor:
yield Row(zip(column_names, row))
finally:
cursor.close()
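    # Illustrative sketch: iter() streams rows with a server-side cursor, so it
    # suits large result sets where query() would buffer everything in memory.
    # (Placeholder table/column names.)
    #
    #   for row in db.iter("SELECT id, title FROM articles"):
    #       handle(row.title)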
def query(self, query, *parameters, **kwparameters):
"""Returns a row list for the given query and parameters."""
cursor = self._cursor()
try:
self._execute(cursor, query, parameters, kwparameters)
column_names = [d[0] for d in cursor.description]
return [Row(itertools.izip(column_names, row)) for row in cursor]
finally:
cursor.close()
def get(self, query, *parameters, **kwparameters):
"""Returns the (singular) row returned by the given query.
If the query has no results, returns None. If it has
more than one result, raises an exception.
"""
rows = self.query(query, *parameters, **kwparameters)
if not rows:
return None
elif len(rows) > 1:
raise Exception("Multiple rows returned for Database.get() query")
else:
return rows[0]
# rowcount is a more reasonable default return value than lastrowid,
# but for historical compatibility execute() must return lastrowid.
def execute(self, query, *parameters, **kwparameters):
"""Executes the given query, returning the lastrowid from the query."""
return self.execute_lastrowid(query, *parameters, **kwparameters)
def execute_lastrowid(self, query, *parameters, **kwparameters):
"""Executes the given query, returning the lastrowid from the query."""
cursor = self._cursor()
try:
self._execute(cursor, query, parameters, kwparameters)
return cursor.lastrowid
finally:
cursor.close()
def execute_rowcount(self, query, *parameters, **kwparameters):
"""Executes the given query, returning the rowcount from the query."""
cursor = self._cursor()
try:
self._execute(cursor, query, parameters, kwparameters)
return cursor.rowcount
finally:
cursor.close()
def executemany(self, query, parameters):
"""Executes the given query against all the given param sequences.
We return the lastrowid from the query.
"""
return self.executemany_lastrowid(query, parameters)
def executemany_lastrowid(self, query, parameters):
"""Executes the given query against all the given param sequences.
We return the lastrowid from the query.
"""
cursor = self._cursor()
try:
cursor.executemany(query, parameters)
return cursor.lastrowid
finally:
cursor.close()
def executemany_rowcount(self, query, parameters):
"""Executes the given query against all the given param sequences.
We return the rowcount from the query.
"""
cursor = self._cursor()
try:
cursor.executemany(query, parameters)
return cursor.rowcount
finally:
cursor.close()
update = execute_rowcount
updatemany = executemany_rowcount
insert = execute_lastrowid
insertmany = executemany_lastrowid
def _ensure_connected(self):
        # MySQL by default closes client connections that are idle for
# 8 hours, but the client library does not report this fact until
# you try to perform a query and it fails. Protect against this
# case by preemptively closing and reopening the connection
# if it has been idle for too long (7 hours by default).
if (self._db is None or
(time.time() - self._last_use_time > self.max_idle_time)):
self.reconnect()
self._last_use_time = time.time()
def _cursor(self):
self._ensure_connected()
return self._db.cursor()
def _execute(self, cursor, query, parameters, kwparameters):
try:
return cursor.execute(query, kwparameters or parameters)
except OperationalError:
logging.error("Error connecting to MySQL on %s", self.host)
self.close()
raise
class Row(dict):
"""A dict that allows for object-like property access syntax."""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
if MySQLdb is not None:
# Fix the access conversions to properly recognize unicode/binary
FIELD_TYPE = MySQLdb.constants.FIELD_TYPE
FLAG = MySQLdb.constants.FLAG
CONVERSIONS = copy.copy(MySQLdb.converters.conversions)
field_types = [FIELD_TYPE.BLOB, FIELD_TYPE.STRING, FIELD_TYPE.VAR_STRING]
if 'VARCHAR' in vars(FIELD_TYPE):
field_types.append(FIELD_TYPE.VARCHAR)
for field_type in field_types:
CONVERSIONS[field_type] = [(FLAG.BINARY, str)] + CONVERSIONS[field_type]
# Alias some common MySQL exceptions
IntegrityError = MySQLdb.IntegrityError
OperationalError = MySQLdb.OperationalError
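# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, hedged): a minimal example of how the
# connection wrapper above might be used. It assumes the class is named
# Connection (as in the torndb/tornado.database module this code mirrors),
# and that a MySQL server is reachable; the host, database, credentials and
# table below are hypothetical placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    db = Connection("127.0.0.1:3306", "example_db", user="web", password="secret")
    # insert()/execute() return lastrowid; update()/execute_rowcount() return rowcount
    new_id = db.insert("INSERT INTO entries (title) VALUES (%s)", "hello world")
    # query() returns a list of Row objects, which allow attribute-style access
    for entry in db.query("SELECT id, title FROM entries WHERE id = %s", new_id):
        print("%d  %s" % (entry.id, entry.title))
    db.close()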
|
|
# -*- coding: utf-8 -*-
import unittest
import os # noqa: F401
import time
import hashlib
import inspect
import requests
import shutil
import zipfile
import glob
from datetime import datetime
from string import Template
from os import environ
try:
from ConfigParser import ConfigParser # py2
except BaseException:
from configparser import ConfigParser # py3
from pprint import pprint # noqa: F401
from biokbase.workspace.client import Workspace as workspaceService
from GenomeFileUtil.GenomeFileUtilClient import GenomeFileUtil
from ReadsUtils.ReadsUtilsClient import ReadsUtils
from ReadsAlignmentUtils.ReadsAlignmentUtilsClient import ReadsAlignmentUtils
from DataFileUtil.DataFileUtilClient import DataFileUtil
from DifferentialExpressionUtils.DifferentialExpressionUtilsClient import DifferentialExpressionUtils
from kb_cufflinks.kb_cufflinksImpl import kb_cufflinks
from kb_cufflinks.kb_cufflinksServer import MethodContext
from kb_cufflinks.authclient import KBaseAuth as _KBaseAuth
from kb_stringtie.kb_stringtieClient import kb_stringtie
class CuffdiffTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = environ.get('KB_AUTH_TOKEN', None)
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('kb_cufflinks'):
cls.cfg[nameval[0]] = nameval[1]
# Getting username from Auth profile for token
authServiceUrl = cls.cfg['auth-service-url']
# authServiceUrlAllowInsecure = cls.cfg['auth_service_url_allow_insecure']
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'kb_cufflinks',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL)
cls.serviceImpl = kb_cufflinks(cls.cfg)
cls.scratch = cls.cfg['scratch']
cls.callback_url = os.environ['SDK_CALLBACK_URL']
cls.gfu = GenomeFileUtil(cls.callback_url)
cls.dfu = DataFileUtil(cls.callback_url)
cls.ru = ReadsUtils(cls.callback_url)
cls.rau = ReadsAlignmentUtils(cls.callback_url, service_ver='dev')
cls.deu = DifferentialExpressionUtils(cls.callback_url, service_ver='dev')
suffix = int(time.time() * 1000)
cls.wsName = "test_cuffdiff_" + str(suffix)
cls.wsClient.create_workspace({'workspace': cls.wsName})
cls.stringtie = kb_stringtie(cls.callback_url, service_ver='dev')
#cls.prepare_data()
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
return self.__class__.wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
@classmethod
def prepare_data(cls):
# upload genome object
genbank_file_name = 'minimal.gbff'
genbank_file_path = os.path.join(cls.scratch, genbank_file_name)
shutil.copy(os.path.join('data', genbank_file_name), genbank_file_path)
genome_object_name = 'test_Genome'
cls.genome_ref = cls.gfu.genbank_to_genome({'file': {'path': genbank_file_path},
'workspace_name': cls.wsName,
'genome_name': genome_object_name
})['genome_ref']
# upload reads object
reads_file_name = 'Sample1.fastq'
reads_file_path = os.path.join(cls.scratch, reads_file_name)
shutil.copy(os.path.join('data', reads_file_name), reads_file_path)
reads_object_name_1 = 'test_Reads_1'
cls.reads_ref_1 = cls.ru.upload_reads({'fwd_file': reads_file_path,
'wsname': cls.wsName,
'sequencing_tech': 'Unknown',
'interleaved': 0,
'name': reads_object_name_1
})['obj_ref']
reads_object_name_2 = 'test_Reads_2'
cls.reads_ref_2 = cls.ru.upload_reads({'fwd_file': reads_file_path,
'wsname': cls.wsName,
'sequencing_tech': 'Unknown',
'interleaved': 0,
'name': reads_object_name_2
})['obj_ref']
reads_object_name_3 = 'test_Reads_3'
cls.reads_ref_3 = cls.ru.upload_reads({'fwd_file': reads_file_path,
'wsname': cls.wsName,
'sequencing_tech': 'Unknown',
'interleaved': 0,
'name': reads_object_name_3
})['obj_ref']
# upload alignment object
alignment_file_name = 'accepted_hits.bam'
# alignment_file_name = 'Ath_WT_R1.fastq.sorted.bam'
alignment_file_path = os.path.join(cls.scratch, alignment_file_name)
shutil.copy(os.path.join('data', alignment_file_name), alignment_file_path)
alignment_object_name_1 = 'test_Alignment_1'
cls.condition_1 = 'test_condition_1'
cls.alignment_ref_1 = cls.rau.upload_alignment(
{'file_path': alignment_file_path,
'destination_ref': cls.wsName + '/' + alignment_object_name_1,
'read_library_ref': cls.reads_ref_1,
'condition': cls.condition_1,
'library_type': 'single_end',
'assembly_or_genome_ref': cls.genome_ref
})['obj_ref']
alignment_object_name_2 = 'test_Alignment_2'
cls.condition_2 = 'test_condition_2'
cls.alignment_ref_2 = cls.rau.upload_alignment(
{'file_path': alignment_file_path,
'destination_ref': cls.wsName + '/' + alignment_object_name_2,
'read_library_ref': cls.reads_ref_2,
'condition': cls.condition_2,
'library_type': 'single_end',
'assembly_or_genome_ref': cls.genome_ref
})['obj_ref']
alignment_object_name_3 = 'test_Alignment_3'
cls.condition_3 = 'test_condition_3'
cls.alignment_ref_3 = cls.rau.upload_alignment(
{'file_path': alignment_file_path,
'destination_ref': cls.wsName + '/' + alignment_object_name_3,
'read_library_ref': cls.reads_ref_3,
'condition': cls.condition_3,
'library_type': 'single_end',
             'assembly_or_genome_ref': cls.genome_ref
             })['obj_ref']
# upload sample_set object
workspace_id = cls.dfu.ws_name_to_id(cls.wsName)
sample_set_object_name = 'test_Sample_Set'
sample_set_data = {
'sampleset_id': sample_set_object_name,
'sampleset_desc': 'test sampleset object',
'Library_type': 'SingleEnd',
'condition': [cls.condition_1, cls.condition_2, cls.condition_3],
'domain': 'Unknown',
'num_samples': 3,
'platform': 'Unknown'}
save_object_params = {
'id': workspace_id,
'objects': [{
'type': 'KBaseRNASeq.RNASeqSampleSet',
'data': sample_set_data,
'name': sample_set_object_name
}]
}
dfu_oi = cls.dfu.save_objects(save_object_params)[0]
cls.sample_set_ref = str(dfu_oi[6]) + '/' + str(dfu_oi[0]) + '/' + str(dfu_oi[4])
# upload alignment_set object
object_type = 'KBaseRNASeq.RNASeqAlignmentSet'
alignment_set_object_name = 'test_Alignment_Set'
alignment_set_data = {
'genome_id': cls.genome_ref,
'read_sample_ids': [reads_object_name_1,
reads_object_name_2,
reads_object_name_3],
'mapped_rnaseq_alignments': [{reads_object_name_1: alignment_object_name_1},
{reads_object_name_2: alignment_object_name_2},
{reads_object_name_3: alignment_object_name_3}],
'mapped_alignments_ids': [{reads_object_name_1: cls.alignment_ref_1},
{reads_object_name_2: cls.alignment_ref_2},
{reads_object_name_3: cls.alignment_ref_3}],
'sample_alignments': [cls.alignment_ref_1,
cls.alignment_ref_2,
cls.alignment_ref_3],
'sampleset_id': cls.sample_set_ref}
save_object_params = {
'id': workspace_id,
'objects': [{
'type': object_type,
'data': alignment_set_data,
'name': alignment_set_object_name
}]
}
dfu_oi = cls.dfu.save_objects(save_object_params)[0]
cls.alignment_set_ref = str(dfu_oi[6]) + '/' + str(dfu_oi[0]) + '/' + str(dfu_oi[4])
# upload expression_set object
cls.expressionset_ref = cls.stringtie.run_stringtie_app(
{'alignment_object_ref': cls.alignment_set_ref,
'workspace_name': cls.wsName,
"min_read_coverage": 2.5,
"junction_base": 10,
"num_threads": 3,
"min_isoform_abundance": 0.1,
"min_length": 200,
"skip_reads_with_no_ref": 1,
"merge": 0,
"junction_coverage": 1,
"ballgown_mode": 1,
"min_locus_gap_sep_value": 50,
"disable_trimming": 1})['expression_obj_ref']
@classmethod
def getSize(cls, filename):
return os.path.getsize(filename)
@classmethod
def md5(cls, filename):
with open(filename, 'rb') as file_:
hash_md5 = hashlib.md5()
buf = file_.read(65536)
while len(buf) > 0:
hash_md5.update(buf)
buf = file_.read(65536)
return hash_md5.hexdigest()
def check_files(self, new_dir, orig_dir):
self.assertEqual(len(os.listdir(new_dir)),
len(os.listdir(orig_dir)))
for new_file in os.listdir(new_dir):
new_file_path = os.path.join(new_dir, new_file)
orig_file_path = os.path.join(orig_dir, new_file)
if not zipfile.is_zipfile(new_file_path):
                print('')
print("%%%%%%%%%%%%%%%%%%%% new_file_path: ", new_file_path)
print("%%%%%%%%%%%%%%%%%%%% orig_file_path: ", orig_file_path)
if self.getSize(new_file_path) != self.getSize(orig_file_path):
print('************** sizes differ ************')
if self.md5(new_file_path) != self.md5(orig_file_path):
print('************** md5s differ **************')
    # NOTE: According to Python unittest naming rules, test method names should start with 'test'. # noqa
    # The following test uses object refs from a narrative. Comment out the next line to run the test.
@unittest.skip("skipped test_cuffdiff_RNASeq_objects_success")
def test_cuffdiff_RNASeq_objects_success(self):
"""
Input object: downsized_AT_reads_tophat_AlignmentSet_cufflinks_ExpressionSet (4389/45/1)
Expected output object: downsized_AT_tophat_cufflinks_cuffdiff_output (4389/58/1)
Files in output object should be the same as in expected output object
"""
input_obj_ref = '4389/45/1'
expected_obj_ref = '4389/58/1'
params = {'expressionset_ref': input_obj_ref,
'workspace_name': self.getWsName(),
'output_obj_name': 'test_cuffdiff_rnaseqExprSet',
'filtered_expression_matrix_name': 'test_output_expmatrix',
'library_norm_method': 'classic-fpkm',
'library_type': 'fr-unstranded'
}
cuffdiff_retVal = self.getImpl().run_Cuffdiff(self.ctx, params)[0]
inputObj = self.dfu.get_objects(
{'object_refs': [input_obj_ref]})['data'][0]
print("============ INPUT EXPRESSION SET OBJECT ==============")
pprint(inputObj)
print("==========================================================")
outputObj = self.dfu.get_objects(
{'object_refs': [cuffdiff_retVal.get('diffExprMatrixSet_ref')]})['data'][0]
print("============ OUTPUT FROM CUFFDIFF ==============")
pprint(cuffdiff_retVal)
print("============ DIFFERENTIAL EXPRESSION MATRIX SET OUTPUT ==============")
pprint(outputObj)
print("==========================================================")
"""
Get files from expected object ref
"""
timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds() * 1000)
expected_dir = os.path.join(self.scratch, 'expected_' + str(timestamp))
os.mkdir(expected_dir)
expectedObj = self.dfu.get_objects(
{'object_refs': [expected_obj_ref]})['data'][0]
expectedFile = expectedObj['data']['file']
expectedFile_ret = self.dfu.shock_to_file({
'shock_id': expectedFile['id'],
'file_path': expected_dir,
'unpack': 'unpack'
})
for f in glob.glob(expected_dir + '/*.zip'):
os.remove(f)
'''
self.assertEqual(outputObj['info'][2].startswith('KBaseRNASeq.RNASeqDifferentialExpression'), True)
inputData = inputObj['data']
outputData = outputObj['data']
self.assertEqual(outputData['genome_id'], inputData['genome_id'])
self.assertEqual(outputData['expressionSet_id'], input_obj_ref)
self.assertEqual(outputData['alignmentSet_id'], inputData['alignmentSet_id'])
self.assertEqual(outputData['sampleset_id'], inputData['sampleset_id'])
output_dir = self.deu.download_differentialExpression(
{'source_ref': cuffdiff_retVal.get('diffexpr_obj_ref')}).get('destination_dir')
self.check_files(output_dir, expected_dir)
'''
def test_cuffdiff_success(self):
inputObj = self.dfu.get_objects(
{'object_refs': [self.expressionset_ref]})['data'][0]
print("============ EXPRESSION SET OBJECT FROM STRINGTIE ==============")
pprint(inputObj)
print("==========================================================")
cuffdiff_params = { 'expressionset_ref': self.expressionset_ref,
'workspace_name': self.getWsName(),
'output_obj_name': 'test_cuffdiff_createdExprSet',
'filtered_expression_matrix_name': 'test_output_expmatrix',
'library_norm_method': 'classic-fpkm',
'library_type': 'fr-unstranded'
}
cuffdiff_retVal = self.getImpl().run_Cuffdiff(self.ctx, cuffdiff_params)[0]
outputObj = self.dfu.get_objects(
{'object_refs': [cuffdiff_retVal.get('diffExprMatrixSet_ref')]})['data'][0]
print("============ DIFFERENTIAL EXPRESSION MATRIX SET OBJECT FROM CUFFDIFF ==============")
pprint(outputObj)
print("================================================================")
def fail_cuffdiff(self, params, error, exception=ValueError, do_startswith=False):
test_name = inspect.stack()[1][3]
print('\n*** starting expected cuffdiff fail test: ' + test_name + ' **********************')
with self.assertRaises(exception) as context:
self.getImpl().run_Cuffdiff(self.ctx, params)
if do_startswith:
self.assertTrue(str(context.exception.message).startswith(error),
"Error message {} does not start with {}".format(
str(context.exception.message),
error))
else:
self.assertEqual(error, str(context.exception.message))
def test_cuffdiff_fail_no_ws_name(self):
self.fail_cuffdiff(
{
'expressionset_ref': self.expressionset_ref,
'output_obj_name': 'test_createdExprSet'
},
'"workspace_name" parameter is required, but missing')
def test_cuffdiff_fail_no_obj_name(self):
self.fail_cuffdiff(
{
'workspace_name': self.getWsName(),
'expressionset_ref': self.expressionset_ref
},
'"output_obj_name" parameter is required, but missing')
def test_cuffdiff_fail_no_exprset_ref(self):
self.fail_cuffdiff(
{
'workspace_name': self.getWsName(),
'output_obj_name': 'test_createdExprSet'
},
'"expressionset_ref" parameter is required, but missing')
def test_cuffdiff_fail_bad_wsname(self):
self.fail_cuffdiff(
{
'workspace_name': '&bad',
'expressionset_ref': self.expressionset_ref,
'output_obj_name': 'test_createdExprSet'
},
'Illegal character in workspace name &bad: &')
def test_cuffdiff_fail_non_existant_wsname(self):
self.fail_cuffdiff(
{
'workspace_name': '1s',
'expressionset_ref': self.expressionset_ref,
'output_obj_name': 'test_createdExprSet'
},
'No workspace with name 1s exists')
def test_cuffdiff_fail_non_expset_ref(self):
self.fail_cuffdiff(
{
'workspace_name': self.getWsName(),
'expressionset_ref': self.reads_ref_1,
'output_obj_name': 'test_createdExprSet'
},
'"expressionset_ref" should be of type KBaseRNASeq.RNASeqExpressionSet',
exception=TypeError)
|
|
#!/usr/bin/python
#Created : Fri 24 Jul 2015 09:18:22 AM UTC
#Last Modified : Tue 11 Aug 2015 10:38:22 AM MYT
#qpy:console
import site
import os
import sys
import time
import sqlite3
from peewee import *
import datetime
import io
db = SqliteDatabase('lessonplan2010.db', **{})
#db = SqliteDatabase('/storage/extSdCard/englishdb/lessonplan2010.db', **{})
class BaseModel(Model):
class Meta:
database = db
class F5Comprehension(BaseModel):
text = TextField(null=True)
topic = CharField(null=True)
class Meta:
db_table = 'f5comprehension'
class F5Literature(BaseModel):
chapter = CharField(null=True)
explain1 = CharField(null=True)
explain2 = CharField(null=True)
explain3 = CharField(null=True)
explain4 = CharField(null=True)
optionfour = CharField(null=True)
optionone = CharField(null=True)
optionthree = CharField(null=True)
optiontwo = CharField(null=True)
question = CharField(null=True)
tickfour = CharField(null=True)
tickone = CharField(null=True)
tickthree = CharField(null=True)
ticktwo = CharField(null=True)
class Meta:
db_table = 'f5literature'
class F5Litno34(BaseModel):
text = TextField(null=True)
topic = CharField(null=True)
class Meta:
db_table = 'f5litno34'
class F5Notesessay(BaseModel):
text = TextField(null=True)
topic = CharField(null=True)
class Meta:
db_table = 'f5notesessay'
class Lessonplan2010(BaseModel):
activity1 = CharField(null=True)
activity2 = CharField(null=True)
assimilation = CharField(null=True)
content = CharField(null=True)
date = IntegerField(null=True)
duration = CharField(null=True)
exercise = TextField(null=True)
handout = TextField(null=True)
impact = CharField(null=True)
lo1 = CharField(null=True)
lo2 = CharField(null=True)
lo3 = CharField(null=True)
note = CharField(null=True)
theme = CharField(null=True)
timeend = CharField(null=True)
timestart = CharField(null=True)
tingkatan = CharField(null=True)
topic = CharField(null=True)
week = CharField(null=True)
class Meta:
db_table = 'lessonplan2010'
class Lessonplan2011(BaseModel):
activity1 = CharField(null=True)
activity2 = CharField(null=True)
assimilation = CharField(null=True)
content = CharField(null=True)
date = IntegerField(null=True)
duration = CharField(null=True)
exercise = TextField(null=True)
handout = TextField(null=True)
impact = CharField(null=True)
lo1 = CharField(null=True)
lo2 = CharField(null=True)
lo3 = CharField(null=True)
note = CharField(null=True)
theme = CharField(null=True)
timeend = CharField(null=True)
timestart = CharField(null=True)
tingkatan = CharField(null=True)
topic = CharField(null=True)
week = CharField(null=True)
class Meta:
db_table = 'lessonplan2011'
class Lessonplan2012(BaseModel):
activity1 = CharField(null=True)
activity2 = CharField(null=True)
assimilation = CharField(null=True)
content = CharField(null=True)
date = IntegerField(null=True)
duration = CharField(null=True)
exercise = TextField(null=True)
handout = TextField(null=True)
impact = CharField(null=True)
lo1 = CharField(null=True)
lo2 = CharField(null=True)
lo3 = CharField(null=True)
note = CharField(null=True)
theme = CharField(null=True)
timeend = CharField(null=True)
timestart = CharField(null=True)
tingkatan = CharField(null=True)
topic = CharField(null=True)
week = CharField(null=True)
class Meta:
db_table = 'lessonplan2012'
class Lessonplan2013(BaseModel):
activity1 = CharField(null=True)
activity2 = CharField(null=True)
assimilation = CharField(null=True)
content = CharField(null=True)
date = IntegerField(null=True)
duration = CharField(null=True)
exercise = TextField(null=True)
handout = TextField(null=True)
impact = CharField(null=True)
lo1 = CharField(null=True)
lo2 = CharField(null=True)
lo3 = CharField(null=True)
note = CharField(null=True)
theme = CharField(null=True)
timeend = CharField(null=True)
timestart = CharField(null=True)
tingkatan = CharField(null=True)
topic = CharField(null=True)
week = CharField(null=True)
class Meta:
db_table = 'lessonplan2013'
class Lessonplan2015(BaseModel):
activity1 = CharField(null=True)
activity2 = CharField(null=True)
assimilation = CharField(null=True)
content = CharField(null=True)
date = IntegerField(null=True)
duration = CharField(null=True)
exercise = TextField(null=True)
handout = TextField(null=True)
impact = CharField(null=True)
lo1 = CharField(null=True)
lo2 = CharField(null=True)
lo3 = CharField(null=True)
note = CharField(null=True)
theme = CharField(null=True)
timeend = CharField(null=True)
timestart = CharField(null=True)
tingkatan = CharField(null=True)
topic = CharField(null=True)
week = CharField(null=True)
class Meta:
db_table = 'lessonplan2015'
class Lessonplanbank(BaseModel):
activity1 = CharField(null=True)
activity2 = CharField(null=True)
assimilation = CharField(null=True)
bank = PrimaryKeyField(db_column='bank_id', null=True)
content = CharField(null=True)
duration = CharField(null=True)
exercise = TextField(null=True)
handout = TextField(null=True)
impact = CharField(null=True)
level = CharField(null=True)
lo1 = CharField(null=True)
lo2 = CharField(null=True)
lo3 = CharField(null=True)
note = CharField(null=True)
theme = CharField(null=True)
tingkatan = CharField(null=True)
topic = CharField(null=True)
week = IntegerField(null=True)
class Meta:
db_table = 'lessonplanbank'
class Lit2010(BaseModel):
component = CharField(null=True)
content = TextField(null=True)
part = CharField(null=True)
title = CharField(null=True)
type = CharField(null=True)
class Meta:
db_table = 'lit2010'
class Muet(BaseModel):
component = CharField(null=True)
content = TextField(null=True)
title = CharField(null=True)
type = CharField(null=True)
class Meta:
db_table = 'muet'
class Ting42011(BaseModel):
comprehension = CharField(null=True)
continuous = CharField(null=True)
guided = CharField(null=True)
info = CharField(null=True)
kp = CharField(null=True)
lit = CharField(null=True)
mcq = CharField(null=True)
nama = CharField(null=True)
ting = CharField(null=True)
class Meta:
db_table = 'ting42011'
db.connect()
if len(sys.argv) < 2:
print "Begini boh: %s minggu (WW)" % sys.argv[0]
sys.exit(1)
week = sys.argv[1]
#month = 06
#hb = 21
#tahunini = datetime.datetime.now().year
lpweeksun = Lessonplan2015.get(Lessonplan2015.week == week)
datesun = int(lpweeksun.date)
#datesun = str(tahunini)+str(month)+str(hb)
#sdir = "/tmp/"
sdir = "/storage/extSdCard/lp2015/"
failtex = sdir+"weekly-week-"+str(week)+"-"+str(datesun)+".tex"
failtexlog = sdir+"weekly"+str(datesun)+".log"
failtexaux = sdir+"weekly"+str(datesun)+".aux"
failtexpdf = sdir+"weekly"+str(datesun)+".pdf"
failkeluar = open(failtex, "w")
tdatemon = datetime.datetime.strptime(str(datesun), '%Y%m%d') + datetime.timedelta(days=1)
tdatetue = datetime.datetime.strptime(str(datesun), '%Y%m%d') + datetime.timedelta(days=2)
tdatewed = datetime.datetime.strptime(str(datesun), '%Y%m%d') + datetime.timedelta(days=3)
tdatethu = datetime.datetime.strptime(str(datesun), '%Y%m%d') + datetime.timedelta(days=4)
datemon = tdatemon.strftime('%Y%m%d')
datetue = tdatetue.strftime('%Y%m%d')
datewed = tdatewed.strftime('%Y%m%d')
datethu = tdatethu.strftime('%Y%m%d')
#lpweeksun = Lessonplan2015.select().where(Lessonplan2015.week ==\
# week).order_by(Lessonplan2015.date).get()
print datesun
print >>failkeluar,"\\documentclass[a4paper,12pt]{article}\n\
\\usepackage{palatino}\n\
\\usepackage{fancyvrb,pifont,enumerate,url,graphicx,tabularx,longtable,quotes,setspace,floatflt,umoline,rotating,soul}\n\
\\usepackage[top=1.8cm,bottom=2cm,left=1.5cm,right=1.5cm]{geometry}\n\
\\usepackage{fancyhdr} \\pagestyle{fancy}\n"
print >>failkeluar,"\\usepackage{nopageno}"
print >>failkeluar,"\\usepackage{onepagem}\n\
\\usepackage{pstricks}\n\
\\setlength\\parindent{0pt}\n\
\\begin{document}\n"
namahari = time.strftime("%A",time.strptime(str(datesun),"%Y%m%d"))
tarikh_dalam_perkataan = time.strftime("%d %B %Y",time.strptime(str(datesun),"%Y%m%d"))
print >>failkeluar,"%s \\hspace{7cm} Week %s \\hfill %s" % (namahari, week,tarikh_dalam_perkataan)
print >>failkeluar,"\\begin{longtable}{|p{2.3cm}|p{3.9cm}p{0.3cm}p{9.8cm}|}\\hline\n\
\\centerline{TIME/CLASS}&\\multicolumn{3}{c|}{\\textit{TOPIC / LEARNING\
OUTCOME / CONTENT / ACTIVITIES /}}\\\\\n\
\n\\centerline{SUBJECT}&\\multicolumn{3}{c|}{\\textit{ASSIMILATION /\
EVALUATION}}\\\\\n\
&&&\\\\\n\
\\hline"
weeksun = Lessonplan2015.select().where(Lessonplan2015.date == datesun)
for i in weeksun:
if i.theme.startswith("PEPERIKSAAN"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{%s} \\\\" % (i.timestart,i.timeend,i.theme.upper())
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\ \
&&&\\\\" % i.topic
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{[%s]}} \\\\" % i.lo1
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo2
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo3
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline"
elif 'Cuti' in i.theme:
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\so{%s}}\
\\\\" % theme
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\"\
% i.topic
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" %\
i.lo1
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" % i.lo2
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" % i.lo3
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith("***"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{} \\\\" % (i.timestart,i.timeend)
theme = i.theme.upper()
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % i.theme
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" %\
i.topic
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo1
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo2
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline \n"
elif i.theme.startswith('---'):
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % theme
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\" %\
i.topic
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith('+++'):
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak & \
\\multicolumn{3}{c|}{%s}\\\\" % (i.tingkatan,theme)
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{%s} \\\\" % (i.timestart,i.timeend,i.topic)
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith("\ding{90}"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{{\\textcolor{blue}{%s}}}\
\\\\" % i.theme
print >>failkeluar,"&&&\\\\"
        tarikh_akhir_cuti_dalam_perkataan = time.strftime("%d %B %Y",time.strptime(i.lo2,"%Y%m%d"))
print >>failkeluar," & \\multicolumn{3}{c|}{{%s ---- %s}}\\\\" %\
(tarikh_dalam_perkataan,tarikh_akhir_cuti_dalam_perkataan)
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&\\multicolumn{3}{c|}{\\textcolor{blue}{\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}}}\\\\"
print >>failkeluar,"\\hline\n"
else:
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\\n"
print >>failkeluar,"\\centerline{%s-%s}&Theme / Topic&:& %s -\
\\textit{%s}\\\\" % (i.timestart,i.timeend,i.theme,i.topic)
print >>failkeluar,"\\centerline{English}&Learning\
objective(s)&:&Students will be able to:\\\\"
print >>failkeluar,"&&&(i) %s\\\\\n" % i.lo1
print >>failkeluar,"&&&(ii) %s\\\\\n" % i.lo2
print >>failkeluar,"&&&(iii) %s\\\\\n" % i.lo3
print >>failkeluar,"&Content&:& %s\\\\\n" % i.content
print >>failkeluar,"&Activities&:& \\ding{172} %s, \\ding{173}\
%s\\\\\n" % (i.activity1,i.activity2)
print >>failkeluar,"&Assimilation&:& %s\\\\" % i.assimilation
print >>failkeluar,"&Impact/Reflection&:& \\textit{%s}\\\\\n" % i.impact
print >>failkeluar,"\\hline\n"
print >>failkeluar,"\\end{longtable}\n"
print >>failkeluar,"\\vfill"
print\
>>failkeluar,".........................................\\hspace{8.8cm}Tarikh/\\textit{Date}.........................\n"
print >>failkeluar,"Tandatangan Pengetua\n"
print >>failkeluar,"\\textit{Principal's Signature}"
print >>failkeluar,"\\newpage"
weekmon = Lessonplan2015.select().where(Lessonplan2015.date == datemon)
namahari = time.strftime("%A",time.strptime(str(datemon),"%Y%m%d"))
tarikh_dalam_perkataan = time.strftime("%d %B %Y",time.strptime(str(datemon),"%Y%m%d"))
print >>failkeluar,"%s \\hspace{7cm} Week %s \\hfill %s" % (namahari, week,tarikh_dalam_perkataan)
print >>failkeluar,"\\begin{longtable}{|p{2.3cm}|p{3.9cm}p{0.3cm}p{9.8cm}|}\\hline\n\
\\centerline{TIME/CLASS}&\\multicolumn{3}{c|}{\\textit{TOPIC / LEARNING\
OUTCOME / CONTENT / ACTIVITIES /}}\\\\\n\
\n\\centerline{SUBJECT}&\\multicolumn{3}{c|}{\\textit{ASSIMILATION /\
EVALUATION}}\\\\\n\
&&&\\\\\n\
\\hline"
for i in weekmon:
if i.theme.startswith("PEPERIKSAAN"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{%s} \\\\" % (i.timestart,i.timeend,i.theme.upper())
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\ \
&&&\\\\" % i.topic
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{[%s]}} \\\\" % i.lo1
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo2
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo3
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline"
elif 'Cuti' in i.theme:
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % theme
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\"\
% i.topic
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" %\
i.lo1
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" % i.lo2
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" % i.lo3
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith("***"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{} \\\\" % (i.timestart,i.timeend)
theme = i.theme.upper()
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % i.theme
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" %\
i.topic
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo1
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo2
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline \n"
elif i.theme.startswith('---'):
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % theme
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\" %\
i.topic
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith('+++'):
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak & \
\\multicolumn{3}{c|}{%s}\\\\" % (i.tingkatan,theme)
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{%s} \\\\" % (i.timestart,i.timeend,i.topic)
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith("\ding{90}"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{{\\textcolor{blue}{%s}}}\
\\\\" % i.theme
print >>failkeluar,"&&&\\\\"
        tarikh_akhir_cuti_dalam_perkataan = time.strftime("%d %B %Y",time.strptime(i.lo2,"%Y%m%d"))
print >>failkeluar," & \\multicolumn{3}{c|}{{%s ---- %s}}\\\\" %\
(tarikh_dalam_perkataan,tarikh_akhir_cuti_dalam_perkataan)
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&\\multicolumn{3}{c|}{\\textcolor{blue}{\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}}}\\\\"
print >>failkeluar,"\\hline\n"
else:
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\\n"
print >>failkeluar,"\\centerline{%s-%s}&Theme / Topic&:& %s -\
\\textit{%s}\\\\" % (i.timestart,i.timeend,i.theme,i.topic)
print >>failkeluar,"\\centerline{English}&Learning\
objective(s)&:&Students will be able to:\\\\"
print >>failkeluar,"&&&(i) %s\\\\\n" % i.lo1
print >>failkeluar,"&&&(ii) %s\\\\\n" % i.lo2
print >>failkeluar,"&&&(iii) %s\\\\\n" % i.lo3
print >>failkeluar,"&Content&:& %s\\\\\n" % i.content
print >>failkeluar,"&Activities&:& \\ding{172} %s, \\ding{173}\
%s\\\\\n" % (i.activity1,i.activity2)
print >>failkeluar,"&Assimilation&:& %s\\\\" % i.assimilation
print >>failkeluar,"&Impact/Reflection&:& \\textit{%s}\\\\\n" % i.impact
print >>failkeluar,"\\hline\n"
print >>failkeluar,"\\end{longtable}\n"
print >>failkeluar,"\\vfill"
print\
>>failkeluar,".........................................\\hspace{8.8cm}Tarikh/\\textit{Date}.........................\n"
print >>failkeluar,"Tandatangan Pengetua\n"
print >>failkeluar,"\\textit{Principal's Signature}"
print >>failkeluar,"\\newpage"
weektue = Lessonplan2015.select().where(Lessonplan2015.date == datetue)
namahari = time.strftime("%A",time.strptime(str(datetue),"%Y%m%d"))
tarikh_dalam_perkataan = time.strftime("%d %B %Y",time.strptime(str(datetue),"%Y%m%d"))
print >>failkeluar,"%s \\hspace{7cm} Week %s \\hfill %s" % (namahari, week,tarikh_dalam_perkataan)
print >>failkeluar,"\\begin{longtable}{|p{2.3cm}|p{3.9cm}p{0.3cm}p{9.8cm}|}\\hline\n\
\\centerline{TIME/CLASS}&\\multicolumn{3}{c|}{\\textit{TOPIC / LEARNING\
OUTCOME / CONTENT / ACTIVITIES /}}\\\\\n\
\n\\centerline{SUBJECT}&\\multicolumn{3}{c|}{\\textit{ASSIMILATION /\
EVALUATION}}\\\\\n\
&&&\\\\\n\
\\hline"
for i in weektue:
if i.theme.startswith("PEPERIKSAAN"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{%s} \\\\" % (i.timestart,i.timeend,i.theme.upper())
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\ \
&&&\\\\" % i.topic
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{[%s]}} \\\\" % i.lo1
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo2
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo3
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline"
elif 'Cuti' in i.theme:
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % theme
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\"\
% i.topic
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" %\
i.lo1
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" % i.lo2
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" % i.lo3
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith("***"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{} \\\\" % (i.timestart,i.timeend)
theme = i.theme.upper()
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % i.theme
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" %\
i.topic
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo1
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo2
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline \n"
elif i.theme.startswith('---'):
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % theme
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\" %\
i.topic
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith('+++'):
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak & \
\\multicolumn{3}{c|}{%s}\\\\" % (i.tingkatan,theme)
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{%s} \\\\" % (i.timestart,i.timeend,i.topic)
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith("\ding{90}"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{{\\textcolor{blue}{%s}}}\
\\\\" % i.theme
print >>failkeluar,"&&&\\\\"
        tarikh_akhir_cuti_dalam_perkataan = time.strftime("%d %B %Y",time.strptime(i.lo2,"%Y%m%d"))
print >>failkeluar," & \\multicolumn{3}{c|}{{%s ---- %s}}\\\\" %\
(tarikh_dalam_perkataan,tarikh_akhir_cuti_dalam_perkataan)
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&\\multicolumn{3}{c|}{\\textcolor{blue}{\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}}}\\\\"
print >>failkeluar,"\\hline\n"
else:
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\\n"
print >>failkeluar,"\\centerline{%s-%s}&Theme / Topic&:& %s -\
\\textit{%s}\\\\" % (i.timestart,i.timeend,i.theme,i.topic)
print >>failkeluar,"\\centerline{English}&Learning\
objective(s)&:&Students will be able to:\\\\"
print >>failkeluar,"&&&(i) %s\\\\\n" % i.lo1
print >>failkeluar,"&&&(ii) %s\\\\\n" % i.lo2
print >>failkeluar,"&&&(iii) %s\\\\\n" % i.lo3
print >>failkeluar,"&Content&:& %s\\\\\n" % i.content
print >>failkeluar,"&Activities&:& \\ding{172} %s, \\ding{173}\
%s\\\\\n" % (i.activity1,i.activity2)
print >>failkeluar,"&Assimilation&:& %s\\\\" % i.assimilation
print >>failkeluar,"&Impact/Reflection&:& \\textit{%s}\\\\\n" % i.impact
print >>failkeluar,"\\hline\n"
print >>failkeluar,"\\end{longtable}\n"
print >>failkeluar,"\\vfill"
print\
>>failkeluar,".........................................\\hspace{8.8cm}Tarikh/\\textit{Date}.........................\n"
print >>failkeluar,"Tandatangan Pengetua\n"
print >>failkeluar,"\\textit{Principal's Signature}"
print >>failkeluar,"\\newpage"
weekwed = Lessonplan2015.select().where(Lessonplan2015.date == datewed)
namahari = time.strftime("%A",time.strptime(str(datewed),"%Y%m%d"))
tarikh_dalam_perkataan = time.strftime("%d %B %Y",time.strptime(str(datewed),"%Y%m%d"))
print >>failkeluar,"%s \\hspace{7cm} Week %s \\hfill %s" % (namahari, week,tarikh_dalam_perkataan)
print >>failkeluar,"\\begin{longtable}{|p{2.3cm}|p{3.9cm}p{0.3cm}p{9.8cm}|}\\hline\n\
\\centerline{TIME/CLASS}&\\multicolumn{3}{c|}{\\textit{TOPIC / LEARNING\
OUTCOME / CONTENT / ACTIVITIES /}}\\\\\n\
\n\\centerline{SUBJECT}&\\multicolumn{3}{c|}{\\textit{ASSIMILATION /\
EVALUATION}}\\\\\n\
&&&\\\\\n\
\\hline"
for i in weekwed:
if i.theme.startswith("PEPERIKSAAN"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{%s} \\\\" % (i.timestart,i.timeend,i.theme.upper())
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\ \
&&&\\\\" % i.topic
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{[%s]}} \\\\" % i.lo1
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo2
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo3
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline"
elif 'Cuti' in i.theme:
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % theme
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\"\
% i.topic
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" %\
i.lo1
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" % i.lo2
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" % i.lo3
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith("***"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{} \\\\" % (i.timestart,i.timeend)
theme = i.theme.upper()
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % i.theme
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" %\
i.topic
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo1
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo2
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline \n"
elif i.theme.startswith('---'):
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % theme
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\" %\
i.topic
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith('+++'):
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak & \
\\multicolumn{3}{c|}{%s}\\\\" % (i.tingkatan,theme)
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{%s} \\\\" % (i.timestart,i.timeend,i.topic)
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith("\ding{90}"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{{\\textcolor{blue}{%s}}}\
\\\\" % i.theme
print >>failkeluar,"&&&\\\\"
        tarikh_akhir_cuti_dalam_perkataan = time.strftime("%d %B %Y",time.strptime(i.lo2,"%Y%m%d"))
print >>failkeluar," & \\multicolumn{3}{c|}{{%s ---- %s}}\\\\" %\
(tarikh_dalam_perkataan,tarikh_akhir_cuti_dalam_perkataan)
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&\\multicolumn{3}{c|}{\\textcolor{blue}{\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}}}\\\\"
print >>failkeluar,"\\hline\n"
else:
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\\n"
print >>failkeluar,"\\centerline{%s-%s}&Theme / Topic&:& %s -\
\\textit{%s}\\\\" % (i.timestart,i.timeend,i.theme,i.topic)
print >>failkeluar,"\\centerline{English}&Learning\
objective(s)&:&Students will be able to:\\\\"
print >>failkeluar,"&&&(i) %s\\\\\n" % i.lo1
print >>failkeluar,"&&&(ii) %s\\\\\n" % i.lo2
print >>failkeluar,"&&&(iii) %s\\\\\n" % i.lo3
print >>failkeluar,"&Content&:& %s\\\\\n" % i.content
print >>failkeluar,"&Activities&:& \\ding{172} %s, \\ding{173}\
%s\\\\\n" % (i.activity1,i.activity2)
print >>failkeluar,"&Assimilation&:& %s\\\\" % i.assimilation
print >>failkeluar,"&Impact/Reflection&:& \\textit{%s}\\\\\n" % i.impact
print >>failkeluar,"\\hline\n"
print >>failkeluar,"\\end{longtable}\n"
print >>failkeluar,"\\vfill"
print\
>>failkeluar,".........................................\\hspace{8.8cm}Tarikh/\\textit{Date}.........................\n"
print >>failkeluar,"Tandatangan Pengetua\n"
print >>failkeluar,"\\textit{Principal's Signature}"
print >>failkeluar,"\\newpage"
weekthu = Lessonplan2015.select().where(Lessonplan2015.date == datethu)
namahari = time.strftime("%A",time.strptime(str(datethu),"%Y%m%d"))
tarikh_dalam_perkataan = time.strftime("%d %B %Y",time.strptime(str(datethu),"%Y%m%d"))
print >>failkeluar,"%s \\hspace{7cm} Week %s \\hfill %s" % (namahari, week,tarikh_dalam_perkataan)
print >>failkeluar,"\\begin{longtable}{|p{2.3cm}|p{3.9cm}p{0.3cm}p{9.8cm}|}\\hline\n\
\\centerline{TIME/CLASS}&\\multicolumn{3}{c|}{\\textit{TOPIC / LEARNING\
OUTCOME / CONTENT / ACTIVITIES /}}\\\\\n\
\n\\centerline{SUBJECT}&\\multicolumn{3}{c|}{\\textit{ASSIMILATION /\
EVALUATION}}\\\\\n\
&&&\\\\\n\
\\hline"
for i in weekthu:
if i.theme.startswith("PEPERIKSAAN"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{%s} \\\\" % (i.timestart,i.timeend,i.theme.upper())
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\ \
&&&\\\\" % i.topic
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{[%s]}} \\\\" % i.lo1
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo2
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo3
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline"
elif 'Cuti' in i.theme:
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % theme
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\"\
% i.topic
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" %\
i.lo1
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" % i.lo2
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}}\\\\" % i.lo3
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith("***"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{} \\\\" % (i.timestart,i.timeend)
theme = i.theme.upper()
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % i.theme
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" %\
i.topic
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo1
print >>failkeluar,"\n & \\multicolumn{3}{c|}{\\textit{%s}} \\\\" % i.lo2
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"&&& \\\\"
print >>failkeluar,"\\hline \n"
elif i.theme.startswith('---'):
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{%s}\
\\\\" % theme
print >>failkeluar,"\n& \\multicolumn{3}{c|}{\\textit{%s}} \\\\" %\
i.topic
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith('+++'):
theme = i.theme.upper()
print >>failkeluar,"\\centerline{%s}\\linebreak & \
\\multicolumn{3}{c|}{%s}\\\\" % (i.tingkatan,theme)
print >>failkeluar,"\n\\centerline{%s-%s}&\
\\multicolumn{3}{c|}{%s} \\\\" % (i.timestart,i.timeend,i.topic)
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\\hline\n"
elif i.theme.startswith("\ding{90}"):
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"\n & \\multicolumn{3}{c|}{{\\textcolor{blue}{%s}}}\
\\\\" % i.theme
print >>failkeluar,"&&&\\\\"
        tarikh_akhir_cuti_dalam_perkataan = time.strftime("%d %B %Y",time.strptime(i.lo2,"%Y%m%d"))
print >>failkeluar," & \\multicolumn{3}{c|}{{%s ---- %s}}\\\\" %\
(tarikh_dalam_perkataan,tarikh_akhir_cuti_dalam_perkataan)
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&&&\\\\"
print >>failkeluar,"&\\multicolumn{3}{c|}{\\textcolor{blue}{\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}\ding{90}}}\\\\"
print >>failkeluar,"\\hline\n"
else:
print >>failkeluar,"\\centerline{%s}\\linebreak" % i.tingkatan
print >>failkeluar,"&&&\\\\\n"
print >>failkeluar,"\\centerline{%s-%s}&Theme / Topic&:& %s -\
\\textit{%s}\\\\" % (i.timestart,i.timeend,i.theme,i.topic)
print >>failkeluar,"\\centerline{English}&Learning\
objective(s)&:&Students will be able to:\\\\"
print >>failkeluar,"&&&(i) %s\\\\\n" % i.lo1
print >>failkeluar,"&&&(ii) %s\\\\\n" % i.lo2
print >>failkeluar,"&&&(iii) %s\\\\\n" % i.lo3
print >>failkeluar,"&Content&:& %s\\\\\n" % i.content
print >>failkeluar,"&Activities&:& \\ding{172} %s, \\ding{173}\
%s\\\\\n" % (i.activity1,i.activity2)
print >>failkeluar,"&Assimilation&:& %s\\\\" % i.assimilation
print >>failkeluar,"&Impact/Reflection&:& \\textit{%s}\\\\\n" % i.impact
print >>failkeluar,"\\hline\n"
print >>failkeluar,"\\end{longtable}\n"
print >>failkeluar,"\\vfill"
print\
>>failkeluar,".........................................\\hspace{8.8cm}Tarikh/\\textit{Date}.........................\n"
print >>failkeluar,"Tandatangan Pengetua\n"
print >>failkeluar,"\\textit{Principal's Signature}"
print >>failkeluar,"\\end{document}\n"
failkeluar.close()
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1DeploymentSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'min_ready_seconds': 'int',
'paused': 'bool',
'progress_deadline_seconds': 'int',
'replicas': 'int',
'revision_history_limit': 'int',
'selector': 'V1LabelSelector',
'strategy': 'V1DeploymentStrategy',
'template': 'V1PodTemplateSpec'
}
attribute_map = {
'min_ready_seconds': 'minReadySeconds',
'paused': 'paused',
'progress_deadline_seconds': 'progressDeadlineSeconds',
'replicas': 'replicas',
'revision_history_limit': 'revisionHistoryLimit',
'selector': 'selector',
'strategy': 'strategy',
'template': 'template'
}
def __init__(self, min_ready_seconds=None, paused=None, progress_deadline_seconds=None, replicas=None, revision_history_limit=None, selector=None, strategy=None, template=None, local_vars_configuration=None): # noqa: E501
"""V1DeploymentSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._min_ready_seconds = None
self._paused = None
self._progress_deadline_seconds = None
self._replicas = None
self._revision_history_limit = None
self._selector = None
self._strategy = None
self._template = None
self.discriminator = None
if min_ready_seconds is not None:
self.min_ready_seconds = min_ready_seconds
if paused is not None:
self.paused = paused
if progress_deadline_seconds is not None:
self.progress_deadline_seconds = progress_deadline_seconds
if replicas is not None:
self.replicas = replicas
if revision_history_limit is not None:
self.revision_history_limit = revision_history_limit
self.selector = selector
if strategy is not None:
self.strategy = strategy
self.template = template
@property
def min_ready_seconds(self):
"""Gets the min_ready_seconds of this V1DeploymentSpec. # noqa: E501
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) # noqa: E501
:return: The min_ready_seconds of this V1DeploymentSpec. # noqa: E501
:rtype: int
"""
return self._min_ready_seconds
@min_ready_seconds.setter
def min_ready_seconds(self, min_ready_seconds):
"""Sets the min_ready_seconds of this V1DeploymentSpec.
Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) # noqa: E501
:param min_ready_seconds: The min_ready_seconds of this V1DeploymentSpec. # noqa: E501
:type: int
"""
self._min_ready_seconds = min_ready_seconds
@property
def paused(self):
"""Gets the paused of this V1DeploymentSpec. # noqa: E501
Indicates that the deployment is paused. # noqa: E501
:return: The paused of this V1DeploymentSpec. # noqa: E501
:rtype: bool
"""
return self._paused
@paused.setter
def paused(self, paused):
"""Sets the paused of this V1DeploymentSpec.
Indicates that the deployment is paused. # noqa: E501
:param paused: The paused of this V1DeploymentSpec. # noqa: E501
:type: bool
"""
self._paused = paused
@property
def progress_deadline_seconds(self):
"""Gets the progress_deadline_seconds of this V1DeploymentSpec. # noqa: E501
The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s. # noqa: E501
:return: The progress_deadline_seconds of this V1DeploymentSpec. # noqa: E501
:rtype: int
"""
return self._progress_deadline_seconds
@progress_deadline_seconds.setter
def progress_deadline_seconds(self, progress_deadline_seconds):
"""Sets the progress_deadline_seconds of this V1DeploymentSpec.
The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s. # noqa: E501
:param progress_deadline_seconds: The progress_deadline_seconds of this V1DeploymentSpec. # noqa: E501
:type: int
"""
self._progress_deadline_seconds = progress_deadline_seconds
@property
def replicas(self):
"""Gets the replicas of this V1DeploymentSpec. # noqa: E501
Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. # noqa: E501
:return: The replicas of this V1DeploymentSpec. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1DeploymentSpec.
Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. # noqa: E501
:param replicas: The replicas of this V1DeploymentSpec. # noqa: E501
:type: int
"""
self._replicas = replicas
@property
def revision_history_limit(self):
"""Gets the revision_history_limit of this V1DeploymentSpec. # noqa: E501
The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10. # noqa: E501
:return: The revision_history_limit of this V1DeploymentSpec. # noqa: E501
:rtype: int
"""
return self._revision_history_limit
@revision_history_limit.setter
def revision_history_limit(self, revision_history_limit):
"""Sets the revision_history_limit of this V1DeploymentSpec.
The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10. # noqa: E501
:param revision_history_limit: The revision_history_limit of this V1DeploymentSpec. # noqa: E501
:type: int
"""
self._revision_history_limit = revision_history_limit
@property
def selector(self):
"""Gets the selector of this V1DeploymentSpec. # noqa: E501
:return: The selector of this V1DeploymentSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1DeploymentSpec.
:param selector: The selector of this V1DeploymentSpec. # noqa: E501
:type: V1LabelSelector
"""
if self.local_vars_configuration.client_side_validation and selector is None: # noqa: E501
raise ValueError("Invalid value for `selector`, must not be `None`") # noqa: E501
self._selector = selector
@property
def strategy(self):
"""Gets the strategy of this V1DeploymentSpec. # noqa: E501
:return: The strategy of this V1DeploymentSpec. # noqa: E501
:rtype: V1DeploymentStrategy
"""
return self._strategy
@strategy.setter
def strategy(self, strategy):
"""Sets the strategy of this V1DeploymentSpec.
:param strategy: The strategy of this V1DeploymentSpec. # noqa: E501
:type: V1DeploymentStrategy
"""
self._strategy = strategy
@property
def template(self):
"""Gets the template of this V1DeploymentSpec. # noqa: E501
:return: The template of this V1DeploymentSpec. # noqa: E501
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""Sets the template of this V1DeploymentSpec.
:param template: The template of this V1DeploymentSpec. # noqa: E501
:type: V1PodTemplateSpec
"""
if self.local_vars_configuration.client_side_validation and template is None: # noqa: E501
raise ValueError("Invalid value for `template`, must not be `None`") # noqa: E501
self._template = template
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DeploymentSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DeploymentSpec):
return True
return self.to_dict() != other.to_dict()
|
|
import matplotlib
matplotlib.use("GTK")
import scipy, numpy, pylab, time, matplotlib.mlab, matplotlib.pyplot, matplotlib.cm
import os, gtk, gobject, bisect, psyco
import FemIo, FilenameComposer
class FemResultsViewer:
RootFilename = "Kolbka2"
ExperimentNumber = "2"
XSaptialDelta = 0.002
YSaptialDelta = 0.002
Dpi = 120
TimeStep = 1E-4
EndTime = None
StartImage = 0
def __init__(self):
self.simulationDir = FilenameComposer.GetResultsGraphicsDir(FemResultsViewer.RootFilename, FemResultsViewer.ExperimentNumber)
self.eleFilename = FilenameComposer.GetMeshFilename(FemResultsViewer.RootFilename)
self.femFilename = FilenameComposer.GetResultsFilename(FemResultsViewer.RootFilename, FemResultsViewer.ExperimentNumber)
def run(self):
self.ReportMeshLoadingStarted()
self.pslg = FemIo.loadEle(self.eleFilename)
self.ReportMeshLoadingFinished()
self.ReportFemResultsLoadingStarted()
self.results = FemIo.loadFem(self.femFilename)
self.ReportFemResultsLoadingFinished()
#Compute grid points
self.ReportGridPreparingStarted()
xGridPoints = numpy.arange(0.0, 1.0 + FemResultsViewer.XSaptialDelta, FemResultsViewer.XSaptialDelta)
yGridPoints = numpy.arange(0.0, 1.0 + FemResultsViewer.YSaptialDelta, FemResultsViewer.YSaptialDelta)
x, y = numpy.meshgrid(xGridPoints, yGridPoints)
self.ReportGridPreparingFinished()
#Compute display data
self.ReportComputingGridValuesStarted()
self.ComputeDisplayData(x,y)
self.ReportComputingGridValuesFinished()
#Make movie
self.ReportMovieMakingStarted()
self.MakeMovie()
self.ReportMovieMakingFinished()
#Return
return
def MakeMovie(self):
os.chdir(self.simulationDir)
os.system("mencoder mf://tmp*.png -mf type=png:fps=100 -ovc lavc -of lavf -lavcopts vcodec=mpeg4 -oac copy -o simulation.avi")
return
def ComputeDisplayData(self, xGrid, yGrid):
#Compute times
if(FemResultsViewer.TimeStep is None):
FemResultsViewer.TimeStep = self.results[1][0]
if(FemResultsViewer.EndTime is None):
FemResultsViewer.EndTime = self.results[-1][0] + FemResultsViewer.TimeStep
#Compute the sizes
xSize = len(xGrid[0])
ySize = len(xGrid)
timeSteps = int(FemResultsViewer.EndTime/FemResultsViewer.TimeStep)
#Compute point element assignment
self.ReportComputingPointElementAssignmentStarted()
pointElementAssignment = [[None for k in range(0,xSize)] for j in range(0,ySize)]
for element in self.pslg.elements:
xMin = yMin = 1000000000000
xMax = yMax = -1000000000000
for point in element.points:
xMin = min(xMin, point.x)
yMin = min(yMin, point.y)
xMax = max(xMax, point.x)
yMax = max(yMax, point.y)
rectangle = self.GetBoundingRectangle(xMin, yMin, xMax, yMax)
for xIndex in range(rectangle[0], rectangle[2]+1):
for yIndex in range(rectangle[1], rectangle[3]+1):
if(pointElementAssignment[yIndex][xIndex] is None and self.CheckPointInElement(element, xGrid[yIndex][xIndex], yGrid[yIndex][xIndex])):
pointElementAssignment[yIndex][xIndex] = element
self.ReportComputingPointElementAssignmentFinished()
#Compute time coefficients
self.ReportCachingResultsInTimeStarted()
coeffs = []
for tIndex in range(0,timeSteps):
t = tIndex*FemResultsViewer.TimeStep
coeffs.append(self.FindCoeffInTime(t))
self.ReportCachingResultsInTimeFinished()
#Compute min/max values
displayMax = -1000000000000
displayMin = 1000000000000
#Compute min/max values
self.ReportComputingMinMaxStarted()
percent = 0
for tIndex in range(0,timeSteps):
coeff = coeffs[tIndex]
for coeffValue in coeff:
displayMax = max(displayMax, coeffValue)
displayMin = min(displayMin, coeffValue)
self.ReportComputingMinMaxFinished(displayMin, displayMax)
#Compute grid values
self.ReportRenderingStarted()
for tIndex in range(FemResultsViewer.StartImage,timeSteps):
displayData = [[0.0 for k in range(0,xSize)] for j in range(0,ySize)]
coeff = coeffs[tIndex]
for xIndex in range(0,xSize):
for yIndex in range(0,ySize):
x,y = xGrid[yIndex][xIndex], yGrid[yIndex][xIndex]
e = pointElementAssignment[yIndex][xIndex]
value = self.GetValue(coeff, e, x, y)
displayData[yIndex][xIndex] = value
self.RenderFrame(displayData, displayMin, displayMax, tIndex)
self.ReportRenderingFinished()
#Return
return
def RenderFrame(self, currentFrameData, min, max, frameNumber):
#Report
self.ReportRenderingFrame(frameNumber)
#Render
figure = matplotlib.pyplot.figure(1)
axes = figure.gca()
image = matplotlib.pyplot.imshow(currentFrameData,
interpolation='bilinear',
origin='lower',
cmap=matplotlib.cm.hot,
extent=(0,1,0,1),
axes=axes,
animated=True,
vmin=min,
vmax=max)
colorbar = matplotlib.pyplot.colorbar(image,
orientation='horizontal')
#Save & clean up
filename = self.simulationDir + ("\\tmp%06d.png" % (frameNumber + 1))
figure.savefig(filename, dpi=FemResultsViewer.Dpi)
figure.clf()
return
def GetBoundingRectangle(self, xMin, yMin, xMax, yMax):
#Get indexes
xMinIndex = int(xMin/FemResultsViewer.XSaptialDelta)
yMinIndex = int(yMin/FemResultsViewer.YSaptialDelta)
xMaxIndex = int(xMax/FemResultsViewer.XSaptialDelta + 1.0)
yMaxIndex = int(yMax/FemResultsViewer.YSaptialDelta + 1.0)
#Get nearest
xMax = xMaxIndex * FemResultsViewer.XSaptialDelta
yMax = yMaxIndex * FemResultsViewer.YSaptialDelta
        #Make sure the upper bound is valid
if(xMax > 1.0):
xMaxIndex -= 1
if(yMax > 1.0):
yMaxIndex -= 1
return (xMinIndex, yMinIndex, xMaxIndex, yMaxIndex)
def CheckPointInElement(self, element, x, y):
u, v = self.FindTriangleCoords(element, x, y)
return (u >= 0.0) and (v >= 0.0) and (u + v <= 1.0)
def FindCoeffInTime(self, t):
coeff = None
for tIndex in range(0, len(self.results)-1):
currentCoeff = self.results[tIndex]
nextCoeff = self.results[tIndex+1]
if (currentCoeff[0] <= t and t <= nextCoeff[0]):
timeDiff = nextCoeff[0] - currentCoeff[0]
timeShift = t - currentCoeff[0]
percent = timeShift / timeDiff
coeff = [(1 - percent) * currentCoeff[1][i] + percent * nextCoeff[1][i] for i in range(0,len(currentCoeff[1]))]
break
return coeff
def GetValue(self, coeff, e, x, y):
if(e is None):
return 0.0
if(coeff is None):
return 0.0
u,v = self.FindTriangleCoords(e, x, y)
value = (1.0 - u - v) * coeff[e.x1.index] + u * coeff[e.x2.index] + v * coeff[e.x3.index]
return value
def FindTriangleCoords(self, element, x, y):
v0 = (element.x2.x - element.x1.x, element.x2.y - element.x1.y)
v1 = (element.x3.x - element.x1.x, element.x3.y - element.x1.y)
v2 = (x - element.x1.x, y - element.x1.y)
dot00 = self.DotProduct(v0, v0)
dot01 = self.DotProduct(v0, v1)
dot02 = self.DotProduct(v0, v2)
dot11 = self.DotProduct(v1, v1)
dot12 = self.DotProduct(v1, v2)
invDenom = 1.0 / (dot00 * dot11 - dot01 * dot01)
u = (dot11 * dot02 - dot01 * dot12) * invDenom
v = (dot00 * dot12 - dot01 * dot02) * invDenom
return u, v
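        # Worked example (a sketch, not part of the original script): for the unit
        # triangle with x1=(0,0), x2=(1,0), x3=(0,1) and the query point (0.25, 0.25)
        # we get v0=(1,0), v1=(0,1), v2=(0.25,0.25), so dot00=dot11=1, dot01=0,
        # dot02=dot12=0.25 and invDenom=1, giving u=0.25, v=0.25. Since u>=0, v>=0
        # and u+v<=1, CheckPointInElement reports the point as inside the element.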
def DotProduct(self, u, v):
return u[0] * v[0] + u[1] * v[1]
def ReportMeshLoadingStarted(self):
print "Loading mesh from file [" + self.eleFilename + "]"
def ReportMeshLoadingFinished(self):
print "Loading mesh from file finished..."
print "Nodes: " + str(len(self.pslg.points))
print "Segments: " + str(len(self.pslg.segments))
print "Elements: " + str(len(self.pslg.elements))
def ReportFemResultsLoadingStarted(self):
print "Loading FEM results from file [" + self.femFilename + "]"
def ReportFemResultsLoadingFinished(self):
print "Loading FEM results from file finished..."
print "Time steps: " + str(len(self.results))
print "Start time: " + str(self.results[0][0])
print "End time: " + str(self.results[-1][0])
print "Variables: " + str(len(self.results[0][1]))
def ReportGridPreparingStarted(self):
print "Preparing grid..."
def ReportGridPreparingFinished(self):
print "Preparing finished..."
def ReportComputingGridValuesStarted(self):
print "Computing grid values..."
def ReportComputingPointElementAssignmentStarted(self):
print "Computing point element assignment started..."
def ReportComputingPointElementAssignmentFinished(self):
print "Computing point element assignment finished..."
def ReportCachingResultsInTimeStarted(self):
print "Caching results in time started..."
print "Animation start time: " + str(0.0)
print "Animation time step: " + str(FemResultsViewer.TimeStep)
print "Animation x spatial step: " + str(FemResultsViewer.XSaptialDelta)
print "Animation y spatial step: " + str(FemResultsViewer.YSaptialDelta)
def ReportCachingResultsInTimeFinished(self):
print "Caching results in time finished..."
def ReportComputingMinMaxStarted(self):
print "Computing min/max density values started..."
def ReportComputingMinMaxFinished(self, min, max):
print "Computing min/max density values started..."
print "Min: " + str(min)
print "Max: " + str(max)
def ReportRenderingStarted(self):
print "Rendering started..."
def ReportRenderingFrame(self, frameNumber):
print "Rendering frame #%06d..." % (frameNumber + 1)
def ReportRenderingFinished(self):
print "Rendering finished..."
def ReportComputingGridValuesFinished(self):
print "Computing grid values finished..."
def ReportMovieMakingStarted(self):
print "Making movie [simulation.mpg] started..."
def ReportMovieMakingFinished(self):
print "Making movie [simulation.mpg] finished..."
if __name__ == '__main__':
psyco.full()
viewer = FemResultsViewer()
viewer.run()
|
|
#!/usr/bin/env python
import logging
import re
import sys
import cssselect
from collections import defaultdict
from collections import deque
from lxml.etree import tostring
from lxml.etree import tounicode
from lxml.html import document_fromstring
from lxml.html import fragment_fromstring
from .cleaners import clean_attributes
from .cleaners import html_cleaner
from .htmls import build_doc
from .htmls import get_body
from .htmls import get_title
from .htmls import shorten_title
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
REGEXES = {
'unlikelyCandidatesRe': re.compile('combx|comment|community|disqus|extra|foot|header|menu|remark|rss|shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|popup|tweet|twitter', re.I),
'okMaybeItsACandidateRe': re.compile('and|article|body|column|main|shadow', re.I),
'positiveRe': re.compile('ad-post|category|article|body|content|entry|hentry|main|page|pagination|post|text|blog|story', re.I),
'negativeRe': re.compile('combx|comment|com-|contact|foot|footer|footnote|masthead|media|meta|outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|tool|widget', re.I),
'divToPElementsRe': re.compile('<(a|blockquote|dl|div|img|ol|p|pre|table|ul)', re.I),
'socialNetworksRe': re.compile('facebook|twitter|disqus|tumblr|linkedin|flickr'),
#'replaceBrsRe': re.compile('(<br[^>]*>[ \n\r\t]*){2,}',re.I),
#'replaceFontsRe': re.compile('<(\/?)font[^>]*>',re.I),
#'trimRe': re.compile('^\s+|\s+$/'),
#'normalizeRe': re.compile('\s{2,}/'),
#'killBreaksRe': re.compile('(<br\s*\/?>(\s| ?)*){1,}/'),
'videoRe': re.compile('http:\/\/(www\.)?(youtube|vimeo)\.com', re.I),
#skipFootnoteLink: /^\s*(\[?[a-z0-9]{1,2}\]?|^|edit|citation needed)\s*$/i,
}
class Unparseable(ValueError):
pass
def describe(node, depth=1):
if not hasattr(node, 'tag'):
return "[%s]" % type(node)
name = node.tag
if node.get('id', ''):
name += '#' + node.get('id')
if node.get('class', ''):
name += '.' + node.get('class').replace(' ', '.')
if name[:4] in ['div#', 'div.']:
name = name[3:]
if depth and node.getparent() is not None:
return name + ' - ' + describe(node.getparent(), depth - 1)
return name
def to_int(x):
if not x:
return None
x = x.strip()
if x.endswith('px'):
return int(x[:-2])
if x.endswith('em'):
return int(x[:-2]) * 12
return int(x)
def clean(text):
text = re.sub('\s*\n\s*', '\n', text)
text = re.sub('[ \t]{2,}', ' ', text)
return text.strip()
def text_length(i):
return len(clean(i.text_content() or ""))
regexp_type = type(re.compile('hello, world'))
def compile_pattern(elements):
if not elements:
return None
if isinstance(elements, regexp_type):
return elements
if isinstance(elements, str):
elements = elements.split(',')
return re.compile(u'|'.join([re.escape(x.lower()) for x in elements]), re.U)
class Document:
"""Class to build a etree document out of html."""
TEXT_LENGTH_THRESHOLD = 5
RETRY_LENGTH = 25
def __init__(self, input, positive_keywords=None, negative_keywords=None, recallPriority=False, **options):
"""Generate the document
:param input: string of the html content.
kwargs:
- attributes:
- debug: output debug messages
- min_text_length:
- retry_length:
- url: will allow adjusting links to be absolute
- positive_keywords: the list of positive search patterns in classes and ids, for example: ["news-item", "block"]
- negative_keywords: the list of negative search patterns in classes and ids, for example: ["mysidebar", "related", "ads"]
Also positive_keywords and negative_keywords could be a regexp.
"""
self.input = input
self.recallPriority = recallPriority
self.options = options
self.html = None
self.encoding = None
self.positive_keywords = compile_pattern(positive_keywords)
self.negative_keywords = compile_pattern(negative_keywords)
def _html(self, force=False):
if force or self.html is None:
self.html = self._parse(self.input)
return self.html
def _parse(self, input):
doc, self.encoding = build_doc(input)
doc = html_cleaner.clean_html(doc)
base_href = self.options.get('url', None)
if base_href:
doc.make_links_absolute(base_href, resolve_base_href=True)
else:
doc.resolve_base_href()
return doc
def content(self):
return get_body(self._html(True))
def title(self):
return get_title(self._html(True))
def short_title(self):
return shorten_title(self._html(True))
def get_clean_html(self):
return clean_attributes(tounicode(self.html))
def summary(self, html_partial=False):
"""Generate the summary of the html docuemnt
:param html_partial: return only the div of the document, don't wrap
in html and body tags.
"""
try:
ruthless = True
#Added recall priority flag
recallPriority = self.recallPriority
if recallPriority:
ruthless = False
self.TEXT_LENGTH_THRESHOLD = 2
self.RETRY_LENGTH = 25
while True:
self._html(True)
for i in self.tags(self.html, 'script', 'style'):
i.drop_tree()
for i in self.tags(self.html, 'body'):
i.set('id', 'readabilityBody')
if ruthless:
self.remove_unlikely_candidates()
self.transform_misused_divs_into_paragraphs()
candidates = self.score_paragraphs()
best_candidates = self.select_best_candidates(candidates)
if best_candidates and not recallPriority:
article = self.get_article_from_candidates(candidates,best_candidates,html_partial)
else:
if ruthless and not recallPriority:
log.debug("ruthless removal did not work. ")
ruthless = False
self.debug(
("ended up stripping too much - "
"going for a safer _parse"))
# try again
continue
else:
log.debug(
("Ruthless and lenient parsing did not work. "
"Returning raw html"))
article = self.html.find('body')
if article is None:
article = self.html
cleaned_article = self.sanitize(article, candidates)
# print(cleaned_article)
article_length = len(cleaned_article or '')
retry_length = self.options.get(
'retry_length',
self.RETRY_LENGTH)
of_acceptable_length = article_length >= retry_length
if ruthless and not of_acceptable_length:
ruthless = False
continue
else:
return cleaned_article
except Exception as e:
print("error: %s", e)
log.exception('error getting summary: ')
            raise Unparseable(str(e)).with_traceback(sys.exc_info()[2])
def get_article_from_candidates(self,candidates,best_candidates,html_partial=False):
nodes = []
if html_partial:
output = fragment_fromstring('<div/>')
else:
output = document_fromstring('<div/>')
for best_candidate in best_candidates:
article, nodes = self.get_article(candidates,best_candidate,nodes,html_partial)
output.append(article)
return output
def get_article(self, candidates, best_candidate,nodes, html_partial=False):
#print nodes
# Now that we have the top candidate, look through its siblings for
# content that might also be related.
# Things like preambles, content split by ads that we removed, etc.
sibling_score_threshold = max([
10,
best_candidate['content_score'] * 0.2])
# create a new html document with a html->body->div
if html_partial:
output = fragment_fromstring('<div/>')
else:
output = document_fromstring('<div/>')
best_elem = best_candidate['elem']
parent = best_elem.getparent()
siblings = parent.getchildren() if parent is not None else [best_elem]
for sibling in siblings:
            # in lxml there is no concept of simple text
# if isinstance(sibling, NavigableString): continue
append = False
if sibling is best_elem:
append = True
sibling_key = sibling # HashableElement(sibling)
if sibling_key in candidates and \
candidates[sibling_key]['content_score'] >= sibling_score_threshold:
append = True
if sibling.tag == "p":
link_density = self.get_link_density(sibling)
node_content = sibling.text or ""
node_length = len(node_content)
if node_length > 20 and link_density < 0.25:
append = True
elif node_length <= 20 \
and link_density == 0 \
and re.search('\.( |$)', node_content):
append = True
if append:
                if sibling not in nodes:
nodes += self.get_all_child_nodes(sibling)
if html_partial:
output.append(sibling)
else:
output.getchildren()[0].getchildren()[0].append(sibling)
return output,nodes
def get_all_child_nodes(self,candidate):
#Gets all child nodes
queue,nodes = deque(),[]
for sibling in candidate.getchildren():
queue.append(sibling)
while queue:
current = queue.popleft()
for sib in current.getchildren():
queue.append(sib)
nodes.append(current)
return nodes
def select_best_candidates(self,candidates):
        #Gets all candidates with a positive content score
sorted_candidates = sorted(candidates.values(), key=lambda x: x['content_score'], reverse=True)
if len(sorted_candidates) == 0:
return None
output = []
for candidate in sorted_candidates:
            if candidate['content_score'] <= 0:
break
output.append(candidate)
return output
def select_best_candidate(self, candidates):
sorted_candidates = sorted(candidates.values(), key=lambda x: x['content_score'], reverse=True)
for candidate in sorted_candidates[:5]:
elem = candidate['elem']
self.debug("Top 5 : %6.3f %s" % (
candidate['content_score'],
describe(elem)))
if len(sorted_candidates) == 0:
return None
best_candidate = sorted_candidates[0]
return best_candidate
def get_link_density(self, elem):
link_length = 0
for i in elem.findall(".//a"):
link_length += text_length(i)
total_length = text_length(elem)
return float(link_length) / max(total_length, 1)
def score_paragraphs(self, ):
MIN_LEN = self.options.get(
'min_text_length',
self.TEXT_LENGTH_THRESHOLD)
candidates = {}
ordered = []
for elem in self.tags(self._html(), "p", "pre", "td", "ul"):
parent_node = elem.getparent()
if parent_node is None:
continue
grand_parent_node = parent_node.getparent()
inner_text = clean(elem.text_content() or "")
inner_text_len = len(inner_text)
# If this paragraph is less than 25 characters
# don't even count it.
if inner_text_len < MIN_LEN:
continue
if parent_node not in candidates:
candidates[parent_node] = self.score_node(parent_node)
ordered.append(parent_node)
if grand_parent_node is not None and grand_parent_node not in candidates:
candidates[grand_parent_node] = self.score_node(
grand_parent_node)
ordered.append(grand_parent_node)
content_score = 1
content_score += len(inner_text.split(','))
content_score += min((inner_text_len / 100), 3)
#WTF? candidates[elem]['content_score'] += content_score
candidates[parent_node]['content_score'] += content_score
if grand_parent_node is not None:
candidates[grand_parent_node]['content_score'] += content_score / 2.0
# Scale the final candidates score based on link density. Good content
# should have a relatively small link density (5% or less) and be
# mostly unaffected by this operation.
for elem in ordered:
candidate = candidates[elem]
ld = self.get_link_density(elem)
score = candidate['content_score']
self.debug("Candid: %6.3f %s link density %.3f -> %6.3f" % (
score,
describe(elem),
ld,
score * (1 - ld)))
candidate['content_score'] *= (1 - ld)
return candidates
def class_weight(self, e):
weight = 0
for feature in [e.get('class', None), e.get('id', None)]:
if feature:
if REGEXES['negativeRe'].search(feature):
weight -= 25
if REGEXES['positiveRe'].search(feature):
weight += 25
if self.positive_keywords and self.positive_keywords.search(feature):
weight += 25
if self.negative_keywords and self.negative_keywords.search(feature):
weight -= 25
if self.positive_keywords and self.positive_keywords.match('tag-'+e.tag):
weight += 25
if self.negative_keywords and self.negative_keywords.match('tag-'+e.tag):
weight -= 25
return weight
def score_node(self, elem):
content_score = self.class_weight(elem)
name = elem.tag.lower()
if name == "div":
content_score += 5
elif name in ["pre", "td", "blockquote"]:
content_score += 3
elif name in ["address", "ol", "ul", "dl", "dd", "dt", "li", "form"]:
content_score -= 3
elif name in ["h1", "h2", "h3", "h4", "h5", "h6", "th"]:
content_score -= 5
return {
'content_score': content_score,
'elem': elem
}
def debug(self, *a):
if self.options.get('debug', False):
log.debug(*a)
def remove_unlikely_candidates(self):
for elem in self.html.iter():
s = "%s %s" % (elem.get('class', ''), elem.get('id', ''))
if len(s) < 2:
continue
if REGEXES['unlikelyCandidatesRe'].search(s) and (not REGEXES['okMaybeItsACandidateRe'].search(s)) and elem.tag not in ['html', 'body']:
self.debug("Removing unlikely candidate - %s" % describe(elem))
elem.drop_tree()
def transform_misused_divs_into_paragraphs(self):
for elem in self.tags(self.html, 'div'):
# transform <div>s that do not contain other block elements into
# <p>s
#FIXME: The current implementation ignores all descendants that
# are not direct children of elem
# This results in incorrect results in case there is an <img>
# buried within an <a> for example
if not REGEXES['divToPElementsRe'].search(
''.join(map(lambda x: x.decode("utf-8"), map(tostring, list(elem))))):
#self.debug("Altering %s to p" % (describe(elem)))
elem.tag = "p"
#print "Fixed element "+describe(elem)
for elem in self.tags(self.html, 'div'):
if elem.text and elem.text.strip():
p = fragment_fromstring('<p/>')
p.text = elem.text
elem.text = None
elem.insert(0, p)
#print "Appended "+tounicode(p)+" to "+describe(elem)
for pos, child in reversed(list(enumerate(elem))):
if child.tail and child.tail.strip():
p = fragment_fromstring('<p/>')
p.text = child.tail
child.tail = None
elem.insert(pos + 1, p)
#print "Inserted "+tounicode(p)+" to "+describe(elem)
if child.tag == 'br':
#print 'Dropped <br> at '+describe(elem)
child.drop_tree()
def tags(self, node, *tag_names):
for tag_name in tag_names:
for e in node.findall('.//%s' % tag_name):
yield e
def reverse_tags(self, node, *tag_names):
for tag_name in tag_names:
for e in reversed(node.findall('.//%s' % tag_name)):
yield e
def sanitize(self, node, candidates):
MIN_LEN = self.options.get('min_text_length',
self.TEXT_LENGTH_THRESHOLD)
for header in self.tags(node, "h1", "h2", "h3", "h4", "h5", "h6"):
if self.class_weight(header) < 0 or self.get_link_density(header) > 0.33:
header.drop_tree()
for elem in self.tags(node, "form", "textarea"):
elem.drop_tree()
for elem in self.tags(node, "iframe"):
if "src" in elem.attrib and REGEXES["videoRe"].search(elem.attrib["src"]):
elem.text = "VIDEO" # ADD content to iframe text node to force <iframe></iframe> proper output
else:
elem.drop_tree()
allowed = {}
# Conditionally clean <table>s, <ul>s, and <div>s
for el in self.reverse_tags(node, "table", "ul", "div"):
if el in allowed:
continue
weight = self.class_weight(el)
if el in candidates:
content_score = candidates[el]['content_score']
#print '!',el, '-> %6.3f' % content_score
else:
content_score = 0
tag = el.tag
if weight + content_score < 0:
self.debug("Cleaned %s with score %6.3f and weight %-3s" %
(describe(el), content_score, weight, ))
el.drop_tree()
elif el.text_content().count(",") < 10:
counts = {}
for kind in ['p','td', 'img', 'li', 'a', 'embed', 'input']:
counts[kind] = len(el.findall('.//%s' % kind))
counts["li"] -= 100
counts["input"] -= len(el.findall('.//input[@type="hidden"]'))
# Count the text length excluding any surrounding whitespace
content_length = text_length(el)
link_density = self.get_link_density(el)
parent_node = el.getparent()
if parent_node is not None:
if parent_node in candidates:
content_score = candidates[parent_node]['content_score']
else:
content_score = 0
to_remove = False
reason = ""
if el.tag == 'div' and counts["img"] >= 1 and content_length == 0:
reason = "Just images (%s)" % counts["img"]
#print reason,el
to_remove = True
# if el.tag == "ul":
# print el.text_content()
# continue
if counts["td"] and counts["img"] > 1+counts["td"]*1.3:
reason = "too many images (%s)" % counts["img"]
to_remove = True
elif counts["li"] > counts["p"] and tag != "ul" and tag != "ol":
reason = "more <li>s than <p>s"
to_remove = True
elif counts["input"] > (counts["p"] / 3):
reason = "less than 3x <p>s than <input>s"
to_remove = True
elif content_length < (MIN_LEN) and (counts["img"] == 0 or counts["img"] > 2):
reason = "too short content length %s without a single image" % content_length
to_remove = True
elif weight < 25 and link_density > 0.2:
reason = "too many links %.3f for its weight %s" % (
link_density, weight)
to_remove = True
elif weight >= 25 and link_density > 0.5:
reason = "too many links %.3f for its weight %s" % (
link_density, weight)
to_remove = True
elif (counts["embed"] == 1 and content_length < 75) or counts["embed"] > 1:
reason = "<embed>s with too short content length, or too many <embed>s"
to_remove = True
#Removes all social network link matches and removes the links from output
if el.findall('.//a'):
for ele in el.cssselect("a"):
link = ele.get('href')
try:
if link is not None:
if REGEXES['socialNetworksRe'].search(link):
ele.drop_tree()
except Exception as e:
print("Skip:", link, "cant be parsed")
if el.tag == 'div' and counts['img'] >= 1 and to_remove:
imgs = el.findall('.//img')
valid_img = False
self.debug(tounicode(el))
for img in imgs:
height = img.get('height')
text_len = img.get('text_length')
self.debug ("height %s text_length %s" %(repr(height), repr(text_len)))
                        if type(to_int(height)) == int and type(to_int(text_len)) == int:
if to_int(height) >= 100 or to_int(text_len) >= 25:
valid_img = True
self.debug("valid image" + tounicode(img))
break
if valid_img:
to_remove = False
#print ("Allowing %s" %el.text_content())
self.debug("Allowing %s" %el.text_content())
for desnode in self.tags(el, "table", "ul", "div"):
allowed[desnode] = True
#find x non empty preceding and succeeding siblings
i, j = 0, 0
x = 1
siblings = []
for sib in el.itersiblings():
#self.debug(sib.text_content())
sib_content_length = text_length(sib)
if sib_content_length:
                        i += 1
siblings.append(sib_content_length)
if i == x:
break
for sib in el.itersiblings(preceding=True):
#self.debug(sib.text_content())
sib_content_length = text_length(sib)
if sib_content_length:
                        j += 1
siblings.append(sib_content_length)
if j == x:
break
#self.debug(str(siblings))
if siblings and sum(siblings) > 1000:
to_remove = False
self.debug("Allowing %s" % describe(el))
for desnode in self.tags(el, "table", "ul", "div"):
allowed[desnode] = True
if to_remove:
self.debug("Cleaned %6.3f %s with weight %s cause it has %s." %
(content_score, describe(el), weight, reason))
el.drop_tree()
for el in ([node] + [n for n in node.iter()]):
if not self.options.get('attributes', None):
#el.attrib = {} #FIXME:Checkout the effects of disabling this
pass
self.html = node
return self.get_clean_html()
class HashableElement():
def __init__(self, node):
self.node = node
self._path = None
def _get_path(self):
if self._path is None:
reverse_path = []
node = self.node
while node is not None:
node_id = (node.tag, tuple(node.attrib.items()), node.text)
reverse_path.append(node_id)
node = node.getparent()
self._path = tuple(reverse_path)
return self._path
path = property(_get_path)
def __hash__(self):
return hash(self.path)
def __eq__(self, other):
return self.path == other.path
def __getattr__(self, tag):
return getattr(self.node, tag)
def main():
from optparse import OptionParser
parser = OptionParser(usage="%prog: [options] [file]")
parser.add_option('-v', '--verbose', action='store_true')
parser.add_option('-u', '--url', default=None, help="use URL instead of a local file")
parser.add_option('-p', '--positive-keywords', default=None, help="positive keywords (separated with comma)", action='store')
parser.add_option('-n', '--negative-keywords', default=None, help="negative keywords (separated with comma)", action='store')
(options, args) = parser.parse_args()
if not (len(args) == 1 or options.url):
parser.print_help()
sys.exit(1)
file = None
if options.url:
import urllib.request
file = urllib.request.urlopen(options.url)
else:
file = open(args[0], 'rt')
    enc = sys.__stdout__.encoding or 'utf-8'  # XXX: this hack may not always work, better to set PYTHONIOENCODING
try:
print(Document(file.read(),
debug=options.verbose,
url=options.url,
positive_keywords = options.positive_keywords,
negative_keywords = options.negative_keywords,
).summary().encode(enc, 'replace'))
finally:
file.close()
if __name__ == '__main__':
main()
|
|
'''
This is an extended unittest module for Kivy, for writing unit tests based on
graphics rendered with an OpenGL context.
The idea is to let the user render a Widget tree; after 1, 2 or x frames, a
screenshot is taken and compared to the reference one.
If no reference screenshot exists for the current test, the very first one
taken will be used as the reference.
The screenshots live in kivy/tests/results, in PNG format, 320x240.
'''
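# A minimal usage sketch (hypothetical test name; assumes kivy.uix.button is
# available in the test environment):
#
#   from kivy.uix.button import Button
#
#   class ButtonRenderTestCase(GraphicUnitTest):
#       def test_button_draw(self):
#           # take the screenshot two frames after the widget is displayed
#           self.render(Button(text='hello'), framecount=2)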
__all__ = ('GraphicUnitTest', )
import unittest
import logging
import os
log = logging.getLogger('unittest')
_base = object
if not bool(int(os.environ.get('USE_OPENGL_MOCK', 0))):
_base = unittest.TestCase
class GraphicUnitTest(_base):
def render(self, root, framecount=1):
        '''Call the rendering process using the `root` widget.
        The screenshot will be taken after `framecount` frames.
'''
from kivy.base import runTouchApp
self.framecount = framecount
runTouchApp(root)
# reset for the next test, but nobody will know if it will be used :/
if self.test_counter != 0:
self.tearDown(fake=True)
self.setUp()
def run(self, name):
        '''Extend the unittest run to check whether the results directory has
        been found. If no results directory exists, the test will be ignored.
'''
from os.path import join, dirname, exists
results_dir = join(dirname(__file__), 'results')
if not exists(results_dir):
log.warning('No result directory found, cancel test.')
return
self.test_counter = 0
self.results_dir = results_dir
self.test_failed = False
return super(GraphicUnitTest, self).run(name)
def setUp(self):
'''Prepare the graphic test, with:
- Window size fixed to 320x240
- Default kivy configuration
- Without any kivy input
'''
# use default kivy configuration (don't load user file.)
from os import environ
environ['KIVY_USE_DEFAULTCONFIG'] = '1'
# force window size + remove all inputs
from kivy.config import Config
Config.set('graphics', 'width', '320')
Config.set('graphics', 'height', '240')
for items in Config.items('input'):
Config.remove_option('input', items[0])
# bind ourself for the later screenshot
from kivy.core.window import Window
Window.bind(on_flip=self.on_window_flip)
        # ensure our window is correctly created
Window.create_window()
Window.canvas.clear()
def on_window_flip(self, window):
        '''Internal method to be called when the window has just displayed an
        image.
        Each time an image is shown, we decrement our framecount. When
        framecount reaches 0, we take the screenshot.
        The screenshot is written to a temporary location and compared to the
        reference one -> test ok/ko.
        If no screenshot is available in the results directory, a new one will
        be created.
'''
from kivy.base import EventLoop
from tempfile import mkstemp
from os.path import join, exists
from os import unlink, close
from shutil import move, copy
# don't save screenshot until we have enough frames.
#log.debug('framecount %d' % self.framecount)
self.framecount -= 1
if self.framecount > 0:
return
reffn = None
match = False
try:
# just get a temporary name
fd, tmpfn = mkstemp(suffix='.png', prefix='kivyunit-')
close(fd)
unlink(tmpfn)
# get a filename for the current unit test
self.test_counter += 1
test_uid = '%s-%d.png' % (
'_'.join(self.id().split('.')[-2:]),
self.test_counter)
# capture the screen
log.info('Capturing screenshot for %s' % test_uid)
tmpfn = window.screenshot(tmpfn)
log.info('Capture saved at %s' % tmpfn)
# search the file to compare to
reffn = join(self.results_dir, test_uid)
log.info('Compare with %s' % reffn)
# get sourcecode
import inspect
frame = inspect.getouterframes(inspect.currentframe())[6]
sourcecodetab, line = inspect.getsourcelines(frame[0])
line = frame[2] - line
currentline = sourcecodetab[line]
sourcecodetab[line] = '<span style="color: red;">%s</span>' % (
currentline)
sourcecode = ''.join(sourcecodetab)
sourcecodetab[line] = '>>>>>>>>\n%s<<<<<<<<\n' % currentline
sourcecodeask = ''.join(sourcecodetab)
if not exists(reffn):
                log.info('No reference image, move %s to be the reference?' % test_uid)
if self.interactive_ask_ref(sourcecodeask, tmpfn, self.id()):
move(tmpfn, reffn)
tmpfn = reffn
log.info('Image used as reference')
match = True
else:
log.info('Image discarded')
else:
import pygame
s1 = pygame.image.load(tmpfn)
s2 = pygame.image.load(reffn)
sd1 = pygame.image.tostring(s1, 'RGB')
sd2 = pygame.image.tostring(s2, 'RGB')
if sd1 != sd2:
log.critical(
'%s at render() #%d, images are different.' % (
self.id(), self.test_counter))
if self.interactive_ask_diff(sourcecodeask,
tmpfn, reffn, self.id()):
                        log.critical('user asked to use it as the reference.')
move(tmpfn, reffn)
tmpfn = reffn
match = True
else:
self.test_failed = True
else:
match = True
# generate html
from os.path import join, dirname, exists, basename
from os import mkdir
build_dir = join(dirname(__file__), 'build')
if not exists(build_dir):
mkdir(build_dir)
copy(reffn, join(build_dir, 'ref_%s' % basename(reffn)))
if tmpfn != reffn:
copy(tmpfn, join(build_dir, 'test_%s' % basename(reffn)))
with open(join(build_dir, 'index.html'), 'at') as fd:
color = '#ffdddd' if not match else '#ffffff'
fd.write('<div style="background-color: %s">' % color)
fd.write('<h2>%s #%d</h2>' % (self.id(), self.test_counter))
fd.write('<table><tr><th>Reference</th>'
'<th>Test</th>'
'<th>Comment</th>')
fd.write('<tr><td><img src="ref_%s"/></td>' %
basename(reffn))
if tmpfn != reffn:
fd.write('<td><img src="test_%s"/></td>' %
basename(reffn))
else:
                    fd.write('<td>First time, no comparison.</td>')
fd.write('<td><pre>%s</pre></td>' % sourcecode)
fd.write('</table></div>')
finally:
try:
if reffn != tmpfn:
unlink(tmpfn)
except:
pass
EventLoop.stop()
def tearDown(self, fake=False):
'''When the test is finished, stop the application, and unbind our
current flip callback.
'''
from kivy.base import stopTouchApp
from kivy.core.window import Window
Window.unbind(on_flip=self.on_window_flip)
stopTouchApp()
if not fake and self.test_failed:
self.assertTrue(False)
super(GraphicUnitTest, self).tearDown()
def interactive_ask_ref(self, code, imagefn, testid):
from os import environ
if 'UNITTEST_INTERACTIVE' not in environ:
return True
from tkinter import Tk, Label, LEFT, RIGHT, BOTTOM, Button
from PIL import Image, ImageTk
self.retval = False
root = Tk()
def do_close():
root.destroy()
def do_yes():
self.retval = True
do_close()
image = Image.open(imagefn)
photo = ImageTk.PhotoImage(image)
        Label(root, text='The test %s\nhas no reference.' % testid).pack()
        Label(root, text='Use this image as a reference?').pack()
Label(root, text=code, justify=LEFT).pack(side=RIGHT)
Label(root, image=photo).pack(side=LEFT)
Button(root, text='Use as reference', command=do_yes).pack(side=BOTTOM)
Button(root, text='Discard', command=do_close).pack(side=BOTTOM)
root.mainloop()
return self.retval
def interactive_ask_diff(self, code, tmpfn, reffn, testid):
from os import environ
if 'UNITTEST_INTERACTIVE' not in environ:
return False
from tkinter import Tk, Label, LEFT, RIGHT, BOTTOM, Button
from PIL import Image, ImageTk
self.retval = False
root = Tk()
def do_close():
root.destroy()
def do_yes():
self.retval = True
do_close()
phototmp = ImageTk.PhotoImage(Image.open(tmpfn))
photoref = ImageTk.PhotoImage(Image.open(reffn))
        Label(root, text='The test %s\nhas generated a different '
              'image than the reference one.' % testid).pack()
        Label(root, text='Which one is correct?').pack()
Label(root, text=code, justify=LEFT).pack(side=RIGHT)
Label(root, image=phototmp).pack(side=RIGHT)
Label(root, image=photoref).pack(side=LEFT)
Button(root, text='Use the new image -->',
command=do_yes).pack(side=BOTTOM)
Button(root, text='<-- Use the reference',
command=do_close).pack(side=BOTTOM)
root.mainloop()
return self.retval
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extremely random forest graph builder. go/brain-tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import sys
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.tensor_forest.python import constants
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
# A convenience class for holding random forest hyperparameters.
#
# To just get some good default parameters, use:
# hparams = ForestHParams(num_classes=2, num_features=40).fill()
#
# Note that num_classes can not be inferred and so must always be specified.
# Also, either num_splits_to_consider or num_features should be set.
#
# To override specific values, pass them to the constructor:
# hparams = ForestHParams(num_classes=5, num_trees=10, num_features=5).fill()
#
# TODO(thomaswc): Inherit from tf.HParams when that is publicly available.
class ForestHParams(object):
"""A base class for holding hyperparameters and calculating good defaults."""
def __init__(self,
num_trees=100,
max_nodes=10000,
bagging_fraction=1.0,
num_splits_to_consider=0,
feature_bagging_fraction=1.0,
max_fertile_nodes=0,
split_after_samples=250,
min_split_samples=5,
valid_leaf_threshold=1,
dominate_method='bootstrap',
dominate_fraction=0.99,
**kwargs):
self.num_trees = num_trees
self.max_nodes = max_nodes
self.bagging_fraction = bagging_fraction
self.feature_bagging_fraction = feature_bagging_fraction
self.num_splits_to_consider = num_splits_to_consider
self.max_fertile_nodes = max_fertile_nodes
self.split_after_samples = split_after_samples
self.min_split_samples = min_split_samples
self.valid_leaf_threshold = valid_leaf_threshold
self.dominate_method = dominate_method
self.dominate_fraction = dominate_fraction
for name, value in kwargs.items():
setattr(self, name, value)
def values(self):
return self.__dict__
def fill(self):
"""Intelligently sets any non-specific parameters."""
# Fail fast if num_classes or num_features isn't set.
_ = getattr(self, 'num_classes')
_ = getattr(self, 'num_features')
self.bagged_num_features = int(self.feature_bagging_fraction *
self.num_features)
self.bagged_features = None
if self.feature_bagging_fraction < 1.0:
self.bagged_features = [random.sample(
range(self.num_features),
self.bagged_num_features) for _ in range(self.num_trees)]
self.regression = getattr(self, 'regression', False)
# Num_outputs is the actual number of outputs (a single prediction for
    # classification, an N-dimensional point for regression).
self.num_outputs = self.num_classes if self.regression else 1
# Add an extra column to classes for storing counts, which is needed for
# regression and avoids having to recompute sums for classification.
self.num_output_columns = self.num_classes + 1
# Our experiments have found that num_splits_to_consider = num_features
# gives good accuracy.
self.num_splits_to_consider = self.num_splits_to_consider or min(
self.num_features, 1000)
self.max_fertile_nodes = (self.max_fertile_nodes or
int(math.ceil(self.max_nodes / 2.0)))
# We have num_splits_to_consider slots to fill, and we want to spend
# approximately split_after_samples samples initializing them.
    num_split_initializations_per_input = max(1, int(math.floor(
        self.num_splits_to_consider / self.split_after_samples)))
    self.split_initializations_per_input = getattr(
        self, 'split_initializations_per_input',
        num_split_initializations_per_input)
# If base_random_seed is 0, the current time will be used to seed the
# random number generators for each tree. If non-zero, the i-th tree
# will be seeded with base_random_seed + i.
self.base_random_seed = getattr(self, 'base_random_seed', 0)
return self
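    # For example (derived directly from the defaults above, not extra
    # configuration): ForestHParams(num_classes=3, num_features=10).fill()
    # ends up with num_output_columns=4, num_outputs=1,
    # num_splits_to_consider=10, max_fertile_nodes=5000,
    # bagged_num_features=10 and split_initializations_per_input=1.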
def get_epoch_variable():
"""Returns the epoch variable, or [0] if not defined."""
# Grab epoch variable defined in
# //third_party/tensorflow/python/training/input.py::limit_epochs
for v in tf_variables.local_variables():
if 'limit_epochs/epoch' in v.op.name:
return array_ops.reshape(v, [1])
# TODO(thomaswc): Access epoch from the data feeder.
return [0]
# A simple container to hold the training variables for a single tree.
class TreeTrainingVariables(object):
"""Stores tf.Variables for training a single random tree.
Uses tf.get_variable to get tree-specific names so that this can be used
with a tf.learn-style implementation (one that trains a model, saves it,
then relies on restoring that model to evaluate).
"""
def __init__(self, params, tree_num, training):
self.tree = variable_scope.get_variable(
name=self.get_tree_name('tree', tree_num), dtype=dtypes.int32,
shape=[params.max_nodes, 2],
initializer=init_ops.constant_initializer(-2))
self.tree_thresholds = variable_scope.get_variable(
name=self.get_tree_name('tree_thresholds', tree_num),
shape=[params.max_nodes],
initializer=init_ops.constant_initializer(-1.0))
self.end_of_tree = variable_scope.get_variable(
name=self.get_tree_name('end_of_tree', tree_num),
dtype=dtypes.int32,
initializer=constant_op.constant([1]))
self.start_epoch = variable_scope.get_variable(
name=self.get_tree_name('start_epoch', tree_num),
dtype=dtypes.int32, shape=[params.max_nodes],
initializer=init_ops.constant_initializer(0))
if training:
self.node_to_accumulator_map = variable_scope.get_variable(
name=self.get_tree_name('node_to_accumulator_map', tree_num),
shape=[params.max_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.accumulator_to_node_map = variable_scope.get_variable(
name=self.get_tree_name('accumulator_to_node_map', tree_num),
shape=[params.max_fertile_nodes],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_features = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_features', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(-1))
self.candidate_split_thresholds = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_thresholds', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider],
initializer=init_ops.constant_initializer(0.0))
# Statistics shared by classification and regression.
self.node_sums = variable_scope.get_variable(
name=self.get_tree_name('node_sums', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
if training:
self.candidate_split_sums = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_sums = variable_scope.get_variable(
name=self.get_tree_name('accumulator_sums', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
# Regression also tracks second order stats.
if params.regression:
self.node_squares = variable_scope.get_variable(
name=self.get_tree_name('node_squares', tree_num),
shape=[params.max_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.candidate_split_squares = variable_scope.get_variable(
name=self.get_tree_name('candidate_split_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_splits_to_consider,
params.num_output_columns],
initializer=init_ops.constant_initializer(0.0))
self.accumulator_squares = variable_scope.get_variable(
name=self.get_tree_name('accumulator_squares', tree_num),
shape=[params.max_fertile_nodes, params.num_output_columns],
initializer=init_ops.constant_initializer(-1.0))
else:
self.node_squares = constant_op.constant(
0.0, name=self.get_tree_name('node_squares', tree_num))
self.candidate_split_squares = constant_op.constant(
0.0, name=self.get_tree_name('candidate_split_squares', tree_num))
self.accumulator_squares = constant_op.constant(
0.0, name=self.get_tree_name('accumulator_squares', tree_num))
def get_tree_name(self, name, num):
return '{0}-{1}'.format(name, num)
class ForestStats(object):
def __init__(self, tree_stats, params):
"""A simple container for stats about a forest."""
self.tree_stats = tree_stats
self.params = params
def get_average(self, thing):
val = 0.0
for i in range(self.params.num_trees):
val += getattr(self.tree_stats[i], thing)
return val / self.params.num_trees
class TreeStats(object):
def __init__(self, num_nodes, num_leaves):
self.num_nodes = num_nodes
self.num_leaves = num_leaves
class ForestTrainingVariables(object):
"""A container for a forests training data, consisting of multiple trees.
Instantiates a TreeTrainingVariables object for each tree. We override the
__getitem__ and __setitem__ function so that usage looks like this:
forest_variables = ForestTrainingVariables(params)
... forest_variables.tree ...
"""
def __init__(self, params, device_assigner, training=True,
tree_variables_class=TreeTrainingVariables):
self.variables = []
for i in range(params.num_trees):
with ops.device(device_assigner.get_device(i)):
self.variables.append(tree_variables_class(params, i, training))
def __setitem__(self, t, val):
self.variables[t] = val
def __getitem__(self, t):
return self.variables[t]
class RandomForestDeviceAssigner(object):
"""A device assigner that uses the default device.
Write subclasses that implement get_device for control over how trees
get assigned to devices. This assumes that whole trees are assigned
to a device.
"""
def __init__(self):
self.cached = None
def get_device(self, unused_tree_num):
if not self.cached:
dummy = constant_op.constant(0)
self.cached = dummy.device
return self.cached
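# A hedged sketch of a custom assigner (device strings are assumptions and
# should match the hardware actually available):
#
#   class RoundRobinGpuAssigner(RandomForestDeviceAssigner):
#
#     def __init__(self, num_gpus):
#       self.num_gpus = num_gpus
#
#     def get_device(self, tree_num):
#       # spread whole trees across GPUs in round-robin order
#       return '/gpu:%d' % (tree_num % self.num_gpus)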
class RandomForestGraphs(object):
"""Builds TF graphs for random forest training and inference."""
def __init__(self, params, device_assigner=None,
variables=None, tree_variables_class=TreeTrainingVariables,
tree_graphs=None, training=True,
t_ops=training_ops,
i_ops=inference_ops):
self.params = params
self.device_assigner = device_assigner or RandomForestDeviceAssigner()
logging.info('Constructing forest with params = ')
logging.info(self.params.__dict__)
self.variables = variables or ForestTrainingVariables(
self.params, device_assigner=self.device_assigner, training=training,
tree_variables_class=tree_variables_class)
tree_graph_class = tree_graphs or RandomTreeGraphs
self.trees = [
tree_graph_class(
self.variables[i], self.params,
t_ops.Load(), i_ops.Load(), i)
for i in range(self.params.num_trees)]
def _bag_features(self, tree_num, input_data):
split_data = array_ops.split(1, self.params.num_features, input_data)
return array_ops.concat_v2(
[split_data[ind] for ind in self.params.bagged_features[tree_num]], 1)
def training_graph(self,
input_data,
input_labels,
data_spec=None,
**tree_kwargs):
"""Constructs a TF graph for training a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
**tree_kwargs: Keyword arguments passed to each tree's training_graph.
Returns:
The last op in the random forest training graph.
"""
data_spec = [constants.DATA_FLOAT] if data_spec is None else data_spec
tree_graphs = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
seed = self.params.base_random_seed
if seed != 0:
seed += i
# If using bagging, randomly select some of the input.
tree_data = input_data
tree_labels = input_labels
if self.params.bagging_fraction < 1.0:
          # TODO(thomaswc): This does sampling without replacement. Consider
# also allowing sampling with replacement as an option.
batch_size = array_ops.strided_slice(
array_ops.shape(input_data), [0], [1])
r = random_ops.random_uniform(batch_size, seed=seed)
mask = math_ops.less(
r, array_ops.ones_like(r) * self.params.bagging_fraction)
gather_indices = array_ops.squeeze(
array_ops.where(mask), squeeze_dims=[1])
# TODO(thomaswc): Calculate out-of-bag data and labels, and store
# them for use in calculating statistics later.
tree_data = array_ops.gather(input_data, gather_indices)
tree_labels = array_ops.gather(input_labels, gather_indices)
if self.params.bagged_features:
tree_data = self._bag_features(i, tree_data)
initialization = self.trees[i].tree_initialization()
with ops.control_dependencies([initialization]):
tree_graphs.append(
self.trees[i].training_graph(
tree_data, tree_labels, seed, data_spec=data_spec,
**tree_kwargs))
return control_flow_ops.group(*tree_graphs, name='train')
def inference_graph(self, input_data, data_spec=None, **inference_args):
"""Constructs a TF graph for evaluating a random forest.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
**inference_args: Keyword arguments to pass through to each tree.
Returns:
The last op in the random forest inference graph.
"""
data_spec = [constants.DATA_FLOAT] if data_spec is None else data_spec
probabilities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_data = input_data
if self.params.bagged_features:
tree_data = self._bag_features(i, input_data)
probabilities.append(self.trees[i].inference_graph(
tree_data, data_spec, **inference_args))
with ops.device(self.device_assigner.get_device(0)):
all_predict = array_ops.pack(probabilities)
return math_ops.div(
math_ops.reduce_sum(all_predict, 0), self.params.num_trees,
name='probabilities')
def average_size(self):
"""Constructs a TF graph for evaluating the average size of a forest.
Returns:
The average number of nodes over the trees.
"""
sizes = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(math_ops.to_float(array_ops.pack(sizes)))
# pylint: disable=unused-argument
def training_loss(self, features, labels, data_spec=None,
name='training_loss'):
return math_ops.neg(self.average_size(), name=name)
# pylint: disable=unused-argument
def validation_loss(self, features, labels):
return math_ops.neg(self.average_size())
def average_impurity(self):
"""Constructs a TF graph for evaluating the leaf impurity of a forest.
Returns:
The last op in the graph.
"""
impurities = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
impurities.append(self.trees[i].average_impurity())
return math_ops.reduce_mean(array_ops.pack(impurities))
def get_stats(self, session):
tree_stats = []
for i in range(self.params.num_trees):
with ops.device(self.device_assigner.get_device(i)):
tree_stats.append(self.trees[i].get_stats(session))
return ForestStats(tree_stats, self.params)
def one_hot_wrapper(num_classes, loss_fn):
"""Some loss functions take one-hot labels."""
def _loss(probs, targets):
one_hot_labels = array_ops.one_hot(
math_ops.to_int32(targets), num_classes,
on_value=1., off_value=0., dtype=dtypes.float32)
return loss_fn(probs, one_hot_labels)
return _loss
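  # For example, one_hot_wrapper(3, loss_ops.log_loss) yields a loss function
  # that first turns integer targets such as [0, 2] into one-hot rows
  # [[1., 0., 0.], [0., 0., 1.]] before calling log_loss on them.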
class TrainingLossForest(RandomForestGraphs):
"""Random Forest that uses training loss as the termination criteria."""
def __init__(self, params, loss_fn=None, **kwargs):
"""Initialize.
Args:
params: Like RandomForestGraphs, a ForestHParams object.
loss_fn: A function that takes probabilities and targets and returns
a loss for each example.
**kwargs: Keyword args to pass to superclass (RandomForestGraphs).
"""
self.loss_fn = loss_fn or one_hot_wrapper(params.num_classes,
loss_ops.log_loss)
self._loss = None
super(TrainingLossForest, self).__init__(params, **kwargs)
def _get_loss(self, features, labels, data_spec=None):
"""Constructs, caches, and returns the inference-based loss."""
if self._loss is not None:
return self._loss
def _average_loss():
probs = self.inference_graph(features, data_spec=data_spec)
return math_ops.reduce_sum(self.loss_fn(
probs, labels)) / math_ops.to_float(
array_ops.shape(features)[0])
self._loss = control_flow_ops.cond(
self.average_size() > 0, _average_loss,
lambda: constant_op.constant(sys.maxsize, dtype=dtypes.float32))
return self._loss
def training_graph(self, input_data, input_labels, data_spec=None,
**kwargs):
loss = self._get_loss(input_data, input_labels, data_spec=data_spec)
with ops.control_dependencies([loss.op]):
return super(TrainingLossForest, self).training_graph(
input_data, input_labels, **kwargs)
def training_loss(self, features, labels, data_spec=None,
name='training_loss'):
return array_ops.identity(
self._get_loss(features, labels, data_spec=data_spec), name=name)
class RandomTreeGraphs(object):
"""Builds TF graphs for random tree training and inference."""
def __init__(self, variables, params, t_ops, i_ops, tree_num):
self.training_ops = t_ops
self.inference_ops = i_ops
self.variables = variables
self.params = params
self.tree_num = tree_num
def tree_initialization(self):
def _init_tree():
return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op
def _nothing():
return control_flow_ops.no_op()
return control_flow_ops.cond(
math_ops.equal(
array_ops.squeeze(
array_ops.strided_slice(self.variables.tree, [0, 0], [1, 1])),
-2), _init_tree, _nothing)
def _gini(self, class_counts):
"""Calculate the Gini impurity.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = 1 - sum_i ( c(i) / c )^2
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
A 1-D tensor of the Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return 1.0 - sum_squares / (sums * sums)
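  # Illustrative check of the smoothed Gini formula above (assumed values, not
  # from the original tests): a row whose smoothed class counts are [2., 2.]
  # gives sums = 4, sum_squares = 8 and an impurity of 1 - 8 / (4 * 4) = 0.5,
  # the maximum possible value for two classes.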
def _weighted_gini(self, class_counts):
"""Our split score is the Gini impurity times the number of examples.
If c(i) denotes the i-th class count and c = sum_i c(i) then
score = c * (1 - sum_i ( c(i) / c )^2 )
= c - sum_i c(i)^2 / c
Args:
class_counts: A 2-D tensor of per-class counts, usually a slice or
gather from variables.node_sums.
Returns:
      A 1-D tensor of the weighted Gini impurities for each row in the input.
"""
smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
sums = math_ops.reduce_sum(smoothed, 1)
sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
return sums - sum_squares / sums
def _variance(self, sums, squares):
"""Calculate the variance for each row of the input tensors.
Variance is V = E[x^2] - (E[x])^2.
Args:
sums: A tensor containing output sums, usually a slice from
variables.node_sums. Should contain the number of examples seen
in index 0 so we can calculate expected value.
squares: Same as sums, but sums of squares.
Returns:
A 1-D tensor of the variances for each row in the input.
"""
total_count = array_ops.slice(sums, [0, 0], [-1, 1])
e_x = sums / total_count
e_x2 = squares / total_count
return math_ops.reduce_sum(e_x2 - math_ops.square(e_x), 1)
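  # Illustrative check of the variance formula above (assumed values): a node
  # that has seen 4 examples with output sum 8 and sum of squares 20 has
  # sums = [[4., 8.]] and squares = [[4., 20.]], so E[x] = 2, E[x^2] = 5 and
  # the returned variance is 5 - 2**2 = 1 (the count column contributes 0).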
def training_graph(self,
input_data,
input_labels,
random_seed,
data_spec,
input_weights=None):
"""Constructs a TF graph for training a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
input_labels: A tensor or placeholder for labels associated with
input_data.
random_seed: The random number generator seed to use for this tree. 0
means use the current time as the seed.
data_spec: A list of tf.dtype values specifying the original types of
each column.
input_weights: A float tensor or placeholder holding per-input weights,
or None if all inputs are to be weighted equally.
Returns:
The last op in the random tree training graph.
"""
epoch = math_ops.to_int32(get_epoch_variable())
if input_weights is None:
input_weights = []
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, sparse_tensor.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.dense_shape
input_data = []
# Count extremely random stats.
(node_sums, node_squares, splits_indices, splits_sums, splits_squares,
totals_indices, totals_sums, totals_squares,
input_leaves) = (self.training_ops.count_extremely_random_stats(
input_data,
sparse_indices,
sparse_values,
sparse_shape,
data_spec,
input_labels,
input_weights,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
self.variables.start_epoch,
epoch,
num_classes=self.params.num_output_columns,
regression=self.params.regression))
node_update_ops = []
node_update_ops.append(
state_ops.assign_add(self.variables.node_sums, node_sums))
splits_update_ops = []
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_sums,
splits_indices, splits_sums))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_sums, totals_indices,
totals_sums))
if self.params.regression:
node_update_ops.append(state_ops.assign_add(self.variables.node_squares,
node_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.candidate_split_squares,
splits_indices, splits_squares))
splits_update_ops.append(self.training_ops.scatter_add_ndim(
self.variables.accumulator_squares, totals_indices,
totals_squares))
# Sample inputs.
update_indices, feature_updates, threshold_updates = (
self.training_ops.sample_inputs(
input_data,
sparse_indices,
sparse_values,
sparse_shape,
input_weights,
self.variables.node_to_accumulator_map,
input_leaves,
self.variables.candidate_split_features,
self.variables.candidate_split_thresholds,
split_initializations_per_input=(
self.params.split_initializations_per_input),
split_sampling_random_seed=random_seed))
update_features_op = state_ops.scatter_update(
self.variables.candidate_split_features, update_indices,
feature_updates)
update_thresholds_op = state_ops.scatter_update(
self.variables.candidate_split_thresholds, update_indices,
threshold_updates)
# Calculate finished nodes.
with ops.control_dependencies(splits_update_ops):
# Passing input_leaves to finished nodes here means that nodes that
# have become stale won't be deallocated until an input reaches them,
# because we're trying to avoid considering every fertile node for
# performance reasons.
finished, stale = self.training_ops.finished_nodes(
input_leaves,
self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
self.variables.start_epoch,
epoch,
num_split_after_samples=self.params.split_after_samples,
min_split_samples=self.params.min_split_samples,
dominate_method=self.params.dominate_method,
dominate_fraction=self.params.dominate_fraction)
# Update leaf scores.
# TODO(thomaswc): Store the leaf scores in a TopN and only update the
# scores of the leaves that were touched by this batch of input.
children = array_ops.squeeze(
array_ops.slice(self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(
array_ops.squeeze(
array_ops.where(is_leaf), squeeze_dims=[1]))
non_fertile_leaves = array_ops.boolean_mask(
leaves, math_ops.less(array_ops.gather(
self.variables.node_to_accumulator_map, leaves), 0))
# TODO(gilberth): It should be possible to limit the number of non
# fertile leaves we calculate scores for, especially since we can only take
# at most array_ops.shape(finished)[0] of them.
with ops.control_dependencies(node_update_ops):
sums = array_ops.gather(self.variables.node_sums, non_fertile_leaves)
if self.params.regression:
squares = array_ops.gather(self.variables.node_squares,
non_fertile_leaves)
non_fertile_leaf_scores = self._variance(sums, squares)
else:
non_fertile_leaf_scores = self._weighted_gini(sums)
# Calculate best splits.
with ops.control_dependencies(splits_update_ops):
split_indices = self.training_ops.best_splits(
finished, self.variables.node_to_accumulator_map,
self.variables.candidate_split_sums,
self.variables.candidate_split_squares,
self.variables.accumulator_sums,
self.variables.accumulator_squares,
regression=self.params.regression)
# Grow tree.
with ops.control_dependencies([update_features_op, update_thresholds_op]):
(tree_update_indices, tree_children_updates, tree_threshold_updates,
new_eot) = (self.training_ops.grow_tree(
self.variables.end_of_tree, self.variables.node_to_accumulator_map,
finished, split_indices, self.variables.candidate_split_features,
self.variables.candidate_split_thresholds))
tree_update_op = state_ops.scatter_update(
self.variables.tree, tree_update_indices, tree_children_updates)
thresholds_update_op = state_ops.scatter_update(
self.variables.tree_thresholds, tree_update_indices,
tree_threshold_updates)
# TODO(thomaswc): Only update the epoch on the new leaves.
new_epoch_updates = epoch * array_ops.ones_like(tree_threshold_updates,
dtype=dtypes.int32)
epoch_update_op = state_ops.scatter_update(
self.variables.start_epoch, tree_update_indices,
new_epoch_updates)
# Update fertile slots.
with ops.control_dependencies([tree_update_op]):
(n2a_map_updates, a2n_map_updates, accumulators_cleared,
accumulators_allocated) = (self.training_ops.update_fertile_slots(
finished,
non_fertile_leaves,
non_fertile_leaf_scores,
self.variables.end_of_tree,
self.variables.accumulator_sums,
self.variables.node_to_accumulator_map,
stale,
self.variables.node_sums,
regression=self.params.regression))
# Ensure end_of_tree doesn't get updated until UpdateFertileSlots has
# used it to calculate new leaves.
gated_new_eot, = control_flow_ops.tuple(
[new_eot], control_inputs=[n2a_map_updates])
eot_update_op = state_ops.assign(self.variables.end_of_tree, gated_new_eot)
updates = []
updates.append(eot_update_op)
updates.append(tree_update_op)
updates.append(thresholds_update_op)
updates.append(epoch_update_op)
updates.append(
state_ops.scatter_update(self.variables.node_to_accumulator_map,
n2a_map_updates[0], n2a_map_updates[1]))
updates.append(
state_ops.scatter_update(self.variables.accumulator_to_node_map,
a2n_map_updates[0], a2n_map_updates[1]))
cleared_and_allocated_accumulators = array_ops.concat_v2(
[accumulators_cleared, accumulators_allocated], 0)
# Calculate values to put into scatter update for candidate counts.
# Candidate split counts are always reset back to 0 for both cleared
# and allocated accumulators. This means some accumulators might be doubly
    # reset to 0 if they were released and not allocated, then later allocated.
split_values = array_ops.tile(
array_ops.expand_dims(array_ops.expand_dims(
array_ops.zeros_like(cleared_and_allocated_accumulators,
dtype=dtypes.float32), 1), 2),
[1, self.params.num_splits_to_consider, self.params.num_output_columns])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_sums,
cleared_and_allocated_accumulators, split_values))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.candidate_split_squares,
cleared_and_allocated_accumulators, split_values))
# Calculate values to put into scatter update for total counts.
total_cleared = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(accumulators_cleared,
dtype=dtypes.float32)), 1),
[1, self.params.num_output_columns])
total_reset = array_ops.tile(
array_ops.expand_dims(
array_ops.zeros_like(accumulators_allocated,
dtype=dtypes.float32), 1),
[1, self.params.num_output_columns])
accumulator_updates = array_ops.concat_v2([total_cleared, total_reset], 0)
updates.append(state_ops.scatter_update(
self.variables.accumulator_sums,
cleared_and_allocated_accumulators, accumulator_updates))
if self.params.regression:
updates.append(state_ops.scatter_update(
self.variables.accumulator_squares,
cleared_and_allocated_accumulators, accumulator_updates))
# Calculate values to put into scatter update for candidate splits.
split_features_updates = array_ops.tile(
array_ops.expand_dims(
math_ops.neg(array_ops.ones_like(
cleared_and_allocated_accumulators)), 1),
[1, self.params.num_splits_to_consider])
updates.append(state_ops.scatter_update(
self.variables.candidate_split_features,
cleared_and_allocated_accumulators, split_features_updates))
updates += self.finish_iteration()
return control_flow_ops.group(*updates)
def finish_iteration(self):
"""Perform any operations that should be done at the end of an iteration.
This is mostly useful for subclasses that need to reset variables after
an iteration, such as ones that are used to finish nodes.
Returns:
A list of operations.
"""
return []
def inference_graph(self, input_data, data_spec):
"""Constructs a TF graph for evaluating a random tree.
Args:
input_data: A tensor or SparseTensor or placeholder for input data.
data_spec: A list of tf.dtype values specifying the original types of
each column.
Returns:
The last op in the random tree inference graph.
"""
sparse_indices = []
sparse_values = []
sparse_shape = []
if isinstance(input_data, sparse_tensor.SparseTensor):
sparse_indices = input_data.indices
sparse_values = input_data.values
sparse_shape = input_data.dense_shape
input_data = []
return self.inference_ops.tree_predictions(
input_data, sparse_indices, sparse_values, sparse_shape, data_spec,
self.variables.tree,
self.variables.tree_thresholds,
self.variables.node_sums,
valid_leaf_threshold=self.params.valid_leaf_threshold)
def average_impurity(self):
"""Constructs a TF graph for evaluating the average leaf impurity of a tree.
If in regression mode, this is the leaf variance. If in classification mode,
this is the gini impurity.
Returns:
The last op in the graph.
"""
children = array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
is_leaf = math_ops.equal(constants.LEAF_NODE, children)
leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
squeeze_dims=[1]))
counts = array_ops.gather(self.variables.node_sums, leaves)
gini = self._weighted_gini(counts)
# Guard against step 1, when there often are no leaves yet.
def impurity():
return gini
# Since average impurity can be used for loss, when there's no data just
# return a big number so that loss always decreases.
def big():
return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
return control_flow_ops.cond(math_ops.greater(
array_ops.shape(leaves)[0], 0), impurity, big)
def size(self):
"""Constructs a TF graph for evaluating the current number of nodes.
Returns:
The current number of nodes in the tree.
"""
return self.variables.end_of_tree - 1
def get_stats(self, session):
num_nodes = self.variables.end_of_tree.eval(session=session) - 1
num_leaves = array_ops.where(
math_ops.equal(array_ops.squeeze(array_ops.slice(
self.variables.tree, [0, 0], [-1, 1])), constants.LEAF_NODE)
).eval(session=session).shape[0]
return TreeStats(num_nodes, num_leaves)
|
|
from __future__ import division
import numpy as np
import chainer
from chainercv.links.model.ssd import MultiboxCoder
from chainercv import transforms
class SSD(chainer.Chain):
"""Base class of Single Shot Multibox Detector.
This is a base class of Single Shot Multibox Detector [#]_.
.. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,
Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector. ECCV 2016.
Args:
extractor: A link which extracts feature maps.
This link must have :obj:`insize`, :obj:`grids` and
:meth:`forward`.
* :obj:`insize`: An integer which indicates \
the size of input images. Images are resized to this size before \
feature extraction.
* :obj:`grids`: An iterable of integer. Each integer indicates \
the size of feature map. This value is used by \
                :class:`~chainercv.links.model.ssd.MultiboxCoder`.
            * :meth:`forward`: A method which computes feature maps. \
                It must take batched images and return batched feature maps.
multibox: A link which computes :obj:`mb_locs` and :obj:`mb_confs`
from feature maps.
This link must have :obj:`n_class`, :obj:`aspect_ratios` and
:meth:`forward`.
* :obj:`n_class`: An integer which indicates the number of \
classes. \
This value should include the background class.
* :obj:`aspect_ratios`: An iterable of tuple of integer. \
Each tuple indicates the aspect ratios of default bounding boxes \
                at each feature map. This value is used by \
:class:`~chainercv.links.model.ssd.MultiboxCoder`.
* :meth:`forward`: A method which computes \
:obj:`mb_locs` and :obj:`mb_confs`. \
                It must take batched feature maps and \
return :obj:`mb_locs` and :obj:`mb_confs`.
steps (iterable of float): The step size for each feature map.
This value is used by
:class:`~chainercv.links.model.ssd.MultiboxCoder`.
sizes (iterable of float): The base size of default bounding boxes
for each feature map. This value is used by
:class:`~chainercv.links.model.ssd.MultiboxCoder`.
variance (tuple of floats): Two coefficients for decoding
the locations of bounding boxes.
This value is used by
:class:`~chainercv.links.model.ssd.MultiboxCoder`.
The default value is :obj:`(0.1, 0.2)`.
Parameters:
nms_thresh (float): The threshold value
for :func:`~chainercv.utils.non_maximum_suppression`.
The default value is :obj:`0.45`.
This value can be changed directly or by using :meth:`use_preset`.
score_thresh (float): The threshold value for confidence score.
            If a bounding box's confidence score is lower than this value,
the bounding box will be suppressed.
The default value is :obj:`0.6`.
This value can be changed directly or by using :meth:`use_preset`.
"""
def __init__(
self, extractor, multibox,
steps, sizes, variance=(0.1, 0.2),
mean=0):
self.mean = mean
self.use_preset('visualize')
super(SSD, self).__init__()
with self.init_scope():
self.extractor = extractor
self.multibox = multibox
self.coder = MultiboxCoder(
extractor.grids, multibox.aspect_ratios, steps, sizes, variance)
@property
def insize(self):
return self.extractor.insize
@property
def n_fg_class(self):
return self.multibox.n_class - 1
def to_cpu(self):
super(SSD, self).to_cpu()
self.coder.to_cpu()
def to_gpu(self, device=None):
super(SSD, self).to_gpu(device)
self.coder.to_gpu(device=device)
def forward(self, x):
"""Compute localization and classification from a batch of images.
This method computes two variables, :obj:`mb_locs` and :obj:`mb_confs`.
:func:`self.coder.decode` converts these variables to bounding box
coordinates and confidence scores.
These variables are also used in training SSD.
Args:
x (chainer.Variable): A variable holding a batch of images.
The images are preprocessed by :meth:`_prepare`.
Returns:
tuple of chainer.Variable:
This method returns two variables, :obj:`mb_locs` and
:obj:`mb_confs`.
* **mb_locs**: A variable of float arrays of shape \
:math:`(B, K, 4)`, \
where :math:`B` is the number of samples in the batch and \
:math:`K` is the number of default bounding boxes.
* **mb_confs**: A variable of float arrays of shape \
:math:`(B, K, n\_fg\_class + 1)`.
"""
return self.multibox(self.extractor(x))
def _prepare(self, img):
img = img.astype(np.float32)
img = transforms.resize(img, (self.insize, self.insize))
img -= self.mean
return img
def use_preset(self, preset):
"""Use the given preset during prediction.
This method changes values of :obj:`nms_thresh` and
:obj:`score_thresh`. These values are a threshold value
used for non maximum suppression and a threshold value
to discard low confidence proposals in :meth:`predict`,
respectively.
If the attributes need to be changed to something
other than the values provided in the presets, please modify
them by directly accessing the public attributes.
Args:
preset ({'visualize', 'evaluate'}): A string to determine the
preset to use.
"""
if preset == 'visualize':
self.nms_thresh = 0.45
self.score_thresh = 0.6
elif preset == 'evaluate':
self.nms_thresh = 0.45
self.score_thresh = 0.01
else:
raise ValueError('preset must be visualize or evaluate')
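    # Illustrative usage of the presets (a sketch; the concrete subclass and
    # pretrained weights name are assumptions, not part of this base class):
    #
    #   model = SSD300(pretrained_model='voc0712')
    #   model.use_preset('evaluate')  # score_thresh drops to 0.01 for mAP
    #   bboxes, labels, scores = model.predict([img])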
def predict(self, imgs):
"""Detect objects from images.
This method predicts objects for each image.
Args:
imgs (iterable of numpy.ndarray): Arrays holding images.
All images are in CHW and RGB format
and the range of their value is :math:`[0, 255]`.
Returns:
tuple of lists:
This method returns a tuple of three lists,
:obj:`(bboxes, labels, scores)`.
* **bboxes**: A list of float arrays of shape :math:`(R, 4)`, \
            where :math:`R` is the number of bounding boxes in an image. \
Each bounding box is organized by \
:math:`(y_{min}, x_{min}, y_{max}, x_{max})` \
in the second axis.
* **labels** : A list of integer arrays of shape :math:`(R,)`. \
Each value indicates the class of the bounding box. \
Values are in range :math:`[0, L - 1]`, where :math:`L` is the \
number of the foreground classes.
* **scores** : A list of float arrays of shape :math:`(R,)`. \
Each value indicates how confident the prediction is.
"""
x = []
sizes = []
for img in imgs:
_, H, W = img.shape
img = self._prepare(img)
x.append(self.xp.array(img))
sizes.append((H, W))
with chainer.using_config('train', False), \
chainer.function.no_backprop_mode():
x = chainer.Variable(self.xp.stack(x))
mb_locs, mb_confs = self.forward(x)
mb_locs, mb_confs = mb_locs.array, mb_confs.array
bboxes = []
labels = []
scores = []
for mb_loc, mb_conf, size in zip(mb_locs, mb_confs, sizes):
bbox, label, score = self.coder.decode(
mb_loc, mb_conf, self.nms_thresh, self.score_thresh)
bbox = transforms.resize_bbox(
bbox, (self.insize, self.insize), size)
bboxes.append(chainer.backends.cuda.to_cpu(bbox))
labels.append(chainer.backends.cuda.to_cpu(label))
scores.append(chainer.backends.cuda.to_cpu(score))
return bboxes, labels, scores
|
|
import astropy.time
import astropy.units as u
from astropy.coordinates import get_sun
# don't think the following is needed
# import sunpy.map
# don't think the following is needed
# from sunpy import sun
import numpy as np
def _get_sun_pos(met):
"""
    Convenience wrapper for getting the solar position
    and north pole angle at a given time.
    Syntax:
    ----------
    sun_pos, sun_np = _get_sun_pos(met)
    Parameters
    ----------
    met: Time for the observation, given in MJD.
    Returns
    -------
    sun_pos:
        The position of the center of the Sun (RA/Dec), with degree units.
    sun_np:
        The position angle of the solar north pole at that time.
"""
# Deprecated from sunpy v1 onwards
# from sunpy.coordinates import get_sun_P
# Use new version instead
from sunpy.coordinates import sun
sun_time = astropy.time.Time(met, format = 'mjd')
astro_sun_pos = get_sun(sun_time)
# Get the center of the Sun, and assign it degrees.
sun_pos = np.array([astro_sun_pos.ra.deg, astro_sun_pos.dec.deg])* u.deg
# Solar NP roll angle:
# sun_np = get_sun_P(last_met)
sun_np = sun.P(met)
    return sun_pos, sun_np
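# Illustrative usage of _get_sun_pos (the MJD value is an assumption):
#
#   sun_pos, sun_np = _get_sun_pos(58000.0)
#   # sun_pos is the Sun's [RA, Dec] in degrees, sun_np the solar P angle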
def _xy_to_radec(evtdata, hdr):
""" Conversion function to go from X/Y coordinates
in the FITS file to RA/Dec coordinates.
"""
from nustar_pysolar.utils import convert_nustar_time
# Parse the header information
for field in hdr.keys():
if field.find('TYPE') != -1:
if hdr[field] == 'X':
# print(hdr[field][5:8])
xval = field[5:8]
if hdr[field] == 'Y':
# print(hdr[field][5:8])
yval = field[5:8]
ra_ref = hdr['TCRVL'+xval]*u.deg
x0 = hdr['TCRPX'+xval]
delx = hdr['TCDLT'+xval] * u.deg
dec_ref = hdr['TCRVL'+yval]*u.deg
y0 = hdr['TCRPX'+yval]
dely = hdr['TCDLT'+yval]*u.deg
# Make local copies for convenience
x = evtdata['X']
y = evtdata['Y']
# Convert the NuSTAR epoch times to MJD
met = convert_nustar_time(evtdata['Time'], astropy_time=True)
# mjdref=hdr['MJDREFI']
# met = evtdata['TIME']*u.s + mjdref*u.d
# time = astropy.time.Time(mjdref*u.d+met, format = 'mjd')
# Convert X and Y to RA/dec
ra_x = ra_ref + (x - x0) * delx / np.cos(dec_ref)
dec_y = dec_ref + (y - y0) * dely
return ra_x, dec_y, met
def _delta_solar(ra_x, dec_y, met, **kwargs):
""" Function to compute the offsets from the center of
the Sun as a function of time.
Use the tStep argument to define how often you want
to update the solar ephemeris. Default is every 5 seconds.
Inputs: ra_x, dec_y, and met are all arrays that contain the
RA, Dec, and time of arrival of each count, respectively.
Outputs: sun_x, sun_y are the x and y values (in arcseconds)
from the center of the Sun.
"""
# How often you want to update the solar ephemeris:
tStep=kwargs.get('tStep', 5.0)
tStep = tStep * u.s
# How many events do you want to do?
maxEvt=kwargs.get('maxEvt', len(ra_x))
# Keep last time we updated things
last_met = met[0] - tStep * 2.
last_i = 0
sun_x = np.zeros_like(ra_x)
sun_y = np.zeros_like(dec_y)
for i in np.arange(len(ra_x)):
if( (met[i] - last_met) > tStep ):
(sun_pos, sun_np) = _get_sun_pos(last_met)
last_met = met[i]
# Rotation matrix for a counter-clockwise rotation since we're going
# back to celestial north from solar north
rotMatrix = np.array([[np.cos(sun_np), np.sin(sun_np)],
[-np.sin(sun_np), np.cos(sun_np)]])
# Diagnostics
# di = (i -last_i)
# print("Updating Sun position...")
# if di > 0:
# print(i, di)
# dt = toc()
# tic()
# last_i = i
# print("Time per event: ",dt / float(di) )
# From here on we do things for every photon:
ph_pos = np.array([ra_x[i].value, dec_y[i].value]) * u.deg
offset = ph_pos - sun_pos
# Project the offset onto the Sun
delta_offset = ((np.dot(offset, rotMatrix)).to(u.arcsec))
# Account for East->West conversion for +X direction in heliophysics coords
delta_offset = delta_offset*[-1., 1.]
sun_x[i] = delta_offset[0]
sun_y[i] = delta_offset[1]
if (i>maxEvt):
break
return sun_x, sun_y
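# Illustrative call of _delta_solar (a sketch; the inputs come from
# _xy_to_radec and the tStep value is an assumption):
#
#   ra_x, dec_y, met = _xy_to_radec(evtdata, hdr)
#   sun_x, sun_y = _delta_solar(ra_x, dec_y, met, tStep=10.0)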
def _delta_solar_skyfield(ra_x, dec_y, met, **kwargs):
""" Function to compute the offsets from the center of
the Sun as a function of time.
Use the tStep argument to define how often you want
to update the solar ephemeris. Default is every 5 seconds.
Inputs: ra_x, dec_y, and met are all arrays that contain the
RA, Dec, and time of arrival of each count, respectively. The arrival time
must have astropy units attached to it.
Outputs: sun_x, sun_y are the x and y values (in arcseconds)
from the center of the Sun in the +North and +West directions.
"""
import astropy.units as u
from nustar_pysolar.utils import skyfield_ephem
# Don't think this is needed
# from sunpy import sun
# Deprecated from sunpy v1 onwards
# from sunpy.coordinates import get_sun_P
# Use new version instead
from sunpy.coordinates import sun
# How often you want to update the solar ephemeris:
tStep=kwargs.get('tStep', 5.0)
tStep = tStep * u.s
load_path=kwargs.get('load_path', None)
observer, TheSun, ts = skyfield_ephem(load_path = load_path,
parallax_correction=True,
utc=met[0])
# How many events do you want to do?
maxEvt=kwargs.get('maxEvt', len(ra_x))
# Keep last time we updated things
last_met = met[0] - tStep * 2.
last_i = 0
sun_x = np.zeros_like(ra_x)
sun_y = np.zeros_like(dec_y)
for i in np.arange(len(ra_x)):
if( (met[i] - last_met) > tStep ):
last_met = met[i]
tcheck = ts.from_astropy(last_met)
astrometric = observer.at(tcheck).observe(TheSun)
this_ra, this_dec, dist = astrometric.radec()
# Get the center of the Sun, and assign it degrees.
            # Doing it this way is necessary to do the vector math below.
sun_pos = np.array([this_ra.to(u.deg).value,
this_dec.to(u.deg).value])*u.deg
# sun_np = get_sun_P(last_met)
sun_np = sun.P(last_met)
# Rotation matrix for a counter-clockwise rotation since we're going
# back to celestial north from solar north
rotMatrix = np.array([[np.cos(sun_np), np.sin(sun_np)],
[-np.sin(sun_np), np.cos(sun_np)]])
# Diagnostics
# di = (i -last_i)
# print("Updating Sun position...")
# if di > 0:
# print(i, di)
# dt = toc()
# tic()
# last_i = i
# print("Time per event: ",dt / float(di) )
# From here on we do things for every photon:
ph_pos = np.array([ra_x[i].value, dec_y[i].value]) * u.deg
offset = ph_pos - sun_pos
# Project the offset onto the Sun
delta_offset = ((np.dot(offset, rotMatrix)).to(u.arcsec))
# Account for East->West conversion for +X direction in heliophysics coords
delta_offset = delta_offset*[-1., 1.]
sun_x[i] = delta_offset[0]
sun_y[i] = delta_offset[1]
if (i>maxEvt):
break
return sun_x, sun_y
def to_solar(evtdata, hdr, **kwargs):
    """ Main script to convert the events to solar coordinates.
    Inputs:
    --------
    evtdata: input event data from a NuSTAR Level2 FITS file.
        (i.e. nu*A06_cl.evt)
    hdr: The header from the same NuSTAR FITS file.
    Outputs:
    --------
    tbldata: Binary table with the positions converted from RA/Dec
        to heliocentric coordinates.
    outhdr: A new FITS header with the appropriate keywords adjusted for
        heliocentric coordinates.
    Syntax:
    --------
    tbldata, outhdr = to_solar(evtdata, hdr)
    """
    import astropy.time
    import astropy.units as u
    from astropy.coordinates import get_sun
    # don't think the following is needed
    # import sunpy.map
    # don't think the following is needed
    # from sunpy import sun
    import numpy as np
# Convert events to RA/dec coordinates
(ra_x, dec_y, met) = _xy_to_radec(evtdata, hdr)
    # Convert to solar coordinates
(sun_x, sun_y) = _delta_solar_skyfield(ra_x, dec_y, met, **kwargs)
# Parse the header information to get the native bin size
for field in list(hdr.keys()):
if field.find('TYPE') != -1:
if hdr[field] == 'X':
# print(hdr[field][5:8])
xval = field[5:8]
if hdr[field] == 'Y':
# print(hdr[field][5:8])
yval = field[5:8]
delx = -1.0 * hdr['TCDLT'+xval] * u.deg
dely = hdr['TCDLT'+yval]*u.deg
    # Make output variables:
tbldata=evtdata.copy()
outhdr = hdr.copy()
# change to 0-3000 pixels using the same pixel size.
maxX = 3000
maxY = 3000
x0 = maxX / 2.
y0 = maxY / 2.
# Make image coordinates
out_sun_x=(1.0)*(sun_x / delx) + x0
out_sun_y=(sun_y / dely) + y0
# tbldata['X'] = out_sun_x **Removed
# tbldata['Y'] = out_sun_y **Removed
# #Update header information **Removed
# outhdr['TCRVL'+xval] = 0. **Removed
# outhdr['TCRPX'+xval] = x0 **Removed
# outhdr['TCDLT'+xval] = 1.0 * delx.to(u.arcsec).value **Removed
# outhdr['TLMAX'+xval] = maxX **Removed
# outhdr['TCRVL'+yval] = 0. **Removed
# outhdr['TCRPX'+yval] = x0 **Removed
# outhdr['TCDLT'+yval] = dely.to(u.arcsec).value **Removed
# outhdr['TLMAX'+yval] = maxY **Removed
# return tbldata, outhdr **Removed
#**************************** added ***************************************************
# Astropy update (>3. I think) means that the keywords
# that we need to change are now protected.
# This means that they are now determined from the data
# columns and can't just be reassigned.
# **See: https://github.com/astropy/astropy/issues/7145
# This is why lines 70--78 were removed.
from astropy.io import fits
# Remove the keywords so they don't have to be saved out to be updated.
del outhdr['TCRVL'+xval]
del outhdr['TCRPX'+xval]
del outhdr['TCDLT'+xval]
del outhdr['TCRVL'+yval]
del outhdr['TCRPX'+yval]
del outhdr['TCDLT'+yval]
# These ones can still be changed the old way for some reason.
outhdr['TLMAX'+xval] = maxX
outhdr['TLMAX'+yval] = maxY
# Get columns from the data
orig_cols = tbldata.columns
# Make new columns for the solar position coordinates in the X and Y fields.
# This method won't overwrite the X and Y fields if they are already there.
    # This is why lines 67 and 68 were removed.
    # To create the columns, the following format is used.
# **See: https://docs.astropy.org/en/stable/io/fits/usage/table.html
# fits.Column(name=TTYPE, format=TFORM, unit=TUNIT, null=TNULL,
# coord_ref_point=TCRPX, coord_ref_value=TCRVL, coord_inc=TCDLT,
# coord_type=TCTYP, coord_unit=TCUNI,
# array=data)
# Remove the RA---DEC X and Y data fields from the original columns
orig_cols.del_col('X')
orig_cols.del_col('Y')
# Now create the new columns which contain the keywords we
# need as well as the solar X and Y coordinates
new_cols = fits.ColDefs([fits.Column(name='X', format='1I', unit='pixel', null=-1,
coord_ref_point=x0, coord_ref_value=0.,
coord_inc=1.0 * delx.to(u.arcsec).value,
coord_type="Heliopro", coord_unit="arcsec",
array=out_sun_x),
fits.Column(name='Y', format='1I', unit='pixel', null=-1,
coord_ref_point=y0, coord_ref_value=0.,
coord_inc=dely.to(u.arcsec).value,
coord_type="Heliopro", coord_unit="arcsec",
array=out_sun_y)])
# Combine the old and new columns to create a new hdu structure
hdu = fits.BinTableHDU.from_columns(orig_cols + new_cols)
    # Separate the new hdu into its data and header components
    # so that the return values stay consistent with the original to_solar() output
new_tbldata, hdr_from_new_columns = hdu.data, hdu.header
# return the new table data, but also combine the original header with the new updated one
return new_tbldata, outhdr + hdr_from_new_columns
def convert_file(infile, **kwargs):
""" Wrapper to read the input file, convert the data to solar
coordinates and then write the output file.
Inputs:
--------
Path to the file to be converted. Code assumes this is
generated by nupipeline and so will have an ".evt" suffix.
Outputs:
--------
    Generates a new FITS file in the same location as the input
file but with a _sunpos.evt suffix
Usage:
--------
convert_file('path/to/file/nuXXXA06_cl.evt')
"""
from astropy.io import fits
import os, sys
from os.path import isfile, splitext
# Make sure input file exists:
if not isfile(infile):
print("File does not exist:", infile)
        sys.exit()
# Read in the event data:
hdulist = fits.open(infile)
evtdata = hdulist[1].data
hdr = hdulist[1].header
hdulist.close()
# Convert this to solar coordinates
(newdata, newhdr) = to_solar(evtdata, hdr, **kwargs)
# # Make the new filename:
(sfile, ext)=splitext(infile)
outfile=sfile+'_sunpos.evt'
# Remove output file if necessary
if isfile(outfile):
print(__name__+': '+outfile+' exists! Removing old version...')
os.remove(outfile)
print(__name__+': generating file: '+ outfile)
fits.writeto(outfile, newdata, newhdr)
return
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
from tacker.tests.functional.sol.vnflcm import base as vnflcm_base
from tacker.tests.functional.sol.vnflcm import fake_vnflcm
from tacker.tests.functional.sol_separated_nfvo.vnflcm import fake_grant
from tacker.tests.functional.sol_separated_nfvo.vnflcm import fake_vnfpkgm
class VnfLcmWithNfvoSeparator(vnflcm_base.BaseVnfLcmTest):
def _register_vnf_package_mock_response(self, package_dir="functional6"):
"""Prepare VNF package for test.
        Register the VNF package response with the fake NFVO server and set up cleanups.
Returns:
Response: VNF Package information
"""
# Pre Setting: Create vnf package.
sample_name = package_dir
csar_package_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"../../../etc/samples/etsi/nfv",
sample_name))
# Get VNFD id.
tempname, vnfd_id = vnflcm_base._create_csar_with_unique_vnfd_id(
csar_package_path)
with open(tempname, "rb") as f:
vnf_package_hash = hashlib.sha256(f.read()).hexdigest()
vnf_package_info = \
fake_vnfpkgm.VnfPackage.make_individual_response_body(
vnfd_id, vnf_package_hash)
vnf_package_id = vnf_package_info['id']
# Post Setting: Reserve deleting vnf package.
self.addCleanup(vnflcm_base._delete_vnf_package, self.tacker_client,
vnf_package_id)
# Set "VNF Packages" response
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('GET',
fake_vnfpkgm.VnfPackage.VNF_PACKAGE_REQ_PATH, status_code=200,
response_body=[vnf_package_info])
# Set "VNF Package content" response
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('GET',
os.path.join(
fake_vnfpkgm.VnfPackage.VNF_PACKAGE_REQ_PATH,
vnf_package_id,
'package_content'),
status_code=200,
response_headers={"Content-Type": "application/zip"},
content=tempname)
# Set "Individual VNF package artifact" response
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('GET',
os.path.join(
fake_vnfpkgm.VnfPackage.VNF_PACKAGE_REQ_PATH,
vnf_package_id,
'artifacts',
vnf_package_info['additionalArtifacts'][0]['artifactPath']),
status_code=200,
response_headers={"Content-Type": "application/zip"},
content=tempname)
# Set "VNFD of individual VNF package" response
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('GET',
os.path.join(
fake_vnfpkgm.VnfPackage.VNF_PACKAGE_REQ_PATH,
vnf_package_id,
'vnfd'),
status_code=200,
response_headers={"Content-Type": "application/zip"},
content=tempname)
return vnf_package_info
def test_inst_chgextconn_term(self):
"""Test basic life cycle operations with sample VNFD with UserData.
        In this test case, we do the following steps.
- Create subscription.
- Test notification.
- Create VNF instance.
- Instantiate VNF.
- List VNF instances.
- Show VNF instance.
- Change External VNF Connectivity.
- Get opOccs information.
- Terminate VNF.
- Delete VNF.
- Delete subscription.
- Show subscription.
"""
vnf_package_info = self._register_vnf_package_mock_response()
glance_image = self._list_glance_image()[0]
# Create subscription and register it.
callback_url = os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
request_body = fake_vnflcm.Subscription.make_create_request_body(
'http://localhost:{}{}'.format(
vnflcm_base.FAKE_SERVER_MANAGER.SERVER_PORT,
callback_url))
resp, response_body = self._register_subscription(request_body)
self.assertEqual(201, resp.status_code)
self.assert_http_header_location_for_subscription(resp.headers)
subscription_id = response_body.get('id')
self.addCleanup(self._delete_subscription, subscription_id)
# Test notification
self.assert_notification_get(callback_url)
# Create vnf instance
resp, vnf_instance = self._create_vnf_instance_from_body(
fake_vnflcm.VnfInstances.make_create_request_body(
vnf_package_info['vnfdId']))
vnf_instance_id = vnf_instance.get('id')
self._wait_lcm_done(vnf_instance_id=vnf_instance_id)
self._assert_create_vnf(resp, vnf_instance)
vnf_instance_name = vnf_instance['vnfInstanceName']
self.addCleanup(self._delete_vnf_instance, vnf_instance_id)
# Set Fake server response for Grant-Req(Instantiate)
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('POST',
fake_grant.Grant.GRANT_REQ_PATH, status_code=201,
callback=lambda req_headers,
req_body: fake_grant.Grant.make_inst_response_body(req_body,
self.vim['tenant_id'], glance_image.id))
# Instantiate vnf instance
request_body = fake_vnflcm.VnfInstances.\
make_inst_request_body_include_num_dynamic(
self.vim['tenant_id'], self.ext_networks,
self.ext_mngd_networks, self.ext_link_ports, self.ext_subnets)
resp, _ = self._instantiate_vnf_instance(vnf_instance_id, request_body)
self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
self._assert_instantiate_vnf(resp, vnf_instance_id)
# List vnf instances
filter_expr = {
'filter': "(eq,id,{});(eq,vnfInstanceName,{})".format(
vnf_instance_id, vnf_instance_name)}
resp, vnf_instances = self._list_vnf_instance(params=filter_expr)
self.assertEqual(200, resp.status_code)
self.assertEqual(1, len(vnf_instances))
# Show vnf instance
resp, vnf_instance = self._show_vnf_instance(vnf_instance_id)
self.assertEqual(200, resp.status_code)
        # Set Fake server response for Grant-Req(Change-ext-conn)
vnflcm_base.FAKE_SERVER_MANAGER.set_callback(
'POST',
fake_grant.Grant.GRANT_REQ_PATH,
status_code=201,
callback=lambda req_headers,
req_body: fake_grant.Grant.make_change_ext_conn_response_body(
req_body,
self.vim['tenant_id'],
glance_image.id))
# Change external connectivity
request_body = \
fake_vnflcm.VnfInstances.make_change_ext_conn_request_body(
self.vim['tenant_id'], self.changed_ext_networks,
self.changed_ext_subnets)
before_fixed_ips = self._get_fixed_ips(vnf_instance_id, request_body)
resp, _ = \
self._change_ext_conn_vnf_instance(vnf_instance_id, request_body)
self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
after_fixed_ips = self._get_fixed_ips(vnf_instance_id, request_body)
self.assertNotEqual(before_fixed_ips, after_fixed_ips)
vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
fake_grant.Grant.GRANT_REQ_PATH)
# get vnflcm_op_occ_id
callback_url = os.path.join(
vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
notify_mock_responses = vnflcm_base.FAKE_SERVER_MANAGER.get_history(
callback_url)
vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
callback_url)
vnflcm_op_occ_id = notify_mock_responses[0].request_body.get(
'vnfLcmOpOccId')
self.assertIsNotNone(vnflcm_op_occ_id)
# occ-show(chgextconn)
resp, op_occs_info = self._show_op_occs(vnflcm_op_occ_id)
self._assert_occ_show(resp, op_occs_info)
# Set Fake server response for Grant-Req(Terminate)
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('POST',
fake_grant.Grant.GRANT_REQ_PATH, status_code=201,
callback=lambda req_headers,
req_body: fake_grant.Grant.make_term_response_body(req_body))
        # Get stack information to terminate.
stack = self._get_heat_stack(vnf_instance_id)
resources_list = self._get_heat_resource_list(stack.id)
resource_name_list = [r.resource_name for r in resources_list]
glance_image_id_list = self._get_glance_image_list_from_stack_resource(
stack.id, resource_name_list)
# Terminate VNF
terminate_req_body = fake_vnflcm.VnfInstances.make_term_request_body()
resp, _ = self._terminate_vnf_instance(vnf_instance_id,
terminate_req_body)
self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
self._assert_terminate_vnf(resp, vnf_instance_id, stack.id,
resource_name_list, glance_image_id_list)
# Delete VNF
resp, _ = self._delete_vnf_instance(vnf_instance_id)
self._wait_lcm_done(vnf_instance_id=vnf_instance_id)
self.assert_delete_vnf(resp, vnf_instance_id)
# Delete Subscription
resp, response_body = self._delete_subscription(subscription_id)
self.assertEqual(204, resp.status_code)
# Check subscription was deleted
resp, show_body = self._show_subscription(subscription_id)
self.assertEqual(404, resp.status_code)
def test_inst_heal_term(self):
"""Test basic life cycle operations with sample VNFD with UserData.
        In this test case, we do the following steps.
- Create subscription.
- Create VNF instance.
- Instantiate VNF.
- Heal VNF with all VNFc.
- Terminate VNF
- Delete VNF
- Delete subscription
"""
vnf_package_info = self._register_vnf_package_mock_response()
glance_image = self._list_glance_image()[0]
# Create subscription and register it.
callback_url = os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
request_body = fake_vnflcm.Subscription.make_create_request_body(
'http://localhost:{}{}'.format(
vnflcm_base.FAKE_SERVER_MANAGER.SERVER_PORT,
callback_url))
resp, response_body = self._register_subscription(request_body)
self.assertEqual(201, resp.status_code)
self.assert_http_header_location_for_subscription(resp.headers)
self.assert_notification_get(callback_url)
subscription_id = response_body.get('id')
self.addCleanup(self._delete_subscription, subscription_id)
# Create vnf instance
resp, vnf_instance = self._create_vnf_instance_from_body(
fake_vnflcm.VnfInstances.make_create_request_body(
vnf_package_info['vnfdId']))
vnf_instance_id = vnf_instance.get('id')
self._wait_lcm_done(vnf_instance_id=vnf_instance_id)
self._assert_create_vnf(resp, vnf_instance)
self.addCleanup(self._delete_vnf_instance, vnf_instance_id)
# Set Fake server response for Grant-Req(Instantiate)
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('POST',
fake_grant.Grant.GRANT_REQ_PATH, status_code=201,
callback=lambda req_headers,
req_body: fake_grant.Grant.make_inst_response_body(req_body,
self.vim['tenant_id'], glance_image.id))
# Instantiate vnf instance
request_body = fake_vnflcm.VnfInstances.\
make_inst_request_body_include_num_dynamic(
self.vim['tenant_id'], self.ext_networks,
self.ext_mngd_networks, self.ext_link_ports, self.ext_subnets)
resp, _ = self._instantiate_vnf_instance(vnf_instance_id, request_body)
self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
self._assert_instantiate_vnf(resp, vnf_instance_id)
self._assert_stack_template(vnf_instance_id)
# Show vnf instance
resp, vnf_instance = self._show_vnf_instance(vnf_instance_id)
self.assertEqual(200, resp.status_code)
# Set Fake server response for Grant-Req(Heal)
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('POST',
fake_grant.Grant.GRANT_REQ_PATH, status_code=201,
callback=lambda req_headers,
req_body: fake_grant.Grant.make_heal_response_body(req_body,
self.vim['tenant_id'], glance_image.id))
        # Heal vnf (using existing vnfc_instance_id values)
vnfc_instance_id_list = []
for vnfc in vnf_instance.get('instantiatedVnfInfo', {}).\
get('vnfcResourceInfo', []):
if vnfc.get('vduId') == 'VDU1':
vnfc_instance_id_list.append(vnfc.get('id'))
request_body = fake_vnflcm.VnfInstances.make_heal_request_body(
vnfc_instance_id_list)
resp, _ = self._heal_vnf_instance(vnf_instance_id, request_body)
self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
self._assert_heal_vnf(resp, vnf_instance_id)
self._assert_stack_template(vnf_instance_id)
# Set Fake server response for Grant-Req(Terminate)
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('POST',
fake_grant.Grant.GRANT_REQ_PATH, status_code=201,
callback=lambda req_headers,
req_body: fake_grant.Grant.make_term_response_body(req_body))
        # Get stack information to terminate.
stack = self._get_heat_stack(vnf_instance_id)
resources_list = self._get_heat_resource_list(stack.id)
resource_name_list = [r.resource_name for r in resources_list]
glance_image_id_list = self._get_glance_image_list_from_stack_resource(
stack.id, resource_name_list)
# Terminate VNF
terminate_req_body = fake_vnflcm.VnfInstances.make_term_request_body()
resp, _ = self._terminate_vnf_instance(vnf_instance_id,
terminate_req_body)
self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
self._assert_terminate_vnf(resp, vnf_instance_id, stack.id,
resource_name_list, glance_image_id_list)
# Delete VNF
resp, _ = self._delete_vnf_instance(vnf_instance_id)
self._wait_lcm_done(vnf_instance_id=vnf_instance_id)
self.assert_delete_vnf(resp, vnf_instance_id)
# Delete Subscription
resp, response_body = self._delete_subscription(subscription_id)
self.assertEqual(204, resp.status_code)
def test_inst_scale_term(self):
"""Test basic life cycle operations with sample VNFD with UserData.
        In this test case, we do the following steps.
- Create subscription.
- Create VNF instance.
- Instantiate VNF.
- Get VNF informations.
- Scale-Out VNF
- Scale-In VNF
- Terminate VNF
- Delete VNF
- Delete subscription
"""
vnf_package_info = self._register_vnf_package_mock_response(
package_dir='functional7')
glance_image = self._list_glance_image()[0]
# Create subscription and register it.
callback_url = os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
self._testMethodName)
request_body = fake_vnflcm.Subscription.make_create_request_body(
'http://localhost:{}{}'.format(
vnflcm_base.FAKE_SERVER_MANAGER.SERVER_PORT,
callback_url))
resp, response_body = self._register_subscription(request_body)
self.assertEqual(201, resp.status_code)
self.assert_http_header_location_for_subscription(resp.headers)
self.assert_notification_get(callback_url)
subscription_id = response_body.get('id')
self.addCleanup(self._delete_subscription, subscription_id)
# Create vnf instance
resp, vnf_instance = self._create_vnf_instance_from_body(
fake_vnflcm.VnfInstances.make_create_request_body(
vnf_package_info['vnfdId']))
vnf_instance_id = vnf_instance.get('id')
self._wait_lcm_done(vnf_instance_id=vnf_instance_id)
self._assert_create_vnf(resp, vnf_instance)
self.addCleanup(self._delete_vnf_instance, vnf_instance_id)
# Set Fake server response for Grant-Req(Instantiate)
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('POST',
fake_grant.Grant.GRANT_REQ_PATH, status_code=201,
callback=lambda req_headers,
req_body: fake_grant.Grant.make_inst_response_body(req_body,
self.vim['tenant_id'], glance_image.id))
# Instantiate vnf instance
request_body = fake_vnflcm.VnfInstances.\
make_inst_request_body_include_num_dynamic(
self.vim['tenant_id'], self.ext_networks,
self.ext_mngd_networks, self.ext_link_ports, self.ext_subnets)
resp, _ = self._instantiate_vnf_instance(vnf_instance_id, request_body)
self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
self._assert_instantiate_vnf(resp, vnf_instance_id)
self._assert_stack_template_scale(vnf_instance_id)
# Show vnf instance
resp, vnf_instance = self._show_vnf_instance(vnf_instance_id)
self.assertEqual(200, resp.status_code)
# Set Fake server response for Grant-Req(Scale-out)
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('POST',
fake_grant.Grant.GRANT_REQ_PATH, status_code=201,
callback=lambda req_headers,
req_body: fake_grant.Grant.make_scaleout_response_body(req_body,
self.vim['tenant_id'], glance_image.id))
# Scale-out vnf instance
stack = self._get_heat_stack(vnf_instance_id)
pre_stack_resource_list = self._get_heat_resource_list(stack.id, 2)
request_body = fake_vnflcm.VnfInstances.\
make_scale_request_body('SCALE_OUT')
resp, _ = self._scale_vnf_instance(vnf_instance_id, request_body)
self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
post_stack_resource_list = self._get_heat_resource_list(stack.id, 2)
self._assert_scale_vnf(resp, vnf_instance_id,
pre_stack_resource_list,
post_stack_resource_list,
scale_type='SCALE_OUT')
# Set Fake server response for Grant-Req(Scale-in)
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('POST',
fake_grant.Grant.GRANT_REQ_PATH, status_code=201,
callback=lambda req_headers,
req_body: fake_grant.Grant.make_scalein_response_body(req_body))
# Scale-in vnf instance
stack = self._get_heat_stack(vnf_instance_id)
pre_stack_resource_list = self._get_heat_resource_list(stack.id, 2)
request_body = fake_vnflcm.VnfInstances.make_scale_request_body(
'SCALE_IN')
resp, _ = self._scale_vnf_instance(vnf_instance_id, request_body)
self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
post_stack_resource_list = self._get_heat_resource_list(stack.id, 2)
self._assert_scale_vnf(resp, vnf_instance_id,
pre_stack_resource_list,
post_stack_resource_list,
scale_type='SCALE_IN')
# Set Fake server response for Grant-Req(Terminate)
vnflcm_base.FAKE_SERVER_MANAGER.set_callback('POST',
fake_grant.Grant.GRANT_REQ_PATH, status_code=201,
callback=lambda req_headers,
req_body: fake_grant.Grant.make_term_response_body(req_body))
        # Get stack information to terminate.
stack = self._get_heat_stack(vnf_instance_id)
resources_list = self._get_heat_resource_list(stack.id)
resource_name_list = [r.resource_name for r in resources_list]
glance_image_id_list = self._get_glance_image_list_from_stack_resource(
stack.id, resource_name_list)
# Terminate VNF
terminate_req_body = fake_vnflcm.VnfInstances.make_term_request_body()
resp, _ = self._terminate_vnf_instance(vnf_instance_id,
terminate_req_body)
self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
self._assert_terminate_vnf(resp, vnf_instance_id, stack.id,
resource_name_list, glance_image_id_list)
# Delete VNF
resp, _ = self._delete_vnf_instance(vnf_instance_id)
self._wait_lcm_done(vnf_instance_id=vnf_instance_id)
self.assert_delete_vnf(resp, vnf_instance_id)
# Delete Subscription
resp, response_body = self._delete_subscription(subscription_id)
self.assertEqual(204, resp.status_code)
def _assert_create_vnf(self, resp, vnf_instance):
"""Assert that VNF was created via fake server.
Args:
resp (Response): HTTP response object.
vnf_instance (Dict): VNF instance information.
"""
super().assert_create_vnf(resp, vnf_instance)
# FT-checkpoint: VnfPkgId
vnf_pkg_mock_responses = vnflcm_base.FAKE_SERVER_MANAGER.get_history(
fake_vnfpkgm.VnfPackage.VNF_PACKAGE_REQ_PATH)
vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
fake_vnfpkgm.VnfPackage.VNF_PACKAGE_REQ_PATH)
self.assertEqual(1, len(vnf_pkg_mock_responses))
vnf_pkg_info_list = vnf_pkg_mock_responses[0]
self.assertEqual(vnf_instance['vnfPkgId'],
vnf_pkg_info_list.response_body[0]['id'])
def _assert_instantiate_vnf(self, resp, vnf_instance_id):
"""Assert that VNF was instantiated.
        This method calls the same-named method of the super class, which
        checks that the heat resource status is 'CREATE_COMPLETE' and asserts
        the notifications of instantiation.
        Then, we check the Grant response in this method.
Args:
resp (Response): HTTP response object.
vnf_instance_id (str): VNF instance id.
"""
super().assert_instantiate_vnf(resp, vnf_instance_id)
# FT-checkpoint: Grant Response
grant_mock_responses = vnflcm_base.FAKE_SERVER_MANAGER.get_history(
fake_grant.Grant.GRANT_REQ_PATH)
vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
fake_grant.Grant.GRANT_REQ_PATH)
self.assertEqual(1, len(grant_mock_responses))
self._assert_grant_mock_response(grant_mock_responses[0])
def _assert_heal_vnf(self, resp, vnf_instance_id,
expected_stack_status='UPDATE_COMPLETE'):
"""Assert that VNF was healed.
        This method calls the same-named method of the super class, which
        checks that the heat resource status is 'UPDATE_COMPLETE' and asserts
        the notifications of healing.
        Then, we check the Grant response in this method.
Args:
resp (Response): HTTP response object.
vnf_instance_id (str): VNF instance id.
            expected_stack_status (str, optional): Expected Heat stack status.
                Defaults to 'UPDATE_COMPLETE'.
"""
super().assert_heal_vnf(
resp, vnf_instance_id, expected_stack_status=expected_stack_status)
# FT-checkpoint: Grant Response
grant_mock_responses = vnflcm_base.FAKE_SERVER_MANAGER.get_history(
fake_grant.Grant.GRANT_REQ_PATH)
vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
fake_grant.Grant.GRANT_REQ_PATH)
self.assertEqual(1, len(grant_mock_responses))
self._assert_grant_mock_response(grant_mock_responses[0])
def _assert_scale_vnf(self,
resp,
vnf_instance_id,
pre_stack_resource_list,
post_stack_resource_list,
scale_type):
super().assert_scale_vnf(
resp,
vnf_instance_id,
pre_stack_resource_list,
post_stack_resource_list,
scale_type=scale_type)
# FT-checkpoint: Grant Response
grant_mock_responses = vnflcm_base.FAKE_SERVER_MANAGER.get_history(
fake_grant.Grant.GRANT_REQ_PATH)
vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
fake_grant.Grant.GRANT_REQ_PATH)
self.assertEqual(1, len(grant_mock_responses))
self._assert_grant_mock_response(grant_mock_responses[0])
def _assert_terminate_vnf(self, resp, vnf_instance_id, stack_id,
resource_name_list, glance_image_id_list):
"""Assert that VNF was terminated.
        This method calls the same-named method of the super class to check
        that the specified VNF instance is 'NOT_INSTANTIATED'.
        Then, we check the Grant response in this method.
Args:
resp (Response): HTTP response object.
vnf_instance_id (str): VNF instance id.
stack_id (str): Resource id of heat stack to check.
resource_name_list (list[str]): List of heat stack resources.
glance_image_id_list (list[str]): List of glance image ids.
"""
super().assert_terminate_vnf(resp, vnf_instance_id, stack_id,
resource_name_list, glance_image_id_list)
# FT-checkpoint: Grant Response
grant_mock_responses = vnflcm_base.FAKE_SERVER_MANAGER.get_history(
fake_grant.Grant.GRANT_REQ_PATH)
vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
fake_grant.Grant.GRANT_REQ_PATH)
self.assertEqual(1, len(grant_mock_responses))
self._assert_grant_mock_response(grant_mock_responses[0])
def _assert_grant_mock_response(self, grant_mock_response,
expected_auth_type=None, expected_token_value=None):
"""Assert that HTTP response code is equal to 201 or not.
This method checks response code of grant request and
authorization result.
Args:
grant_mock_response (Response): HTTP response object.
expected_auth_type (str, optional): Authentication type.
Defaults to None.
expected_token_value ([type], optional): Authentication token.
Defaults to None.
"""
self.assertEqual(201, grant_mock_response.status_code)
actual_auth = grant_mock_response.request_headers.get("Authorization")
if expected_auth_type is None:
self.assertIsNone(actual_auth)
return
self.assertEqual(
'{} {}'.format(expected_auth_type, expected_token_value),
actual_auth)
def _assert_occ_show(self, resp, op_occs_info):
self.assertEqual(200, resp.status_code)
# Only check required parameters.
self.assertIsNotNone(op_occs_info.get('id'))
self.assertIsNotNone(op_occs_info.get('operationState'))
self.assertIsNotNone(op_occs_info.get('stateEnteredTime'))
self.assertIsNotNone(op_occs_info.get('vnfInstanceId'))
self.assertIsNotNone(op_occs_info.get('operation'))
self.assertIsNotNone(op_occs_info.get('isAutomaticInvocation'))
self.assertIsNotNone(op_occs_info.get('isCancelPending'))
_links = op_occs_info.get('_links')
self.assertIsNotNone(_links.get('self'))
self.assertIsNotNone(_links.get('self').get('href'))
self.assertIsNotNone(_links.get('vnfInstance'))
self.assertIsNotNone(_links.get('vnfInstance').get('href'))
self.assertIsNotNone(_links.get('grant'))
self.assertIsNotNone(_links.get('grant').get('href'))
def _get_fixed_ips(self, vnf_instance_id, request_body):
res_name = None
for extvirlink in request_body['extVirtualLinks']:
if 'extCps' not in extvirlink:
continue
for extcps in extvirlink['extCps']:
if 'cpdId' in extcps:
if res_name is None:
res_name = list()
res_name.append(extcps['cpdId'])
break
self.assertTrue(res_name)
stack = self._get_heat_stack(vnf_instance_id)
stack_id = stack.id
stack_resource = self._get_heat_resource_list(stack_id, nested_depth=2)
        relations = dict()
        for elmt in stack_resource:
            if elmt.resource_type != 'OS::Neutron::Port':
                continue
            if elmt.resource_name not in res_name:
                continue
            relations[elmt.parent_resource] = elmt.resource_name
        details = list()
        for (parent_name, resource_name) in relations.items():
for elmt in stack_resource:
if parent_name != elmt.resource_name:
continue
detail_stack = self._get_heat_resource(
elmt.physical_resource_id, resource_name)
details.append(detail_stack)
ans_list = list()
for detail in details:
ans_list.append(detail.attributes['fixed_ips'])
return ans_list
def _assert_stack_template(self, vnf_instance_id):
stack = self._get_heat_stack(vnf_instance_id)
resources_list\
= self._get_heat_resource_list(stack.id, nested_depth=2)
stack_name_wd = vnf_instance_id + "-VDU2"
physical_resource_id = [r.physical_resource_id for r
in resources_list if stack_name_wd in r.stack_name]
template = self._get_heat_stack_template(physical_resource_id[0])
template_count = str(template).count("flavor")
self.assertEqual(template_count, 3)
def _assert_stack_template_scale(self, vnf_instance_id):
stack = self._get_heat_stack(vnf_instance_id)
resources_list\
= self._get_heat_resource_list(stack.id, nested_depth=2)
stack_name_wd = vnf_instance_id + "-VDU1"
physical_resource_id = [r.physical_resource_id for r
in resources_list if stack_name_wd in r.stack_name]
template = self._get_heat_stack_template(physical_resource_id[0])
template_count = str(template).count("zone")
self.assertEqual(template_count, 3)
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _t
from desktop.lib.conf import Config, UnspecifiedConfigSection, ConfigSection, coerce_bool
from desktop.conf import default_ssl_validate
import fnmatch
import logging
import os
DEFAULT_NN_HTTP_PORT = 50070
LOG = logging.getLogger(__name__)
def find_file_recursive(desired_glob, root):
def f():
for dirpath, dirnames, filenames in os.walk(root):
matches = fnmatch.filter(filenames, desired_glob)
if matches:
if len(matches) != 1:
logging.warning("Found multiple jars matching %s: %s" %
(desired_glob, matches))
return os.path.join(dirpath, matches[0])
logging.error("Trouble finding jars matching %s" % (desired_glob,))
return None
f.__doc__ = "Finds %s/%s" % (root, desired_glob)
return f
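# Illustrative usage of find_file_recursive as a dynamic default (a sketch;
# the config key, glob, and root path below are assumptions, not part of the
# shipped configuration):
#
#   HADOOP_EXAMPLES_JAR = Config(
#     key="hadoop_examples_jar",
#     dynamic_default=find_file_recursive("hadoop-*examples*.jar",
#                                         root="/usr/lib/hadoop"),
#     help="Path to a hadoop-examples jar, located by scanning the install.")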
UPLOAD_CHUNK_SIZE = Config(
key="upload_chunk_size",
help="Size, in bytes, of the 'chunks' Django should store into memory and feed into the handler. Default is 64MB.",
type=int,
default=1024 * 1024 * 64)
HDFS_CLUSTERS = UnspecifiedConfigSection(
"hdfs_clusters",
help="One entry for each HDFS cluster",
each=ConfigSection(
help="Information about a single HDFS cluster",
members=dict(
FS_DEFAULTFS=Config("fs_defaultfs", help="The equivalent of fs.defaultFS (aka fs.default.name)",
default="hdfs://localhost:8020"),
LOGICAL_NAME = Config("logical_name", default="",
type=str, help=_t('NameNode logical name.')),
WEBHDFS_URL=Config("webhdfs_url",
help="The URL to WebHDFS/HttpFS service. Defaults to " +
"the WebHDFS URL on the NameNode.",
type=str, default="http://localhost:50070/webhdfs/v1"),
NN_KERBEROS_PRINCIPAL=Config("nn_kerberos_principal", help="Kerberos principal for NameNode", # Unused
default="hdfs", type=str),
DN_KERBEROS_PRINCIPAL=Config("dn_kerberos_principal", help="Kerberos principal for DataNode", # Unused
default="hdfs", type=str),
SECURITY_ENABLED=Config("security_enabled", help="Is running with Kerberos authentication",
default=False, type=coerce_bool),
SSL_CERT_CA_VERIFY=Config("ssl_cert_ca_verify",
help="In secure mode (HTTPS), if SSL certificates from YARN Rest APIs have to be verified against certificate authority",
dynamic_default=default_ssl_validate,
type=coerce_bool),
TEMP_DIR=Config("temp_dir", help="HDFS directory for temporary files",
default='/tmp', type=str),
HADOOP_CONF_DIR = Config(
key="hadoop_conf_dir",
default=os.environ.get("HADOOP_CONF_DIR", "/etc/hadoop/conf"),
help=("Directory of the Hadoop configuration) Defaults to the environment variable " +
"HADOOP_CONF_DIR when set, or '/etc/hadoop/conf'.")
)
)
)
)
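# Illustrative hue.ini snippet for the hdfs_clusters section above (a sketch;
# the host names and ports are assumptions and should match your cluster):
#
#   [hadoop]
#     [[hdfs_clusters]]
#       [[[default]]]
#         fs_defaultfs=hdfs://namenode.example.com:8020
#         webhdfs_url=http://namenode.example.com:50070/webhdfs/v1
#         security_enabled=false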
MR_CLUSTERS = UnspecifiedConfigSection(
"mapred_clusters",
help="One entry for each MapReduce cluster",
each=ConfigSection(
help="Information about a single MapReduce cluster",
members=dict(
HOST=Config("jobtracker_host", help="Host/IP for JobTracker"),
PORT=Config("jobtracker_port",
default=8021,
help="Service port for the JobTracker",
type=int),
LOGICAL_NAME=Config('logical_name',
default="",
type=str,
help=_t('JobTracker logical name.')),
JT_THRIFT_PORT=Config("thrift_port", help="Thrift port for JobTracker", default=9290,
type=int),
JT_KERBEROS_PRINCIPAL=Config("jt_kerberos_principal", help="Kerberos principal for JobTracker",
default="mapred", type=str),
SECURITY_ENABLED=Config("security_enabled", help="Is running with Kerberos authentication",
default=False, type=coerce_bool),
SUBMIT_TO=Config('submit_to', help="Whether Hue should use this cluster to run jobs",
default=True, type=coerce_bool), # True here for backward compatibility
)
)
)
YARN_CLUSTERS = UnspecifiedConfigSection(
"yarn_clusters",
help="One entry for each Yarn cluster",
each=ConfigSection(
help="Information about a single Yarn cluster",
members=dict(
HOST=Config("resourcemanager_host",
default='localhost',
help="Host/IP for the ResourceManager"),
PORT=Config("resourcemanager_port",
default=8032,
type=int,
help="Service port for the ResourceManager"),
LOGICAL_NAME=Config('logical_name',
default="",
type=str,
help=_t('Resource Manager logical name.')),
SECURITY_ENABLED=Config("security_enabled", help="Is running with Kerberos authentication",
default=False, type=coerce_bool),
SUBMIT_TO=Config('submit_to', help="Whether Hue should use this cluster to run jobs",
default=False, type=coerce_bool), # False here for backward compatibility
IS_YARN=Config("is_yarn", help="Attribute set only on YARN clusters and not MR1 ones.",
default=True, type=coerce_bool),
RESOURCE_MANAGER_API_URL=Config("resourcemanager_api_url",
default='http://localhost:8088',
help="URL of the ResourceManager API"),
PROXY_API_URL=Config("proxy_api_url",
default='http://localhost:8088',
help="URL of the ProxyServer API"),
HISTORY_SERVER_API_URL=Config("history_server_api_url",
default='http://localhost:19888',
help="URL of the HistoryServer API"),
SSL_CERT_CA_VERIFY=Config("ssl_cert_ca_verify",
help="In secure mode (HTTPS), if SSL certificates from YARN Rest APIs have to be verified against certificate authority",
dynamic_default=default_ssl_validate,
type=coerce_bool)
)
)
)
def config_validator(user):
"""
config_validator() -> [ (config_variable, error_message) ]
Called by core check_config() view.
"""
from hadoop.fs import webhdfs
from hadoop import job_tracker
res = []
submit_to = []
# HDFS_CLUSTERS
has_default = False
for name in HDFS_CLUSTERS.keys():
cluster = HDFS_CLUSTERS[name]
res.extend(webhdfs.test_fs_configuration(cluster))
if name == 'default':
has_default = True
if not has_default:
res.append(("hadoop.hdfs_clusters", "You should have an HDFS called 'default'."))
# MR_CLUSTERS
mr_down = []
for name in MR_CLUSTERS.keys():
cluster = MR_CLUSTERS[name]
if cluster.SUBMIT_TO.get():
mr_down.extend(job_tracker.test_jt_configuration(cluster))
submit_to.append('mapred_clusters.' + name)
  # Only report JobTracker errors if every MR cluster failed (e.g. both sides of an HA pair are down)
if mr_down and len(mr_down) == len(MR_CLUSTERS.keys()):
res.extend(mr_down)
# YARN_CLUSTERS
for name in YARN_CLUSTERS.keys():
cluster = YARN_CLUSTERS[name]
if cluster.SUBMIT_TO.get():
res.extend(test_yarn_configurations(user))
submit_to.append('yarn_clusters.' + name)
if not submit_to:
res.append(("hadoop", "Please designate one of the MapReduce or "
"Yarn clusters with `submit_to=true' in order to run jobs."))
return res
def test_yarn_configurations(user):
# Single cluster for now
from hadoop.yarn.resource_manager_api import get_resource_manager
result = []
try:
url = ''
api = get_resource_manager(user.username)
url = api._url
api.apps()
except Exception, e:
msg = 'Failed to contact Resource Manager at %s: %s' % (url, e)
LOG.exception(msg)
result.append(('Resource Manager', msg))
return result
|
|
import os
import re
import urllib
from django.conf import settings
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.sites.models import Site, RequestSite
from django.contrib.auth.models import User
from django.test import TestCase
from django.core import mail
from django.core.urlresolvers import reverse
from django.http import QueryDict
class AuthViewsTestCase(TestCase):
"""
Helper base class for all the follow test cases.
"""
fixtures = ['authtestdata.json']
urls = 'django.contrib.auth.tests.urls'
def setUp(self):
self.old_LANGUAGES = settings.LANGUAGES
self.old_LANGUAGE_CODE = settings.LANGUAGE_CODE
settings.LANGUAGES = (('en', 'English'),)
settings.LANGUAGE_CODE = 'en'
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
def tearDown(self):
settings.LANGUAGES = self.old_LANGUAGES
settings.LANGUAGE_CODE = self.old_LANGUAGE_CODE
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
def login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password
}
)
self.assertEquals(response.status_code, 302)
self.assert_(response['Location'].endswith(settings.LOGIN_REDIRECT_URL))
self.assert_(SESSION_KEY in self.client.session)
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"Error is raised if the provided email address isn't currently registered"
response = self.client.get('/password_reset/')
self.assertEquals(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertContains(response, "That e-mail address doesn't have an associated user account")
self.assertEquals(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEquals(response.status_code, 302)
self.assertEquals(len(mail.outbox), 1)
self.assert_("http://" in mail.outbox[0].body)
self.assertEquals(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': '[email protected]'})
self.assertEquals(response.status_code, 302)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals("[email protected]", mail.outbox[0].from_email)
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEquals(response.status_code, 302)
self.assertEquals(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assert_(urlmatch is not None, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertEquals(response.status_code, 200)
self.assert_("Please enter your new password" in response.content)
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf will reject a different length.
path = path[:-5] + ("0"*4) + path[-1]
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assert_("The password reset link was invalid" in response.content)
def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existent user, not a 404
response = self.client.get('/reset/123456/1-1/')
self.assertEquals(response.status_code, 200)
self.assert_("The password reset link was invalid" in response.content)
def test_confirm_overflow_user(self):
# Ensure that we get a 200 response for a base36 user id that overflows int
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertEquals(response.status_code, 200)
self.assert_("The password reset link was invalid" in response.content)
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying
# to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0"*4) + path[-1]
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2':' anewpassword'})
# Check the password has not been changed
u = User.objects.get(email='[email protected]')
self.assert_(not u.check_password("anewpassword"))
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
# It redirects us to a 'complete' page:
self.assertEquals(response.status_code, 302)
# Check the password has been changed
u = User.objects.get(email='[email protected]')
self.assert_(u.check_password("anewpassword"))
# Check we can't use the link again
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assert_("The password reset link was invalid" in response.content)
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2':' x'})
self.assertEquals(response.status_code, 200)
self.assert_("The two password fields didn't match" in response.content)
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password
}
)
self.assertEquals(response.status_code, 200)
self.assert_("Please enter a correct username and password. Note that both fields are case-sensitive." in response.content)
def logout(self):
response = self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
}
)
self.assertEquals(response.status_code, 200)
self.assert_("Your old password was entered incorrectly. Please enter it again." in response.content)
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
}
)
self.assertEquals(response.status_code, 200)
self.assert_("The two password fields didn't match." in response.content)
def test_password_change_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
}
)
self.assertEquals(response.status_code, 302)
self.assert_(response['Location'].endswith('/password_change/done/'))
self.fail_login()
self.login(password='password1')
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('django.contrib.auth.views.login'))
self.assertEquals(response.status_code, 200)
if Site._meta.installed:
site = Site.objects.get_current()
self.assertEquals(response.context['site'], site)
self.assertEquals(response.context['site_name'], site.name)
else:
self.assertIsInstance(response.context['site'], RequestSite)
self.assert_(isinstance(response.context['form'], AuthenticationForm),
'Login form is not an AuthenticationForm')
def test_security_check(self, password='password'):
login_url = reverse('django.contrib.auth.views.login')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'https://example.com',
'ftp://exampel.com',
'//example.com'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urllib.quote(bad_url)
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': password,
}
)
self.assertEquals(response.status_code, 302)
self.assertFalse(bad_url in response['Location'],
"%s should be blocked" % bad_url)
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https:///',
'//testserver/'):
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urllib.quote(good_url)
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': password,
}
)
self.assertEquals(response.status_code, 302)
self.assertTrue(good_url in response['Location'],
"%s should be allowed" % good_url)
class LoginURLSettings(AuthViewsTestCase):
urls = 'django.contrib.auth.tests.urls'
def setUp(self):
super(LoginURLSettings, self).setUp()
self.old_LOGIN_URL = settings.LOGIN_URL
def tearDown(self):
super(LoginURLSettings, self).tearDown()
settings.LOGIN_URL = self.old_LOGIN_URL
def get_login_required_url(self, login_url):
settings.LOGIN_URL = login_url
response = self.client.get('/login_required/')
self.assertEquals(response.status_code, 302)
return response['Location']
def test_standard_login_url(self):
login_url = '/login/'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = '/login_required/'
self.assertEqual(login_required_url,
'http://testserver%s?%s' % (login_url, querystring.urlencode('/')))
def test_remote_login_url(self):
login_url = 'http://remote.example.com/login'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = 'http://testserver/login_required/'
self.assertEqual(login_required_url,
'%s?%s' % (login_url, querystring.urlencode('/')))
def test_https_login_url(self):
login_url = 'https:///login/'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = 'http://testserver/login_required/'
self.assertEqual(login_required_url,
'%s?%s' % (login_url, querystring.urlencode('/')))
def test_login_url_with_querystring(self):
login_url = '/login/?pretty=1'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('pretty=1', mutable=True)
querystring['next'] = '/login_required/'
self.assertEqual(login_required_url, 'http://testserver/login/?%s' %
querystring.urlencode('/'))
def test_remote_login_url_with_next_querystring(self):
login_url = 'http://remote.example.com/login/'
login_required_url = self.get_login_required_url('%s?next=/default/' %
login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = 'http://testserver/login_required/'
self.assertEqual(login_required_url, '%s?%s' % (login_url,
querystring.urlencode('/')))
class LogoutTest(AuthViewsTestCase):
urls = 'django.contrib.auth.tests.urls'
def confirm_logged_out(self):
self.assert_(SESSION_KEY not in self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertEquals(200, response.status_code)
self.assert_('Logged out' in response.content)
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertTrue('site' in response.context)
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assert_(response['Location'].endswith('/somewhere/'))
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assert_(response['Location'].endswith('/login/'))
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertEqual(response.status_code, 302)
self.assert_(response['Location'].endswith('/somewhere/'))
self.confirm_logged_out()
|
|
#!/usr/bin/python
#
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aireos_command
version_added: "2.4"
author: "James Mighion (@jmighion)"
short_description: Run commands on remote devices running Cisco WLC
description:
- Sends arbitrary commands to an aireos node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- Commands run in configuration mode with this module are not
idempotent. Please use M(aireos_config) to configure WLC devices.
extends_documentation_fragment: aireos
options:
commands:
description:
- List of commands to send to the remote aireos device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
        module does not return until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
default: all
choices: ['any', 'all']
retries:
description:
      - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
default: 1
"""
EXAMPLES = """
tasks:
- name: run show sysinfo on remote devices
aireos_command:
commands: show sysinfo
- name: run show sysinfo and check to see if output contains Cisco Controller
aireos_command:
commands: show sysinfo
wait_for: result[0] contains 'Cisco Controller'
- name: run multiple commands on remote nodes
aireos_command:
commands:
- show sysinfo
- show interface summary
- name: run multiple commands and evaluate the output
aireos_command:
commands:
- show sysinfo
- show interface summary
wait_for:
- result[0] contains Cisco Controller
- result[1] contains Loopback0
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.network.aireos.aireos import run_commands
from ansible.module_utils.network.aireos.aireos import aireos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = to_text(item, errors='surrogate_then_replace').split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
warnings.append(
'commands run in config mode with aireos_command are not '
'idempotent. Please use aireos_config instead'
)
return commands
def main():
"""main entry point for module execution
"""
argument_spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(aireos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
})
module.exit_json(**result)
if __name__ == '__main__':
main()
|
|
from __future__ import unicode_literals
import os
import re
from django.utils import six
from django.utils.six.moves import range
from reviewboard.diffviewer.processors import (filter_interdiff_opcodes,
post_process_filtered_equals)
class MoveRange(object):
"""Stores information on a move range.
This will store the start and end of the range, and all groups that
are a part of it.
"""
    def __init__(self, start, end, groups=None):
        self.start = start
        self.end = end
        self.groups = groups or []
@property
def last_group(self):
return self.groups[-1]
def add_group(self, group, group_index):
if self.groups[-1] != group:
self.groups.append((group, group_index))
def __repr__(self):
return '<MoveRange(%d, %d, %r)>' % (self.start, self.end, self.groups)
class DiffOpcodeGenerator(object):
ALPHANUM_RE = re.compile(r'\w')
WHITESPACE_RE = re.compile(r'\s')
MOVE_PREFERRED_MIN_LINES = 2
MOVE_MIN_LINE_LENGTH = 20
TAB_SIZE = 8
def __init__(self, differ, diff=None, interdiff=None, request=None,
**kwargs):
"""Initialize the opcode generator.
Version Changed:
3.0.18:
Added the ``request`` and ``**kwargs`` parameters.
Args:
differ (reviewboard.diffviewer.differ.Differ):
The differ being used to generate the diff.
diff (bytes, optional):
The raw contents for the diff.
interdiff (bytes, optional):
The raw contents for the diff on the other end of an
interdiff range, if generating an interdiff.
request (django.http.HttpRequest):
The HTTP request from the client.
**kwargs (dict):
Additional keyword arguments, for future expansion.
"""
self.differ = differ
self.diff = diff
self.interdiff = interdiff
self.request = request
def __iter__(self):
"""Returns opcodes from the differ with extra metadata.
This is a wrapper around a differ's get_opcodes function, which returns
extra metadata along with each range. That metadata includes
information on moved blocks of code and whitespace-only lines.
This returns a list of opcodes as tuples in the form of
(tag, i1, i2, j1, j2, meta).
"""
self.groups = []
self.removes = {}
self.inserts = []
# Run the opcodes through the chain.
opcodes = self.differ.get_opcodes()
opcodes = self._apply_processors(opcodes)
opcodes = self._generate_opcode_meta(opcodes)
opcodes = self._apply_meta_processors(opcodes)
self._group_opcodes(opcodes)
self._compute_moves()
for opcodes in self.groups:
yield opcodes
def _apply_processors(self, opcodes):
"""Apply any diff processors to the generated list of opcodes.
If generating an interdiff, this will apply a filter to remove any
unmodified lines.
Args:
opcodes (list of tuple):
The list of generated diff opcodes to process.
Yields:
tuple:
A processed opcode.
"""
if self.diff and self.interdiff:
# Filter out any lines unrelated to these changes from the
# interdiff. This will get rid of any merge information.
opcodes = filter_interdiff_opcodes(
opcodes=opcodes,
filediff_data=self.diff,
interfilediff_data=self.interdiff,
request=self.request)
for opcode in opcodes:
yield opcode
def _generate_opcode_meta(self, opcodes):
for tag, i1, i2, j1, j2 in opcodes:
meta = {
# True if this chunk is only whitespace.
'whitespace_chunk': False,
# List of tuples (i, j), with whitespace changes.
'whitespace_lines': [],
}
if tag == 'replace':
# replace groups are good for whitespace only changes.
assert (i2 - i1) == (j2 - j1)
for i, j in zip(range(i1, i2), range(j1, j2)):
if (self.WHITESPACE_RE.sub('', self.differ.a[i]) ==
self.WHITESPACE_RE.sub('', self.differ.b[j])):
# Both original lines are equal when removing all
# whitespace, so include their original line number in
# the meta dict.
meta['whitespace_lines'].append((i + 1, j + 1))
# If all lines are considered to have only whitespace change,
# the whole chunk is considered a whitespace-only chunk.
if len(meta['whitespace_lines']) == (i2 - i1):
meta['whitespace_chunk'] = True
elif tag == 'equal':
for group in self._compute_chunk_indentation(i1, i2, j1, j2):
ii1, ii2, ij1, ij2, indentation_changes = group
if indentation_changes:
new_meta = dict({
'indentation_changes': indentation_changes,
}, **meta)
else:
new_meta = meta
yield tag, ii1, ii2, ij1, ij2, new_meta
continue
yield tag, i1, i2, j1, j2, meta
def _apply_meta_processors(self, opcodes):
if self.interdiff:
# When filtering out opcodes, we may have converted chunks into
# "filtered-equal" chunks. This allowed us to skip any additional
# processing, particularly the indentation highlighting. It's
# now time to turn those back into "equal" chunks.
opcodes = post_process_filtered_equals(opcodes)
for opcode in opcodes:
yield opcode
def _group_opcodes(self, opcodes):
for group_index, group in enumerate(opcodes):
self.groups.append(group)
# Store delete/insert ranges for later lookup. We will be building
# keys that in most cases will be unique for the particular block
# of text being inserted/deleted. There is a chance of collision,
# so we store a list of matching groups under that key.
#
# Later, we will loop through the keys and attempt to find insert
# keys/groups that match remove keys/groups.
tag = group[0]
if tag in ('delete', 'replace'):
i1 = group[1]
i2 = group[2]
for i in range(i1, i2):
line = self.differ.a[i].strip()
if line:
self.removes.setdefault(line, []).append(
(i, group, group_index))
if tag in ('insert', 'replace'):
self.inserts.append(group)
def _compute_chunk_indentation(self, i1, i2, j1, j2):
# We'll be going through all the opcodes in this equals chunk and
# grouping with adjacent opcodes based on whether they have
# indentation changes or not. This allows us to keep the lines with
# indentation changes from being collapsed in the diff viewer.
indentation_changes = {}
prev_has_indent = False
prev_start_i = i1
prev_start_j = j1
for i, j in zip(range(i1, i2), range(j1, j2)):
old_line = self.differ.a[i]
new_line = self.differ.b[j]
new_indentation_changes = {}
indent_info = self._compute_line_indentation(old_line, new_line)
has_indent = indent_info is not None
if has_indent:
key = '%d-%d' % (i + 1, j + 1)
new_indentation_changes[key] = indent_info
if has_indent != prev_has_indent:
if prev_start_i != i or prev_start_j != j:
# Yield the previous group.
yield prev_start_i, i, prev_start_j, j, indentation_changes
# We have a new group. Set it up, starting with the current
# calculated state.
prev_start_i = i
prev_start_j = j
prev_has_indent = has_indent
indentation_changes = new_indentation_changes
elif has_indent:
indentation_changes.update(new_indentation_changes)
# Yield the last group, if we haven't already yielded it.
if prev_start_i != i2 or prev_start_j != j2:
yield prev_start_i, i2, prev_start_j, j2, indentation_changes
def _compute_line_indentation(self, old_line, new_line):
if old_line == new_line:
return None
old_line_stripped = old_line.lstrip()
new_line_stripped = new_line.lstrip()
# These are fake-equal. They really have some indentation changes.
# We want to mark those up.
#
# Our goal for this function from here on out is to figure out whether
# the new line has increased or decreased its indentation, and then
# to determine how much that has increased or decreased by.
#
# Since we may be dealing with the additional or removal of tabs,
# we have some challenges here. We need to expand those tabs in
# order to determine if the new line is indented further or not,
# and then we need to figure out how much of the leading whitespace
# on either side represents new indentation levels.
#
# We do this by chopping off all leading whitespace and expanding
# any tabs, and then figuring out the total line lengths. That gives
# us a basis for comparison to determine whether we've indented
# or unindented.
#
# We can then later figure out exactly which indentation characters
# were added or removed, and then store that information.
old_line_indent_len = len(old_line) - len(old_line_stripped)
new_line_indent_len = len(new_line) - len(new_line_stripped)
old_line_indent = old_line[:old_line_indent_len]
new_line_indent = new_line[:new_line_indent_len]
norm_old_line_indent = old_line_indent.expandtabs(self.TAB_SIZE)
norm_new_line_indent = new_line_indent.expandtabs(self.TAB_SIZE)
norm_old_line_indent_len = len(norm_old_line_indent)
norm_new_line_indent_len = len(norm_new_line_indent)
norm_old_line_len = (norm_old_line_indent_len +
len(old_line_stripped))
norm_new_line_len = (norm_new_line_indent_len +
len(new_line_stripped))
line_len_diff = norm_new_line_len - norm_old_line_len
if line_len_diff == 0:
return None
# We know that a spacing change did take place. We need to figure
# out now how many characters of indentation were actually
# added or removed.
is_indent = (line_len_diff > 0)
if is_indent:
raw_indent_len = new_line_indent_len
else:
raw_indent_len = old_line_indent_len
# Figure out how many characters of indentation were in common
# at the end of the strings. We'll want to exclude these
# characters when showing indentation changes.
#
# This is the area after any new indentation. If the indentation
# style changed (such as going from tabs to spaces), then nothing
# will be in common.
#
# We figure out the common trailing indentation by reversing both
# strings and then finding the common prefix. We only care about
# the length, so we can throw the string away.
#
# It may seem odd that we're using os.path.commonprefix, but this
# isn't really limited to paths. Certainly not in our case. It's
# worth not re-implementing that logic.
raw_indent_len -= len(os.path.commonprefix([
old_line_indent[::-1],
new_line_indent[::-1],
]))
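        # Illustrative example (assuming TAB_SIZE == 8): with old_line
        # "\tfoo" and new_line "\t\tfoo", the normalized indent widths are
        # 8 and 16 columns, so is_indent is True; the trailing "\t" the two
        # indents share is excluded from the new line's two indentation
        # characters, and we return (True, 1, 8). Had new_line been
        # "        foo" (8 spaces), the normalized widths would match and
        # the line_len_diff check above would already have returned None.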
return (is_indent,
raw_indent_len,
abs(norm_old_line_indent_len - norm_new_line_indent_len))
def _compute_moves(self):
# We now need to figure out all the moved locations.
#
# At this point, we know all the inserted groups, and all the
# individually deleted lines. We'll be going through and finding
# consecutive groups of matching inserts/deletes that represent a
# move block.
#
# The algorithm will be documented as we go in the code.
#
# We start by looping through all the inserted groups.
r_move_indexes_used = set()
for insert in self.inserts:
self._compute_move_for_insert(r_move_indexes_used, *insert)
def _compute_move_for_insert(self, r_move_indexes_used, itag, ii1, ii2,
ij1, ij2, imeta):
"""Compute move information for a given insert-like chunk.
Args:
r_move_indexes_used (set):
All remove indexes that have already been included in a move
range.
itag (unicode):
The chunk tag for the insert (``insert`` or ``replace``).
ii1 (int):
The 0-based start of the chunk on the original side.
ii2 (int):
The 0-based start of the next chunk on the original side.
ij1 (int):
The 0-based start of the chunk on the modification side.
ij2 (int):
The 0-based start of the next chunk on the modification side.
imeta (dict):
The metadata for the chunk for the modification, where the move
ranges may be stored.
"""
# Store some state on the range we'll be working with inside this
# insert group.
# The current location inside the insert group (from ij1 through ij2).
i_move_cur = ij1
# The current range of consecutive lines that we'll use for a move.
# Each line in this range has a corresponding consecutive delete line.
i_move_range = MoveRange(i_move_cur, i_move_cur)
# The deleted move ranges. The key is a string in the form of
# "{i1}-{i2}-{j1}-{j2}", with those positions taken from the remove
# group for the line. The value is an instance of MoveRange. The values
# in MoveRange are used to quickly locate deleted lines we've found
# that match the inserted lines, so we can assemble ranges later.
r_move_ranges = {} # key -> (start, end, group)
move_key = None
is_replace = (itag == 'replace')
# Loop through every location from ij1 through ij2 - 1 until we've
# reached the end.
while i_move_cur < ij2:
try:
iline = self.differ.b[i_move_cur].strip()
except IndexError:
iline = None
updated_range = False
if iline and iline in self.removes:
# The inserted line at this location has a corresponding
# removed line.
#
# If there's already some information on removed line ranges
# for this particular move block we're processing then we'll
# update the range.
#
# The way we do that is to find each removed line that matches
# this inserted line, and for each of those find out if there's
# an existing move range that the found removed line
# immediately follows. If there is, we update the existing
# range.
#
# If there isn't any move information for this line, we'll
# simply add it to the move ranges.
for ri, rgroup, rgroup_index in self.removes[iline]:
# Ignore any lines that have already been processed as
# part of a move, so we don't end up with incorrect blocks
# of lines being matched.
if ri in r_move_indexes_used:
continue
r_move_range = r_move_ranges.get(move_key)
if not r_move_range or ri != r_move_range.end + 1:
# We either didn't have a previous range, or this
# group didn't immediately follow it, so we need
# to start a new one.
move_key = '%s-%s-%s-%s' % rgroup[1:5]
r_move_range = r_move_ranges.get(move_key)
if r_move_range:
# If the remove information for the line is next in
# the sequence for this calculated move range...
if ri == r_move_range.end + 1:
# This is part of the current range, so update
# the end of the range to include it.
r_move_range.end = ri
r_move_range.add_group(rgroup, rgroup_index)
updated_range = True
else:
# Check that this isn't a replace line that's just
# "replacing" itself (which would happen if it's just
# changing whitespace).
if not is_replace or i_move_cur - ij1 != ri - ii1:
# We don't have any move ranges yet, or we're done
# with the existing range, so it's time to build
# one based on any removed lines we find that
# match the inserted line.
r_move_ranges[move_key] = \
MoveRange(ri, ri, [(rgroup, rgroup_index)])
updated_range = True
if updated_range:
# We found a range we were able to update. Don't
# attempt any more matches for removed lines.
break
if not updated_range and r_move_ranges:
# We didn't find a move range that this line is a part
# of, but we do have some existing move ranges stored.
#
                    # Since updated_range is not set, we'll be processing
                    # the known move ranges below. We'll actually want to
                    # re-check this line afterward, so that we can start a
                    # new move range after we've finished processing the
                    # current ones.
                    #
                    # To do that, just move i_move_cur back by one. That
                    # negates the increment below.
i_move_cur -= 1
move_key = None
elif iline == '' and move_key:
# This is a blank or whitespace-only line, which would not
# be in the list of removed lines above. We also have been
# working on a move range.
#
# At this point, the plan is to just attach this blank
# line onto the end of the last range being operated on.
#
# This blank line will help tie together adjacent move
# ranges. If it turns out to be a trailing line, it'll be
# stripped later in _determine_move_range.
r_move_range = r_move_ranges.get(move_key)
if r_move_range:
new_end_i = r_move_range.end + 1
if (new_end_i < len(self.differ.a) and
self.differ.a[new_end_i].strip() == ''):
# There was a matching blank line on the other end
# of the range, so we should feel more confident about
# adding the blank line here.
r_move_range.end = new_end_i
# It's possible that this blank line is actually an
# "equal" line. Though technically it didn't move,
# we're trying to create a logical, seamless move
# range, so we need to try to find that group and
                        # add it to the list of groups in the range, if it's
                        # not already there.
last_group, last_group_index = r_move_range.last_group
if new_end_i >= last_group[2]:
# This is in the next group, which hasn't been
# added yet. So add it.
cur_group_index = r_move_range.last_group[1] + 1
r_move_range.add_group(
self.groups[cur_group_index],
cur_group_index)
updated_range = True
i_move_cur += 1
if not updated_range or i_move_cur == ij2:
# We've reached the very end of the insert group. See if
# we have anything that looks like a move.
if r_move_ranges:
r_move_range = self._find_longest_move_range(r_move_ranges)
# If we have a move range, see if it's one we want to
# include or filter out. Some moves are not impressive
# enough to display. For example, a small portion of a
# comment, or whitespace-only changes.
r_move_range = self._determine_move_range(r_move_range)
if r_move_range:
# Rebuild the insert and remove ranges based on where
# we are now and which range we won.
#
# The new ranges will be actual lists of positions,
# rather than a beginning and end. These will be
# provided to the renderer.
#
# The ranges expected by the renderers are 1-based,
# whereas our calculations for this algorithm are
# 0-based, so we add 1 to the numbers.
#
# The upper boundaries passed to the range() function
# must actually be one higher than the value we want.
# So, for r_move_range, we actually increment by 2. We
# only increment i_move_cur by one, because i_move_cur
# already factored in the + 1 by being at the end of
# the while loop.
i_range = range(i_move_range.start + 1,
i_move_cur + 1)
r_range = range(r_move_range.start + 1,
r_move_range.end + 2)
moved_to_ranges = dict(zip(r_range, i_range))
for group, group_index in r_move_range.groups:
rmeta = group[-1]
rmeta.setdefault('moved-to', {}).update(
moved_to_ranges)
imeta.setdefault('moved-from', {}).update(
dict(zip(i_range, r_range)))
# Record each of the positions in the removed range
# as used, so that they're not factored in again when
# determining possible ranges for future moves.
#
# We'll use the r_range above, but normalize back to
# 0-based indexes.
r_move_indexes_used.update(r - 1 for r in r_range)
# Reset the state for the next range.
move_key = None
i_move_range = MoveRange(i_move_cur, i_move_cur)
r_move_ranges = {}
def _find_longest_move_range(self, r_move_ranges):
# Go through every range of lines we've found and find the longest.
#
# The longest move range wins. If we find two ranges that are equal,
# though, we'll ignore both. The idea is that if we have two identical
# moves, then it's probably common enough code that we don't want to
# show the move. An example might be some standard part of a comment
# block, with no real changes in content.
#
# Note that with the current approach, finding duplicate moves doesn't
# cause us to reset the winning range to the second-highest identical
# match. We may want to do that down the road, but it means additional
# state, and this is hopefully uncommon enough to not be a real
# problem.
r_move_range = None
for iter_move_range in six.itervalues(r_move_ranges):
if not r_move_range:
r_move_range = iter_move_range
else:
len1 = r_move_range.end - r_move_range.start
len2 = iter_move_range.end - iter_move_range.start
if len1 < len2:
r_move_range = iter_move_range
elif len1 == len2:
# If there are two that are the same, it may be common
# code that we don't want to see moves for. Comments,
# for example.
r_move_range = None
return r_move_range
def _determine_move_range(self, r_move_range):
"""Determines if a move range is valid and should be included.
This performs some tests to try to eliminate trivial changes that
shouldn't have moves associated.
Specifically, a move range is valid if it has at least one line
with alpha-numeric characters and is at least 4 characters long when
stripped.
If the move range is valid, any trailing whitespace-only lines will
be stripped, ensuring it covers only a valid range of content.
"""
if not r_move_range:
return None
end_i = r_move_range.end
lines = self.differ.a[r_move_range.start:end_i + 1]
new_end_i = None
valid = False
for i, line in enumerate(reversed(lines)):
line = line.strip()
if line:
if len(line) >= 4 and self.ALPHANUM_RE.search(line):
valid = True
if new_end_i is None or valid:
new_end_i = end_i - i
if valid:
break
# Accept this if there's more than one line or if the first
# line is long enough, in order to filter out small bits of garbage.
valid = (
valid and
(new_end_i - r_move_range.start + 1 >=
self.MOVE_PREFERRED_MIN_LINES or
len(self.differ.a[r_move_range.start].strip()) >=
self.MOVE_MIN_LINE_LENGTH))
if not valid:
return None
assert new_end_i is not None
return MoveRange(r_move_range.start, new_end_i, r_move_range.groups)
_generator = DiffOpcodeGenerator
def get_diff_opcode_generator_class():
"""Returns the DiffOpcodeGenerator class used for generating opcodes."""
return _generator
def set_diff_opcode_generator_class(renderer):
"""Sets the DiffOpcodeGenerator class used for generating opcodes."""
assert renderer
globals()['_generator'] = renderer
def get_diff_opcode_generator(*args, **kwargs):
"""Returns a DiffOpcodeGenerator instance used for generating opcodes."""
return _generator(*args, **kwargs)
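# Illustrative sketch (not part of the original module): an extension could
# install a customized generator before diffs are rendered, for example:
#
#   class EagerMoveOpcodeGenerator(DiffOpcodeGenerator):
#       # Hypothetical subclass that also flags single-line moves.
#       MOVE_PREFERRED_MIN_LINES = 1
#
#   set_diff_opcode_generator_class(EagerMoveOpcodeGenerator)
#
# Renderers calling get_diff_opcode_generator() would then receive instances
# of this subclass.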
|
|
# -*- coding: utf-8 -*-
"""
Test parsing of units
"""
import sys
import time
import datetime
import unittest
import parsedatetime as pdt
from parsedatetime.context import pdtContext
from . import utils
class test(unittest.TestCase):
@utils.assertEqualWithComparator
def assertExpectedResult(self, result, check, **kwargs):
return utils.compareResultByTimeTuplesAndFlags(result, check, **kwargs)
def setUp(self):
self.cal = pdt.Calendar()
(self.yr, self.mth, self.dy, self.hr,
self.mn, self.sec, self.wd, self.yd, self.isdst) = time.localtime()
def testMinutes(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(minutes=1)
h = s - datetime.timedelta(minutes=1)
start = s.timetuple()
target = t.timetuple()
history = h.timetuple()
self.assertExpectedResult(
self.cal.parse('1 minutes', start),
(target, pdtContext(pdtContext.ACU_MIN)))
self.assertExpectedResult(
self.cal.parse('1 minute', start),
(target, pdtContext(pdtContext.ACU_MIN)))
self.assertExpectedResult(
self.cal.parse('1 min', start),
(target, pdtContext(pdtContext.ACU_MIN)))
self.assertExpectedResult(
self.cal.parse('1min', start),
(target, pdtContext(pdtContext.ACU_MIN)))
self.assertExpectedResult(
self.cal.parse('1 m', start),
(target, pdtContext(pdtContext.ACU_MIN)))
self.assertExpectedResult(
self.cal.parse('1m', start),
(target, pdtContext(pdtContext.ACU_MIN)))
self.assertExpectedResult(
self.cal.parse('1 minutes ago', start),
(history, pdtContext(pdtContext.ACU_MIN)))
self.assertExpectedResult(
self.cal.parse('1 minute ago', start),
(history, pdtContext(pdtContext.ACU_MIN)))
def testHours(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(hours=1)
h = s - datetime.timedelta(hours=1)
start = s.timetuple()
target = t.timetuple()
history = h.timetuple()
self.assertExpectedResult(
self.cal.parse('1 hour', start),
(target, pdtContext(pdtContext.ACU_HOUR)))
self.assertExpectedResult(
self.cal.parse('1 hours', start),
(target, pdtContext(pdtContext.ACU_HOUR)))
self.assertExpectedResult(
self.cal.parse('1 hr', start),
(target, pdtContext(pdtContext.ACU_HOUR)))
self.assertExpectedResult(
self.cal.parse('1 hour ago', start),
(history, pdtContext(pdtContext.ACU_HOUR)))
self.assertExpectedResult(
self.cal.parse('1 hours ago', start),
(history, pdtContext(pdtContext.ACU_HOUR)))
def testDays(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(days=1)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(
self.cal.parse('1 day', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('1 days', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('1days', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('1 dy', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('1 d', start),
(target, pdtContext(pdtContext.ACU_DAY)))
def testNegativeDays(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(days=-1)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(
self.cal.parse('-1 day', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('-1 days', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('-1days', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('-1 dy', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('-1 d', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('- 1 day', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('- 1 days', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('- 1days', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('- 1 dy', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('- 1 d', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('1 day ago', start),
(target, pdtContext(pdtContext.ACU_DAY)))
self.assertExpectedResult(
self.cal.parse('1 days ago', start),
(target, pdtContext(pdtContext.ACU_DAY)))
def testWeeks(self):
s = datetime.datetime.now()
t = s + datetime.timedelta(weeks=1)
h = s - datetime.timedelta(weeks=1)
start = s.timetuple()
target = t.timetuple()
history = h.timetuple()
self.assertExpectedResult(
self.cal.parse('1 week', start),
(target, pdtContext(pdtContext.ACU_WEEK)))
self.assertExpectedResult(
self.cal.parse('1week', start),
(target, pdtContext(pdtContext.ACU_WEEK)))
self.assertExpectedResult(
self.cal.parse('1 weeks', start),
(target, pdtContext(pdtContext.ACU_WEEK)))
self.assertExpectedResult(
self.cal.parse('1 wk', start),
(target, pdtContext(pdtContext.ACU_WEEK)))
self.assertExpectedResult(
self.cal.parse('1 w', start),
(target, pdtContext(pdtContext.ACU_WEEK)))
self.assertExpectedResult(
self.cal.parse('1w', start),
(target, pdtContext(pdtContext.ACU_WEEK)))
self.assertExpectedResult(
self.cal.parse('1 week ago', start),
(history, pdtContext(pdtContext.ACU_WEEK)))
self.assertExpectedResult(
self.cal.parse('1 weeks ago', start),
(history, pdtContext(pdtContext.ACU_WEEK)))
def testMonths(self):
s = datetime.datetime.now()
t = self.cal.inc(s, month=1)
h = self.cal.inc(s, month=-1)
start = s.timetuple()
target = t.timetuple()
history = h.timetuple()
self.assertExpectedResult(
self.cal.parse('1 month', start),
(target, pdtContext(pdtContext.ACU_MONTH)))
self.assertExpectedResult(
self.cal.parse('1 months', start),
(target, pdtContext(pdtContext.ACU_MONTH)))
self.assertExpectedResult(
self.cal.parse('1month', start),
(target, pdtContext(pdtContext.ACU_MONTH)))
self.assertExpectedResult(
self.cal.parse('1 month ago', start),
(history, pdtContext(pdtContext.ACU_MONTH)))
self.assertExpectedResult(
self.cal.parse('1 months ago', start),
(history, pdtContext(pdtContext.ACU_MONTH)))
def testYears(self):
s = datetime.datetime.now()
t = self.cal.inc(s, year=1)
start = s.timetuple()
target = t.timetuple()
self.assertExpectedResult(
self.cal.parse('1 year', start),
(target, pdtContext(pdtContext.ACU_YEAR)))
self.assertExpectedResult(
self.cal.parse('1 years', start),
(target, pdtContext(pdtContext.ACU_YEAR)))
self.assertExpectedResult(
self.cal.parse('1 yr', start),
(target, pdtContext(pdtContext.ACU_YEAR)))
self.assertExpectedResult(
self.cal.parse('1 y', start),
(target, pdtContext(pdtContext.ACU_YEAR)))
self.assertExpectedResult(
self.cal.parse('1y', start),
(target, pdtContext(pdtContext.ACU_YEAR)))
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import test_expectations
from telemetry import story
from telemetry.internal.platform import system_info
from telemetry.page import page as page_module
VENDOR_NVIDIA = 0x10DE
VENDOR_AMD = 0x1002
VENDOR_INTEL = 0x8086
VENDOR_STRING_IMAGINATION = 'Imagination Technologies'
DEVICE_STRING_SGX = 'PowerVR SGX 554'
class StubPlatform(object):
def __init__(self, os_name, os_version_name=None):
self.os_name = os_name
self.os_version_name = os_version_name
def GetOSName(self):
return self.os_name
def GetOSVersionName(self):
return self.os_version_name
class StubBrowser(object):
def __init__(self, platform, gpu, device, vendor_string, device_string):
self.platform = platform
self.system_info = system_info.SystemInfo.FromDict({
'model_name': '',
'gpu': {
'devices': [
{'vendor_id': gpu, 'device_id': device,
'vendor_string': vendor_string, 'device_string': device_string},
]
}
})
@property
def supports_system_info(self):
    return bool(self.system_info)
def GetSystemInfo(self):
return self.system_info
class SampleTestExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
self.Fail('page1.html', ['win', 'mac'], bug=123)
self.Fail('page2.html', ['vista'], bug=123)
self.Fail('page3.html', bug=123)
self.Fail('page4.*', bug=123)
self.Fail('http://test.com/page5.html', bug=123)
self.Fail('page6.html', ['nvidia', 'intel'], bug=123)
self.Fail('page7.html', [('nvidia', 0x1001), ('nvidia', 0x1002)], bug=123)
self.Fail('page8.html', ['win', 'intel', ('amd', 0x1001)], bug=123)
self.Fail('page9.html', ['imagination'])
self.Fail('page10.html', [('imagination', 'PowerVR SGX 554')])
self.Fail('Pages.page_11')
self.Fail('page12.html', ['mountainlion'])
self.Fail('page13.html', ['mavericks'])
self.Fail('page14.html', ['yosemite'])
self.Fail('page15.html', ['amd', 'valid_condition_matched'])
self.Fail('page16.html', ['amd', 'valid_condition_unmatched'])
def IsValidUserDefinedCondition(self, condition):
return condition in ('valid_condition_matched', 'valid_condition_unmatched')
def ModifiersApply(self, shared_page_state, expectation):
if not super(SampleTestExpectations,
self).ModifiersApply(shared_page_state, expectation):
return False
return ((not expectation.user_defined_conditions) or
'valid_condition_matched' in expectation.user_defined_conditions)
class TestExpectationsTest(unittest.TestCase):
def setUp(self):
self.expectations = SampleTestExpectations()
def assertExpectationEquals(self, expected, page, platform='', gpu=0,
device=0, vendor_string='', device_string=''):
result = self.expectations.GetExpectationForPage(StubBrowser(
platform, gpu, device, vendor_string, device_string), page)
self.assertEquals(expected, result)
# Pages with no expectations should always return 'pass'
def testNoExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page0.html', story_set)
self.assertExpectationEquals('pass', page, StubPlatform('win'))
# Pages with expectations for an OS should only return them when running on
# that OS
def testOSExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page1.html', story_set)
self.assertExpectationEquals('fail', page, StubPlatform('win'))
self.assertExpectationEquals('fail', page, StubPlatform('mac'))
self.assertExpectationEquals('pass', page, StubPlatform('linux'))
# Pages with expectations for an OS version should only return them when
# running on that OS version
def testOSVersionExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page2.html', story_set)
self.assertExpectationEquals('fail', page, StubPlatform('win', 'vista'))
self.assertExpectationEquals('pass', page, StubPlatform('win', 'win7'))
# Pages with non-conditional expectations should always return that
# expectation regardless of OS or OS version
def testConditionlessExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page3.html', story_set)
self.assertExpectationEquals('fail', page, StubPlatform('win'))
self.assertExpectationEquals('fail', page, StubPlatform('mac', 'lion'))
self.assertExpectationEquals('fail', page, StubPlatform('linux'))
# Expectations with wildcard characters should apply to any URL matching the pattern
def testWildcardExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page4.html', story_set)
page_js = page_module.Page('http://test.com/page4.html', story_set)
self.assertExpectationEquals('fail', page, StubPlatform('win'))
self.assertExpectationEquals('fail', page_js, StubPlatform('win'))
# Expectations with absolute paths should match the entire path
def testAbsoluteExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page5.html', story_set)
page_org = page_module.Page('http://test.org/page5.html', story_set)
page_https = page_module.Page('https://test.com/page5.html', story_set)
self.assertExpectationEquals('fail', page, StubPlatform('win'))
self.assertExpectationEquals('pass', page_org, StubPlatform('win'))
self.assertExpectationEquals('pass', page_https, StubPlatform('win'))
# Pages with expectations for a GPU should only return them when running with
# that GPU
def testGpuExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page6.html', story_set)
self.assertExpectationEquals('fail', page, gpu=VENDOR_NVIDIA)
self.assertExpectationEquals('fail', page, gpu=VENDOR_INTEL)
self.assertExpectationEquals('pass', page, gpu=VENDOR_AMD)
# Pages with expectations for a GPU device ID should only return them when
# running with that GPU device
def testGpuDeviceIdExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page7.html', story_set)
self.assertExpectationEquals('fail', page, gpu=VENDOR_NVIDIA, device=0x1001)
self.assertExpectationEquals('fail', page, gpu=VENDOR_NVIDIA, device=0x1002)
self.assertExpectationEquals('pass', page, gpu=VENDOR_NVIDIA, device=0x1003)
self.assertExpectationEquals('pass', page, gpu=VENDOR_AMD, device=0x1001)
# Pages with multiple expectations should only return them when all criteria
# are met
def testMultipleExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page8.html', story_set)
self.assertExpectationEquals('fail', page,
StubPlatform('win'), VENDOR_AMD, 0x1001)
self.assertExpectationEquals('fail', page,
StubPlatform('win'), VENDOR_INTEL, 0x1002)
self.assertExpectationEquals('pass', page,
StubPlatform('win'), VENDOR_NVIDIA, 0x1001)
self.assertExpectationEquals('pass', page,
StubPlatform('mac'), VENDOR_AMD, 0x1001)
self.assertExpectationEquals('pass', page,
StubPlatform('win'), VENDOR_AMD, 0x1002)
# Pages with expectations based on GPU vendor string.
def testGpuVendorStringExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page9.html', story_set)
self.assertExpectationEquals('fail', page,
vendor_string=VENDOR_STRING_IMAGINATION,
device_string=DEVICE_STRING_SGX)
self.assertExpectationEquals('fail', page,
vendor_string=VENDOR_STRING_IMAGINATION,
device_string='Triangle Monster 3000')
self.assertExpectationEquals('pass', page,
vendor_string='Acme',
device_string=DEVICE_STRING_SGX)
# Pages with expectations based on GPU vendor and renderer string pairs.
def testGpuDeviceStringExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page10.html', story_set)
self.assertExpectationEquals('fail', page,
vendor_string=VENDOR_STRING_IMAGINATION,
device_string=DEVICE_STRING_SGX)
self.assertExpectationEquals('pass', page,
vendor_string=VENDOR_STRING_IMAGINATION,
device_string='Triangle Monster 3000')
self.assertExpectationEquals('pass', page,
vendor_string='Acme',
device_string=DEVICE_STRING_SGX)
# Pages with user-defined expectations.
def testUserDefinedExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page15.html', story_set)
self.assertExpectationEquals('fail', page, gpu=VENDOR_AMD)
page = page_module.Page('http://test.com/page16.html', story_set)
self.assertExpectationEquals('pass', page, gpu=VENDOR_AMD)
# Expectations can be set against page names as well as urls
def testPageNameExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page11.html', story_set,
name='Pages.page_11')
self.assertExpectationEquals('fail', page)
# Verify version-specific Mac expectations.
def testMacVersionExpectations(self):
story_set = story.StorySet()
page = page_module.Page('http://test.com/page12.html', story_set)
self.assertExpectationEquals('fail', page,
StubPlatform('mac', 'mountainlion'))
self.assertExpectationEquals('pass', page,
StubPlatform('mac', 'mavericks'))
self.assertExpectationEquals('pass', page,
StubPlatform('mac', 'yosemite'))
story_set = story.StorySet()
page = page_module.Page('http://test.com/page13.html', story_set)
self.assertExpectationEquals('pass', page,
StubPlatform('mac', 'mountainlion'))
self.assertExpectationEquals('fail', page,
StubPlatform('mac', 'mavericks'))
self.assertExpectationEquals('pass', page,
StubPlatform('mac', 'yosemite'))
story_set = story.StorySet()
page = page_module.Page('http://test.com/page14.html', story_set)
self.assertExpectationEquals('pass', page,
StubPlatform('mac', 'mountainlion'))
self.assertExpectationEquals('pass', page,
StubPlatform('mac', 'mavericks'))
self.assertExpectationEquals('fail', page,
StubPlatform('mac', 'yosemite'))
|
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Herald HTTP transport discovery, based on a homemade multicast protocol
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.0.3
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 0, 3)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Herald
from . import ACCESS_ID, SERVICE_HTTP_TRANSPORT, SERVICE_HTTP_RECEIVER, \
FACTORY_DISCOVERY_MULTICAST, PROP_MULTICAST_GROUP, PROP_MULTICAST_PORT
import herald
import herald.beans as beans
import herald.utils as utils
import herald.transports.peer_contact as peer_contact
# Pelix/iPOPO
from pelix.ipopo.decorators import ComponentFactory, Requires, Validate, \
Invalidate, Property, RequiresBest
from pelix.utilities import to_bytes, to_unicode
# Standard library
import logging
import os
import select
import socket
import struct
import threading
import time
# ------------------------------------------------------------------------------
# Heart beat packet type
PACKET_TYPE_HEARTBEAT = 1
# Last beat packet type
PACKET_TYPE_LASTBEAT = 2
PROBE_CHANNEL_MULTICAST = "http_multicast"
""" Name of the multicast discovery probe channel """
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
if os.name == "nt":
# Windows Specific code
def pton(family, address):
"""
Calls inet_pton
:param family: Socket family
:param address: A string address
:return: The binary form of the given address
"""
if family == socket.AF_INET:
return socket.inet_aton(address)
elif family == socket.AF_INET6:
# Do it using WinSocks
import ctypes
winsock = ctypes.windll.ws2_32
# Prepare structure
class sockaddr_in6(ctypes.Structure):
"""
Definition of the C structure sockaddr_in6
"""
# pylint: disable=C0103
_fields_ = [("sin6_family", ctypes.c_short),
("sin6_port", ctypes.c_ushort),
("sin6_flowinfo", ctypes.c_ulong),
("sin6_addr", ctypes.c_ubyte * 16),
("sin6_scope_id", ctypes.c_ulong)]
# Prepare pointers
addr_ptr = ctypes.c_char_p(to_bytes(address))
out_address = sockaddr_in6()
size = ctypes.c_int(ctypes.sizeof(sockaddr_in6))
size_ptr = ctypes.pointer(size)
# Second call
winsock.WSAStringToAddressA(addr_ptr, family, 0,
ctypes.byref(out_address), size_ptr)
# Convert the array...
bin_addr = 0
for part in out_address.sin6_addr:
bin_addr = bin_addr * 16 + part
return bin_addr
else:
raise ValueError("Unhandled socket family: {0}".format(family))
else:
# Other systems
def pton(family, address):
"""
Calls inet_pton
:param family: Socket family
:param address: A string address
:return: The binary form of the given address
"""
return socket.inet_pton(family, address)
def make_mreq(family, address):
"""
Makes a mreq structure object for the given address and socket family.
:param family: A socket family (AF_INET or AF_INET6)
:param address: A multicast address (group)
:raise ValueError: Invalid family or address
"""
if not address:
raise ValueError("Empty address")
# Convert the address to a binary form
group_bin = pton(family, address)
if family == socket.AF_INET:
# IPv4
# struct ip_mreq
# {
# struct in_addr imr_multiaddr; /* IP multicast address of group */
# struct in_addr imr_interface; /* local IP address of interface */
# };
# "=I" : Native order, standard size unsigned int
return group_bin + struct.pack("=I", socket.INADDR_ANY)
elif family == socket.AF_INET6:
# IPv6
# struct ipv6_mreq {
# struct in6_addr ipv6mr_multiaddr;
# unsigned int ipv6mr_interface;
# };
# "@I" : Native order, native size unsigned int
return group_bin + struct.pack("@I", 0)
raise ValueError("Unknown family {0}".format(family))
def create_multicast_socket(address, port, join=True):
"""
Creates a multicast socket according to the given address and port.
Handles both IPv4 and IPv6 addresses.
:param address: Multicast address/group
:param port: Socket port
:param join: If False, the socket is not bound and does not join the
multicast group (creates a simple UDP socket)
:return: A tuple (socket, listening address)
:raise ValueError: Invalid address or port
"""
# Get the information about a datagram (UDP) socket, of any family
try:
addrs_info = socket.getaddrinfo(address, port, socket.AF_UNSPEC,
socket.SOCK_DGRAM)
except socket.gaierror:
raise ValueError("Error retrieving address information ({0}, {1})"
.format(address, port))
if len(addrs_info) > 1:
_logger.debug("More than one address information found. "
"Using the first one.")
# Get the first entry : (family, socktype, proto, canonname, sockaddr)
addr_info = addrs_info[0]
# Only accept IPv4/v6 addresses
if addr_info[0] not in (socket.AF_INET, socket.AF_INET6):
# Unhandled address family
raise ValueError("Unhandled socket family : %d" % (addr_info[0]))
# Prepare the socket
sock = socket.socket(addr_info[0], socket.SOCK_DGRAM)
if join:
# Reuse address
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
# Special case for MacOS
# pylint: disable=no-member
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except AttributeError:
pass
# Bind the socket
if sock.family == socket.AF_INET:
# IPv4 binding
sock.bind(('0.0.0.0', port))
else:
# IPv6 Binding
sock.bind(('::', port))
# Prepare the mreq structure to join the group
# addrinfo[4] = (addr,port)
mreq = make_mreq(sock.family, addr_info[4][0])
# Join the group
if sock.family == socket.AF_INET:
# IPv4
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
# Allow multicast packets to get back on this host
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
else:
# IPv6
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
# Allow multicast packets to get back on this host
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, 1)
return sock, addr_info[4][0]
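# Illustrative usage (not part of the original module; group/port below are
# just the module defaults): create a socket joined to the multicast group,
# read one datagram, then leave the group and close the socket.
#
#   sock, group = create_multicast_socket("239.0.0.1", 42000)
#   data, sender = sock.recvfrom(1024)
#   close_multicast_socket(sock, group)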
def close_multicast_socket(sock, address):
"""
Cleans up the given multicast socket.
Unregisters it of the multicast group.
Parameters should be the result of create_multicast_socket
:param sock: A multicast socket
:param address: The multicast address used by the socket
"""
if sock is None:
return
if address:
# Prepare the mreq structure to join the group
mreq = make_mreq(sock.family, address)
# Quit group
if sock.family == socket.AF_INET:
# IPv4
sock.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, mreq)
elif sock.family == socket.AF_INET6:
# IPv6
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_LEAVE_GROUP, mreq)
# Close the socket
sock.close()
# ------------------------------------------------------------------------------
def make_heartbeat(port, path, peer_uid, app_id):
"""
Prepares the heart beat UDP packet
Format : Little endian
* Kind of beat (1 byte)
* Herald HTTP server port (2 bytes)
* Herald HTTP servlet path length (2 bytes)
* Herald HTTP servlet path (variable, UTF-8)
* Peer UID length (2 bytes)
* Peer UID (variable, UTF-8)
* Application ID length (2 bytes)
* Application ID (variable, UTF-8)
:param port: The port to access the Herald HTTP server
:param path: The path to the Herald HTTP servlet
:param peer_uid: The UID of the peer
:param app_id: Application ID
:return: The heart beat packet content (byte array)
"""
# Type and port...
packet = struct.pack("<BH", PACKET_TYPE_HEARTBEAT, port)
for string in (path, peer_uid, app_id):
# Strings...
string_bytes = to_bytes(string)
packet += struct.pack("<H", len(string_bytes))
packet += string_bytes
return packet
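# Illustrative example (assumed values, not from the original module): the
# packet starts with the "<BH" header described above, followed by three
# length-prefixed UTF-8 strings (path, peer UID, application ID).
#
#   beat = make_heartbeat(8080, "/herald", "peer-uid", "sample-app")
#   kind, http_port = struct.unpack_from("<BH", beat)
#   assert kind == PACKET_TYPE_HEARTBEAT and http_port == 8080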
def make_lastbeat(peer_uid, app_id):
"""
Prepares the last beat UDP packet (when the peer is going away)
Format : Little endian
* Kind of beat (1 byte)
* Peer UID length (2 bytes)
* Peer UID (variable, UTF-8)
* Application ID length (2 bytes)
* Application ID (variable, UTF-8)
:param peer_uid: Peer UID
:param app_id: Application ID
:return: The last beat packet content (byte array)
"""
packet = struct.pack("<B", PACKET_TYPE_LASTBEAT)
for string in (peer_uid, app_id):
string_bytes = to_bytes(string)
packet += struct.pack("<H", len(string_bytes))
packet += string_bytes
return packet
# ------------------------------------------------------------------------------
class MulticastReceiver(object):
"""
A multicast datagram receiver
"""
def __init__(self, group, port, callback):
"""
Sets up the receiver
The given callback must have the following signature:
``callback(kind, peer_uid, app_id, host, port, path)``.
:param group: Multicast group to listen
:param port: Multicast port
:param callback: Method to call back once a packet is received
"""
# Parameters
self._group = group
self._port = port
self._callback = callback
# Reception loop
self._stop_event = threading.Event()
self._thread = None
# Socket
self._socket = None
def start(self):
"""
Starts listening to the socket
:return: True if the socket has been created
"""
# Create the multicast socket (update the group)
self._socket, self._group = create_multicast_socket(self._group,
self._port)
# Start the listening thread
self._stop_event.clear()
self._thread = threading.Thread(
target=self.__read,
name="MulticastReceiver-{0}".format(self._port))
self._thread.start()
def stop(self):
"""
Stops listening to the socket
"""
# Stop the loop
self._stop_event.set()
# Join the thread
self._thread.join()
self._thread = None
# Close the socket
close_multicast_socket(self._socket, self._group)
def _handle_heartbeat(self, sender, data):
"""
Handles a raw heart beat
:param sender: Sender (address, port) tuple
:param data: Raw packet data
"""
# Kind of beat
parsed, data = self._unpack("<B", data)
kind = parsed[0]
if kind == PACKET_TYPE_HEARTBEAT:
# Extract content
parsed, data = self._unpack("<H", data)
port = parsed[0]
path, data = self._unpack_string(data)
uid, data = self._unpack_string(data)
try:
app_id, data = self._unpack_string(data)
except struct.error:
# Compatibility with previous version
app_id = herald.DEFAULT_APPLICATION_ID
elif kind == PACKET_TYPE_LASTBEAT:
# Peer is going away
uid, data = self._unpack_string(data)
app_id, data = self._unpack_string(data)
port = -1
path = None
else:
_logger.warning("Unknown kind of packet: %d", kind)
return
try:
self._callback(kind, uid, app_id, sender[0], port, path)
except Exception as ex:
_logger.exception("Error handling heart beat: %s", ex)
def _unpack(self, fmt, data):
"""
Calls struct.unpack().
Returns a tuple containing the result tuple and the subset of data
containing the unread content.
:param fmt: The format of data
:param data: Data to unpack
:return: A tuple (result tuple, unread_data)
"""
size = struct.calcsize(fmt)
read, unread = data[:size], data[size:]
return struct.unpack(fmt, read), unread
def _unpack_string(self, data):
"""
Unpacks the next string from the given data
:param data: A datagram, starting at a string size
:return: A (string, unread_data) tuple
"""
# Get the size of the string
result, data = self._unpack("<H", data)
size = result[0]
# Read it
string_bytes = data[:size]
# Convert it
return to_unicode(string_bytes), data[size:]
def __read(self):
"""
Reads packets from the socket
"""
# Set the socket as non-blocking
self._socket.setblocking(0)
while not self._stop_event.is_set():
# Watch for content
ready = select.select([self._socket], [], [], 1)
if ready[0]:
# Socket is ready
data, sender = self._socket.recvfrom(1024)
try:
self._handle_heartbeat(sender, data)
except Exception as ex:
_logger.exception("Error handling the heart beat: %s", ex)
# ------------------------------------------------------------------------------
@ComponentFactory(FACTORY_DISCOVERY_MULTICAST)
@RequiresBest('_probe', herald.SERVICE_PROBE)
@Requires('_directory', herald.SERVICE_DIRECTORY)
@Requires('_receiver', SERVICE_HTTP_RECEIVER)
@Requires('_transport', SERVICE_HTTP_TRANSPORT)
@Property('_group', PROP_MULTICAST_GROUP, '239.0.0.1')
@Property('_port', PROP_MULTICAST_PORT, 42000)
@Property('_peer_ttl', 'peer.ttl', 30)
class MulticastHeartbeat(object):
"""
Discovery of Herald peers based on multicast
"""
def __init__(self):
"""
Sets up the component
"""
# Injected services
self._directory = None
self._receiver = None
self._transport = None
self._probe = None
# Local peer bean
self._local_peer = None
# Properties
self._group = "239.0.0.1"
self._port = 42000
self._peer_ttl = 30
# Multicast receiver
self._multicast_recv = None
# Multicast sender
self._multicast_send = None
self._multicast_target = None
# Threads
self._stop_event = threading.Event()
self._lst_thread = None
self._heart_thread = None
# peer UID -> Last Time Seen
self._peer_lst = {}
self._lst_lock = threading.Lock()
@Validate
def _validate(self, _):
"""
Component validated
"""
self._port = int(self._port)
self._peer_ttl = int(self._peer_ttl)
self._local_peer = self._directory.get_local_peer()
self._stop_event.clear()
# Start the multicast listener
self._multicast_recv = MulticastReceiver(self._group, self._port,
self.handle_heartbeat)
self._multicast_recv.start()
# Create the multicast sender socket
self._multicast_send, address = create_multicast_socket(self._group,
self._port,
False)
self._multicast_target = (address, self._port)
# Start the heart & TTL threads
self._heart_thread = threading.Thread(target=self.__heart_loop,
name="Herald-HTTP-HeartBeat")
self._lst_thread = threading.Thread(target=self.__lst_loop,
name="Herald-HTTP-LST")
self._heart_thread.start()
self._lst_thread.start()
@Invalidate
def _invalidate(self, _):
"""
Component invalidated
"""
# Stop everything
self._stop_event.set()
self._multicast_recv.stop()
self._multicast_recv = None
# Wait for the threads to stop
self._heart_thread.join(.5)
self._lst_thread.join(.5)
self._lst_thread = None
# Send a last beat: "leaving"
beat = make_lastbeat(self._local_peer.uid, self._local_peer.app_id)
self._multicast_send.sendto(beat, 0, self._multicast_target)
# Clear the multicast sender
self._multicast_send.close()
self._multicast_send = None
self._multicast_target = None
# Clear storage
self._peer_lst.clear()
def handle_heartbeat(self, kind, peer_uid, app_id, host, port, path):
"""
Handles a parsed heart beat
:param kind: Kind of heart beat
:param peer_uid: UID of the discovered peer
:param app_id: Application ID of the discovered peer
:param host: Address which sent the heart beat
:param port: Port of the Herald HTTP server
:param path: Path to the Herald HTTP servlet
"""
if peer_uid == self._local_peer.uid \
or app_id != self._local_peer.app_id:
# Ignore this heart beat (sent by us or another application)
return
if kind == PACKET_TYPE_LASTBEAT:
with self._lst_lock:
try:
del self._peer_lst[peer_uid]
except KeyError:
# We weren't aware of that peer
pass
self._probe.store(
PROBE_CHANNEL_MULTICAST,
{"uid": peer_uid, "timestamp": time.time(),
"event": "lastbeat"})
try:
# Peer is going away
peer = self._directory.get_peer(peer_uid)
peer.unset_access(ACCESS_ID)
except KeyError:
# Unknown peer
pass
elif kind == PACKET_TYPE_HEARTBEAT:
with self._lst_lock:
# Update the peer LST
self._peer_lst[peer_uid] = time.time()
if peer_uid not in self._directory:
# The peer isn't known, register it
self._probe.store(
PROBE_CHANNEL_MULTICAST,
{"uid": peer_uid, "timestamp": time.time(),
"event": "discovered"})
self.__discover_peer(host, port, path)
def __discover_peer(self, host, port, path):
"""
Grabs the description of a peer using the Herald servlet
:param host: Address which sent the heart beat
:param port: Port of the Herald HTTP server
:param path: Path to the Herald HTTP servlet
"""
if path.startswith('/'):
# Remove the starting /, as it is added while forging the URL
path = path[1:]
# Normalize the address of the sender
host = utils.normalize_ip(host)
# Prepare the "extra" information, like for a reply
extra = {'host': host, 'port': port, 'path': path}
local_dump = self._directory.get_local_peer().dump()
try:
self._transport.fire(
None,
beans.Message(peer_contact.SUBJECT_DISCOVERY_STEP_1,
local_dump), extra)
except Exception as ex:
_logger.exception("Error contacting peer: %s", ex)
def __heart_loop(self):
"""
Loop sending heart beats every 20 seconds
"""
# Get local information
access = self._receiver.get_access_info()
# Prepare the packet
beat = make_heartbeat(access[1], access[2], self._local_peer.uid,
self._local_peer.app_id)
while not self._stop_event.is_set():
# Send the heart beat using the multicast socket
self._multicast_send.sendto(beat, 0, self._multicast_target)
# Wait 20 seconds before next loop
self._stop_event.wait(20)
def __lst_loop(self):
"""
Loop that validates the LST of all peers and removes those who took
too long to respond
"""
while not self._stop_event.is_set():
with self._lst_lock:
loop_start = time.time()
to_delete = set()
for uid, last_seen in self._peer_lst.items():
if not last_seen:
# No LST for this peer
_logger.debug("Invalid LST for %s", uid)
elif (loop_start - last_seen) > self._peer_ttl:
# TTL reached
to_delete.add(uid)
_logger.debug("Peer %s reached TTL.", uid)
self._probe.store(
PROBE_CHANNEL_MULTICAST,
{"uid": uid, "timestamp": time.time(),
"event": "timeout"})
for uid in to_delete:
# Unregister those peers
del self._peer_lst[uid]
self._directory.unregister(uid)
# Wait a second or the event before next loop
self._stop_event.wait(1)
|
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import sys
import itertools
from collections import namedtuple
from genutil import *
# Templates
declarationTemplate = """
case ${{NAME}}
${{COMPILE_FAIL}}
values {}
vertex ""
precision mediump float;
attribute highp vec4 dEQP_Position;
${{VARIABLE_VTX}}
void main()
{
x0 = 1.0;
gl_Position = dEQP_Position;
}
""
fragment ""
precision mediump float;
${{VARIABLE_FRG}}
void main()
{
float result = x0 + x1;
gl_FragColor = vec4(result, result, result, 1.0);
}
""
end
"""[1:-1]
parameterTemplate = """
case ${{NAME}}
${{COMPILE_FAIL}}
values {}
both ""
precision mediump float;
${DECLARATIONS}
float foo0 (${{PARAMETER0}})
{
return x + 1.0;
}
void foo1 (${{PARAMETER1}})
{
x = 1.0;
}
float foo2 (${{PARAMETER2}})
{
return x + 1.0;
}
void main()
{
${SETUP}
float result;
foo1(result);
float x0 = foo0(1.0);
foo2(result);
${OUTPUT}
}
""
end
"""[1:-1]
# Classes
class DeclarationCase(ShaderCase):
def __init__(self, compileFail, paramList):
self.compileFail = "expect compile_fail" if compileFail else "expect pass"
self.name = ''
var0 = ''
var1 = ''
var2 = ''
for p in paramList:
self.name += p.name
if paramList.index(p) != len(paramList)-1:
self.name += '_'
var0 += p.vars[0] + ' '
var1 += p.vars[1] + ' '
var2 += p.vars[2] + ' '
var0 += 'float x0;\n'
var1 += 'float x1;\n'
var2 += 'float x2;'
self.variableVtx = (var0 + var1 + var2).strip()
self.variableFrg = (var0 + var1).strip() # Omit 'attribute' in frag shader
self.variableVtx = self.variableVtx.replace("  ", " ")
self.variableFrg = self.variableFrg.replace("  ", " ")
def __str__(self):
params = {
"NAME" : self.name,
"COMPILE_FAIL" : self.compileFail,
"VARIABLE_VTX" : self.variableVtx,
"VARIABLE_FRG" : self.variableFrg
}
return fillTemplate(declarationTemplate, params)
class ParameterCase(ShaderCase):
def __init__(self, compileFail, paramList):
self.compileFail = "expect compile_fail" if compileFail else "expect pass"
self.name = ''
self.param0 = ''
self.param1 = ''
self.param2 = ''
for p in paramList:
self.name += p.name
if paramList.index(p) != len(paramList)-1:
self.name += '_'
self.param0 += p.vars[0] + ' '
self.param1 += p.vars[1] + ' '
self.param2 += p.vars[2] + ' '
self.param0 += 'float x'
self.param1 += 'float x'
self.param2 += 'float x'
self.param0 = self.param0.replace("  ", " ")
self.param1 = self.param1.replace("  ", " ")
self.param2 = self.param2.replace("  ", " ")
def __str__(self):
params = {
"NAME" : self.name,
"COMPILE_FAIL" : self.compileFail,
"PARAMETER0" : self.param0,
"PARAMETER1" : self.param1,
"PARAMETER2" : self.param2,
}
return fillTemplate(parameterTemplate, params)
# Declarations
CaseFormat = namedtuple('CaseFormat', 'name vars')
DECL_INVARIANT = CaseFormat("invariant", ["invariant", "", ""])
DECL_STORAGE = CaseFormat("storage", ["varying", "uniform", "attribute"])
DECL_PRECISION = CaseFormat("precision", ["lowp", "mediump", "mediump"])
PARAM_STORAGE = CaseFormat("storage", [ "const", "", ""])
PARAM_PARAMETER = CaseFormat("parameter", [ "in", "out", "inout" ])
PARAM_PRECISION = CaseFormat("precision", [ "lowp", "mediump", "mediump" ])
# Order of qualification tests
validDeclarationCases = []
invalidDeclarationCases = []
validParameterCases = []
invalidParameterCases = []
declFormats = [
[DECL_INVARIANT, DECL_STORAGE, DECL_PRECISION],
[DECL_STORAGE, DECL_PRECISION],
[DECL_INVARIANT, DECL_STORAGE]
]
paramFormats = [
[PARAM_STORAGE, PARAM_PARAMETER, PARAM_PRECISION],
[PARAM_STORAGE, PARAM_PARAMETER],
[PARAM_STORAGE, PARAM_PRECISION],
[PARAM_PARAMETER, PARAM_PRECISION]
]
for f in declFormats:
for p in itertools.permutations(f):
if list(p) == f:
validDeclarationCases.append(DeclarationCase(False, p)) # Correct order
else:
invalidDeclarationCases.append(DeclarationCase(True, p)) # Incorrect order
for f in paramFormats:
for p in itertools.permutations(f):
if list(p) == f:
validParameterCases.append(ParameterCase(False, p)) # Correct order
else:
invalidParameterCases.append(ParameterCase(True, p)) # Incorrect order
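# For illustration (not generated output): with the declFormats entry
# [DECL_INVARIANT, DECL_STORAGE, DECL_PRECISION], the in-order permutation
# yields a passing case named "invariant_storage_precision", while a
# permutation such as (DECL_STORAGE, DECL_INVARIANT, DECL_PRECISION) yields
# an "expect compile_fail" case named "storage_invariant_precision".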
qualificationOrderCases = [
CaseGroup("variables", "Order of qualification in variable declarations.", children = [
CaseGroup("valid", "Valid orderings.", validDeclarationCases),
CaseGroup("invalid", "Invalid orderings.", invalidDeclarationCases)
]),
CaseGroup("parameters", "Order of qualification in function parameters.", children = [
CaseGroup("valid", "Valid orderings.", validParameterCases),
CaseGroup("invalid", "Invalid orderings.", invalidParameterCases)
])
]
# Main program
if __name__ == "__main__":
print("Generating shader case files.")
writeAllCases("qualification_order.test", qualificationOrderCases)
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
from abc import abstractmethod
from collections import OrderedDict
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.task.task import Task
from pants.util.dirutil import fast_relpath, safe_delete, safe_walk
logger = logging.getLogger(__name__)
class SimpleCodegenTask(Task):
"""A base-class for code generation for a single target language.
:API: public
"""
# Subclasses may override to provide the type of gen targets the target acts on.
# E.g., JavaThriftLibrary. If not provided, the subclass must implement is_gentarget.
gentarget_type = None
def __init__(self, context, workdir):
"""
Add pass-thru Task Constructor for public API visibility.
:API: public
"""
super(SimpleCodegenTask, self).__init__(context, workdir)
@classmethod
def product_types(cls):
# NB(gmalmquist): This is a hack copied from the old CodeGen base class to get the round manager
# to properly run codegen before resolve and compile. It would be more correct to just have each
# individual codegen class declare what languages it generates, but would cause problems with
# scala. See https://rbcommons.com/s/twitter/r/2540/.
return ['java', 'scala', 'python']
@classmethod
def register_options(cls, register):
super(SimpleCodegenTask, cls).register_options(register)
register('--allow-empty', type=bool, default=True, fingerprint=True,
help='Skip targets with no sources defined.',
advanced=True)
register('--allow-dups', type=bool, fingerprint=True,
help='Allow multiple targets specifying the same sources. If duplicates are '
'allowed, the logic of find_sources will associate generated sources with '
'the least-dependent targets that generate them.',
advanced=True)
@classmethod
def get_fingerprint_strategy(cls):
"""Override this method to use a fingerprint strategy other than the default one.
:API: public
:return: a fingerprint strategy, or None to use the default strategy.
"""
return None
@property
def cache_target_dirs(self):
return True
@property
def validate_sources_present(self):
"""A property indicating whether input targets require sources.
If targets should have sources, the `--allow-empty` flag indicates whether it is a
warning or an error for sources to be missing.
:API: public
"""
return True
def synthetic_target_extra_dependencies(self, target, target_workdir):
"""Gets any extra dependencies generated synthetic targets should have.
This method is optional for subclasses to implement, because some code generators may have no
extra dependencies.
:param Target target: the Target from which we are generating a synthetic Target. E.g., 'target'
might be a JavaProtobufLibrary, whose corresponding synthetic Target would be a JavaLibrary.
It may not be necessary to use this parameter depending on the details of the subclass.
:API: public
:return: a list of dependencies.
"""
return []
def synthetic_target_type_by_target(self, target):
"""The type of target this codegen task generates.
For example, the target type for JaxbGen would simply be JavaLibrary.
:API: public
:return: a type (class) that inherits from Target.
"""
raise NotImplementedError
def synthetic_target_type(self, target):
"""The type of target this codegen task generates.
For example, the target type for JaxbGen would simply be JavaLibrary.
:API: public
:return: a type (class) that inherits from Target.
"""
raise NotImplementedError
def is_gentarget(self, target):
"""Predicate which determines whether the target in question is relevant to this codegen task.
E.g., the JaxbGen task considers JaxbLibrary targets to be relevant, and nothing else.
:API: public
:param Target target: The target to check.
:return: True if this class can generate code for the given target, False otherwise.
"""
if self.gentarget_type:
return isinstance(target, self.gentarget_type)
else:
raise NotImplementedError
def ignore_dup(self, tgt1, tgt2, rel_src):
"""Subclasses can override to omit a specific generated source file from dup checking."""
return False
def codegen_targets(self):
"""Finds codegen targets in the dependency graph.
:API: public
:return: an iterable of dependency targets.
"""
return self.context.targets(self.is_gentarget)
def _do_validate_sources_present(self, target):
"""Checks whether sources is empty, and either raises a TaskError or just returns False.
The specifics of this behavior are defined by whether the user sets --allow-empty to True/False:
--allow-empty=False will result in a TaskError being raised in the event of an empty source
set. If --allow-empty=True, this method will just return false and log a warning.
Shared for all SimpleCodegenTask subclasses to help keep errors consistent and descriptive.
:param target: Target to validate.
:return: True if sources is not empty, False otherwise.
"""
if not self.validate_sources_present:
return True
sources = target.sources_relative_to_buildroot()
if not sources:
message = ('Target {} has no sources.'.format(target.address.spec))
if not self.get_options().allow_empty:
raise TaskError(message)
else:
logger.warn(message)
return False
return True
def _get_synthetic_address(self, target, target_workdir):
synthetic_name = target.id
sources_rel_path = os.path.relpath(target_workdir, get_buildroot())
synthetic_address = Address(sources_rel_path, synthetic_name)
return synthetic_address
def execute(self):
with self.invalidated(self.codegen_targets(),
invalidate_dependents=True,
fingerprint_strategy=self.get_fingerprint_strategy()) as invalidation_check:
with self.context.new_workunit(name='execute', labels=[WorkUnitLabel.MULTITOOL]):
for vt in invalidation_check.all_vts:
# Build the target and handle duplicate sources.
if not vt.valid:
if self._do_validate_sources_present(vt.target):
self.execute_codegen(vt.target, vt.results_dir)
self._handle_duplicate_sources(vt.target, vt.results_dir)
vt.update()
# And inject a synthetic target to represent it.
self._inject_synthetic_target(vt.target, vt.results_dir)
@property
def _copy_target_attributes(self):
"""Return a list of attributes to be copied from the target to derived synthetic targets.
By default, propagates the provides, scope, and tags attributes.
"""
return ['provides', 'tags', 'scope']
def synthetic_target_dir(self, target, target_workdir):
"""
:API: public
"""
return target_workdir
def _inject_synthetic_target(self, target, target_workdir):
"""Create, inject, and return a synthetic target for the given target and workdir.
:param target: The target to inject a synthetic target for.
:param target_workdir: The work directory containing the generated code for the target.
"""
copied_attributes = {}
for attribute in self._copy_target_attributes:
copied_attributes[attribute] = getattr(target, attribute)
target_workdir = self.synthetic_target_dir(target, target_workdir)
synthetic_target = self.context.add_new_target(
address=self._get_synthetic_address(target, target_workdir),
target_type=self.synthetic_target_type(target),
dependencies=self.synthetic_target_extra_dependencies(target, target_workdir),
sources=list(self.find_sources(target, target_workdir)),
derived_from=target,
**copied_attributes
)
build_graph = self.context.build_graph
# NB(pl): This bypasses the convenience function (Target.inject_dependency) in order
# to improve performance. Note that we can walk the transitive dependee subgraph once
# for transitive invalidation rather than walking a smaller subgraph for every single
# dependency injected.
for dependent_address in build_graph.dependents_of(target.address):
build_graph.inject_dependency(
dependent=dependent_address,
dependency=synthetic_target.address,
)
# NB(pl): See the above comment. The same note applies.
for concrete_dependency_address in build_graph.dependencies_of(target.address):
build_graph.inject_dependency(
dependent=synthetic_target.address,
dependency=concrete_dependency_address,
)
build_graph.walk_transitive_dependee_graph(
build_graph.dependencies_of(target.address),
work=lambda t: t.mark_transitive_invalidation_hash_dirty(),
)
if target in self.context.target_roots:
self.context.target_roots.append(synthetic_target)
return synthetic_target
def resolve_deps(self, unresolved_deps):
"""
:API: public
"""
deps = OrderedSet()
for dep in unresolved_deps:
try:
deps.update(self.context.resolve(dep))
except AddressLookupError as e:
raise AddressLookupError('{message}\n on dependency {dep}'.format(message=e, dep=dep))
return deps
@abstractmethod
def execute_codegen(self, target, target_workdir):
"""Generate code for the given target.
:param target: A target to generate code for
:param target_workdir: A clean directory into which to generate code
"""
def find_sources(self, target, target_workdir):
"""Determines what sources were generated by the target after the fact.
This is done by searching the directory where this target's code was generated.
:param Target target: the target for which to find generated sources.
:param path target_workdir: directory containing sources for the target.
:return: A set of filepaths relative to the target_workdir.
:rtype: OrderedSet
"""
return OrderedSet(self._find_sources_in_workdir(target_workdir))
def _find_sources_in_workdir(self, target_workdir):
"""Returns relative sources contained in the given target_workdir."""
for root, _, files in safe_walk(target_workdir):
rel_root = fast_relpath(root, target_workdir)
for name in files:
yield os.path.join(rel_root, name)
def _handle_duplicate_sources(self, target, target_workdir):
"""Handles duplicate sources generated by the given gen target by either failure or deletion.
This method should be called after all dependencies have been injected into the graph, but
before injecting the synthetic version of this target.
NB(gm): Some code generators may re-generate code that their dependent libraries generate.
This results in targets claiming to generate sources that they really don't, so we try to
filter out sources that were actually generated by dependencies of the target. This causes
the code generated by the dependencies to 'win' over the code generated by dependees. By
default, this behavior is disabled, and duplication in generated sources will raise a
TaskError. This is controlled by the --allow-dups flag.
"""
# Compute the raw sources owned by this target.
by_target = self.find_sources(target, target_workdir)
# Walk dependency gentargets and record any sources owned by those targets that are also
# owned by this target.
duplicates_by_target = OrderedDict()
def record_duplicates(dep):
if dep == target or not self.is_gentarget(dep.concrete_derived_from):
return
duped_sources = [s for s in dep.sources_relative_to_source_root() if s in by_target and
not self.ignore_dup(target, dep, s)]
if duped_sources:
duplicates_by_target[dep] = duped_sources
target.walk(record_duplicates)
# If there were no dupes, we're done.
if not duplicates_by_target:
return
# If there were duplicates warn or error.
messages = ['{target} generated sources that had already been generated by dependencies.'
.format(target=target.address.spec)]
for dep, duped_sources in duplicates_by_target.items():
messages.append('\t{} also generated:'.format(dep.concrete_derived_from.address.spec))
messages.extend(['\t\t{}'.format(source) for source in duped_sources])
message = '\n'.join(messages)
if self.get_options().allow_dups:
logger.warn(message)
else:
raise self.DuplicateSourceError(message)
# Finally, remove duplicates from the workdir. This prevents us from having to worry
# about them during future incremental compiles.
for dep, duped_sources in duplicates_by_target.items():
for duped_source in duped_sources:
safe_delete(os.path.join(target_workdir, duped_source))
class DuplicateSourceError(TaskError):
"""A target generated the same code that was generated by one of its dependencies.
This is only thrown when --allow-dups=False.
"""
|
|
#!/usr/bin/env python
"""Convert pyx12 configuration files from XML to Python.
The Python X12 module contains a number of XML files which
define X12 message structures. They need to be converted into the syntax for the
:mod:`X12.parse` parser.
Note that the pyx12 module defines Loops, Segments, Composites and individual Elements.
It also defines selected data element types and sizes. Since it provides
complete code values, it provides Segment parsing clues and compliance
checking values.
Definitions
===========
There are two principal classes in this module.
.. autoclass:: XMLParser
This reads XML files and creates
:mod:`xml.dom` structures for the codes, data elements and message
structures.
.. autoclass:: ParserBuilder
This emits a Python :mod:`X12.parse` constructor
that matches the given :file:`.CF` file.
There are three convenience functions.
.. autofunction:: convertFilePath
.. autofunction:: convertFile
.. autofunction:: convertAll
Warnings
========
The following default warnings configuration is used.
- :class:`XMLWarning` are elevated to Errors to cause this module to stop.
In the case of suspicious XML, it is possible to downgrade these
errors and keep processing.
- :class:`EmptyLoopWarning` is always reported, but is merely a warning.
- :class:`UnknownElementWarning` is always reported, but is merely a warning.
- :class:`Extension` is reported once so that it's clear that the XML contains
features which were not parsed.
XML Source Structure
====================
The pyx12 package provides a :mod:`pyx12.maps.xsd` that provides a bit
of guidance on parsing the XML documents.
The :file:`README` provides some additional hints.
The following elements are
used in the message xml definitions.
- usage
Implementation Guide Usage field. S=Situational, N=Not Used, R=Required
- pos
Segment position number -- obvious from the XML document, but provided anyway
- id
Implementation Guide id
- name
Implementation Guide name
- data_ele
Data Element id
- valid_codes
List of valid codes for the element or the name.
Could be a separate list document, via attribute external="".
See codes.xml for the complete set of codes.
- refdes
reference designator for a Composite item
- syntax
STD syntax dependency string for a segment, optional
The :mod:`pyx12.maps.xsd` provides a schema for the various message definitions.
Data Element Definitions
========================
The following are used in the data element definitions.
- data_ele
Data Element ID
- name
IG name
- data_type
STD type
- min_len/max_len
STD
Analysis reveals the following DTD for the data element definitions::
<!ELEMENT data_elements ( data_ele )*>
<!ELEMENT data_ele #PCDATA>
<!ATTLIST data_ele
ele_num CDATA #REQUIRED
data_type (ID|R|AN|DT|TM|N0) #REQUIRED
min_len CDATA #REQUIRED
max_len CDATA #REQUIRED
name CDATA #REQUIRED>
"""
from __future__ import print_function
import xml.dom.minidom as DOM
import argparse
from datetime import datetime
import os.path, logging, sys, fnmatch
import warnings
from tigershark.X12.parse import Composite
from tigershark.X12.parse import Element
from tigershark.X12.parse import Loop
from tigershark.X12.parse import Message
from tigershark.X12.parse import Properties
from tigershark.X12.parse import Segment
from tigershark.X12.map.source import FlatPythonVisitor
from tigershark.X12.map.source import PythonVisitor
class XMLWarning( UserWarning ):
"""A superclass for a number of warnings regarding the XML definition."""
pass
class EmptyLoopWarning( UserWarning ):
"""A Loop definition had no subloops or Segments."""
pass
class UnknownElementWarning( UserWarning ):
"""An Element had a dataele= reference to an unkown name."""
pass
class Extension( UserWarning ):
"""This feature of the XML is an extension to the current implementation."""
pass
class ParserBuilder( object ):
"""Build a :mod:`X12.parse` Parser from an XML message specification.
This makes use of an :class:`XMLParser` instance which has been populated
with three separate XML documents.
1. The data element definitions were parsed with :meth:`XMLParser.data`.
2. The external code definitions were parsed with :meth:`XMLParser.codes`.
3. The message structure was parsed via :meth:`XMLParser.read`.
Once this is complete, then the :meth:`build` method can be given the
:class:`XMLParser` to emit the desired :class:`X12.parse.Message` object.
This creates the :class:`X12.parse.Message` object, inserts the various
Loop and Segment components. The resulting object can be used to
parse X12 messages.
The resulting object can also be used to emit Python source,
as well as SQL DDL.
The :samp:`<transaction>` contains :samp:`<loop>`'s.
The :samp:`<loop>`'s contain :samp:`<segment>`'s and :samp:`<loop>`'s.
The :samp:`<segment>`'s contain :samp:`<element>`'s and :samp:`<composite>`'s.
:ivar dataDictionary: a dictionary of :samp:`<data_ele>` definitions.
"""
def __init__( self ):
"""Creates a new, uninitialized ParserBuilder."""
self.log= logging.getLogger( "tools.convertPyX12.ParserBuilder" )
self.log2= logging.getLogger( "tools.convertPyX12.ParserBuilder.buildElement" )
self.dataDictionary= {}
def buildDataElementDef( self, aNode ):
"""Load the data dictionary with the definition from a :samp:`<data_ele>` node.
The key is the data element name.
The value in the dictionary is a tuple of ( data_type, min_len and max_len ).
:param aNode: a :mod:`xml.dom` Element with a name of :samp:`data_ele`
"""
assert aNode.nodeType == DOM.Node.ELEMENT_NODE and aNode.nodeName == "data_ele"
# ele_num="100" data_type="ID" min_len="3" max_len="3" name
ele_num= aNode.getAttribute('ele_num')
data_type= aNode.getAttribute('data_type')
min_len= aNode.getAttribute('min_len')
max_len= aNode.getAttribute('max_len')
name= aNode.getAttribute('name')
self.dataDictionary[ele_num]= ( name, (data_type, min_len, max_len) )
def dataElements( self, xmlDoc ):
"""Load the data dictionary with the definitions from the :samp:`<data_elements>` node.
:param xmlDoc: a :class:`XMLParser` which has parsed the data document
"""
doc= xmlDoc.dataeleDoc.documentElement
assert doc.nodeType == DOM.Node.ELEMENT_NODE and doc.nodeName == "data_elements"
for c in doc.childNodes:
if c.nodeType != DOM.Node.ELEMENT_NODE: continue
if c.nodeName == "data_ele":
self.buildDataElementDef( c )
else:
warnings.warn( XMLWarning("Unexpected %r" % (c,) ) )
self.log.warning( "*** Unexpected %r", c )
def codes( self, xmlDoc ):
"""Load external code file.
.. todo:: XXX - Finish loading the external code file.
:param xmlDoc: a :class:`XMLParser` which has parsed the codes document
"""
pass
def getValidCodes( self, aNode ):
"""Parse a :samp:`<valid_codes>` node, building a list of valid codes
for an Element. This will examine only :samp:`<code>` elements found
under the :samp:`<valid_codes>` element.
:param aNode: a :mod:`xml.dom` Element with a name of :samp:`valid_codes`
:returns: list of code values
"""
assert aNode.nodeType == DOM.Node.ELEMENT_NODE and aNode.nodeName == "valid_codes"
codes= []
for c in aNode.childNodes:
if c.nodeType != DOM.Node.ELEMENT_NODE: continue
if c.nodeName == "code":
for n in c.childNodes:
codes.append( n.nodeValue )
else:
warnings.warn( XMLWarning("Unexpected %r" % (c,) ) )
self.log.warning( "*** Unexpected %r", c )
return codes
def buildComposite( self, compositeNode, context, nesting=0 ):
"""Build a Composite from a list of sub-Elements.
Note that the ISA segment provides the Component Element Separator in ISA16.
Composites are tricky. We have two choices.
1. Treat them as just a sequence of Elements -- which they
are for RDBMS purposes. However, this makes compliance
checking hard, since we have to look inside the composite.
We have to turn off "match" for each Element of the Composite,
otherwise, we won't be able to look into the segment data
properly.
2. Treat them as a subclass of Elements -- which is a lot of
fooling around to support a few compliance checks. We have to
provide a composite match function and we have to make use
of punctuation in the ISA segment for sub-parsing each Composite when we
finally figure out what the Segment and Elements are.
:param compositeNode: a :mod:`xml.dom` Element with a name of :samp:`composite`
:param context: The Segment which contains this Composite
:param nesting: The current nesting level used to indent the log messages.
"""
assert compositeNode.nodeType == DOM.Node.ELEMENT_NODE and compositeNode.nodeName == "composite"
name= self.getChildTextValue( compositeNode, "name" )
usage= self.getChildTextValue( compositeNode, "usage" )
seq= self.getChildTextValue( compositeNode, "seq" )
data_ele= self.getChildTextValue( compositeNode, "data_ele" )
refdes= self.getChildTextValue( compositeNode, "refdes" )
self.log.debug( "%*sComposite name %r usage %r seq %r data_ele %r refdes %r",
nesting*2, '', name, usage, seq, data_ele, refdes )
theComposite= Composite(
data_ele,
Properties( desc=name, req_sit=usage, seq=seq, refdes=refdes ) )
for c in compositeNode.childNodes:
# Want to preserve the original XML order of <element>
if c.nodeType != DOM.Node.ELEMENT_NODE: continue
if c.nodeName == "element":
self.buildElement( c, theComposite, nesting+1 )
elif c.nodeName in ( "name", "usage", "seq", "data_ele", 'refdes',):
pass # already consumed
else:
warnings.warn( XMLWarning("Unexpected %r" % (c,) ) )
self.log.warning( "*** Unexpected %r", c )
context.append( theComposite )
def buildElement( self, elementNode, context, nesting=0 ):
"""Element is the fundamental piece of data in a Segment or Composite.
Elements can have code definitions or can reference external code
definitions.
.. todo:: XXX - Use external code definitions.
:param elementNode: a :mod:`xml.dom` Element with a name of :samp:`element`
:param context: The Segment or Composite which contains this Element
:param nesting: The current nesting level used to indent the log messages.
"""
assert elementNode.nodeType == DOM.Node.ELEMENT_NODE and elementNode.nodeName == "element"
eltXid= elementNode.getAttribute('xid')
name= self.getChildTextValue( elementNode, "name" )
usage= self.getChildTextValue( elementNode, "usage" )
seq= self.getChildTextValue( elementNode, "seq" )
data_ele= self.getChildTextValue( elementNode, "data_ele" )
self.log.debug( "%*sElement id %r name %r usage %r seq %r data_ele %r",
nesting*2, '', eltXid, name, usage, seq, data_ele )
if data_ele not in self.dataDictionary:
warnings.warn( UnknownElementWarning("No Data Element %r %r %r" % ( eltXid, data_ele, name ,) ) )
data_type_tuple= (None,None,None)
else:
name, data_type_tuple = self.dataDictionary[data_ele]
codes= []
for c in elementNode.childNodes:
if c.nodeType != DOM.Node.ELEMENT_NODE: continue
if c.nodeName == "valid_codes":
if c.getAttribute("external") != "":
pass # reference to codes.xml codes
# XXX - Handle external code list
warnings.warn( Extension("External Codes Not Implemented") )
else:
codes= self.getValidCodes( c )
elif c.nodeName in ( "name", "usage", "seq", "data_ele", ):
pass # already consumed
else:
warnings.warn( XMLWarning("Unexpected %r" % (c,) ) )
self.log.warning( "*** Unexpected %r", c )
theElement= Element(
eltXid,
Properties(desc=name, req_sit=usage, seq=seq, data_ele=data_ele,
data_type=data_type_tuple, codes=codes, )
)
if data_ele in self.dataDictionary and len(codes) != 0:
#self.log2.debug( "Segment Qual Parameter? %r %r", self.dataDictionary[data_ele], codes )
pass
context.append( theElement )
def buildSegment( self, segmentNode, context, nesting=0 ):
"""Segment contains Elements and Composites.
:param segmentNode: a :mod:`xml.dom` Element with a name of :samp:`segment`
:param context: The Loop which contains this Segment.
:param nesting: The current nesting level used to indent the log messages.
"""
assert segmentNode.nodeType == DOM.Node.ELEMENT_NODE and segmentNode.nodeName == "segment"
segXid= segmentNode.getAttribute('xid')
name= self.getChildTextValue( segmentNode, "name" )
usage= self.getChildTextValue( segmentNode, "usage" )
pos= self.getChildTextValue( segmentNode, "pos" )
max_use= self.getChildTextValue( segmentNode, "max_use" )
syntax= self.getChildTextValue( segmentNode, "syntax" )
self.log.debug( "%*sSegment xid %r: name %r usage %r pos %r max_use %r syntax %r",
nesting*2, '', segXid, name, usage, pos, max_use, syntax )
theSegment= Segment(
segXid,
Properties(desc=name,req_sit=usage,pos=pos,repeat=max_use,syntax=syntax),
)
for c in segmentNode.childNodes:
# Want to preserve the original XML order of <element> and <composite>
if c.nodeType != DOM.Node.ELEMENT_NODE: continue
if c.nodeName == "element":
self.buildElement( c, theSegment, nesting+1 )
elif c.nodeName == "composite":
self.buildComposite( c, theSegment, nesting+1 )
elif c.nodeName in ( "name", "usage", "pos", "max_use", "syntax", ):
pass # already consumed
else:
warnings.warn( XMLWarning("Unexpected %r" % (c,) ) )
self.log.warning( "*** Unexpected %r", c )
context.append( theSegment )
def buildLoop( self, loopNode, context, nesting=0 ):
"""Loop contains Segments and Loops.
Empty Loops create a warning, and are dropped.
:param loopNode: a :mod:`xml.dom` Element with a name of :samp:`loop`
:param context: The Loop which contains this Loop or Segment.
:param nesting: The current nesting level used to indent the log messages.
"""
assert loopNode.nodeType == DOM.Node.ELEMENT_NODE and loopNode.nodeName == "loop"
loopXid= loopNode.getAttribute('xid')
loopType= loopNode.getAttribute('type')
name= self.getChildTextValue( loopNode, "name" )
usage= self.getChildTextValue( loopNode, "usage" )
pos= self.getChildTextValue( loopNode, "pos" )
repeat= self.getChildTextValue( loopNode, "repeat" )
self.log.debug( "%*sLoop xid %r type %r: name %r usage %r pos %r repear %r",
nesting*2, '', loopXid, loopType, name, usage, pos, repeat )
theLoop= Loop(
loopXid,
Properties(desc=name,req_sit=usage,pos=pos,repeat=repeat,looptype=loopType),
)
for c in loopNode.childNodes:
# Want to preserve the original XML order of <loop> and <segment>
if c.nodeType != DOM.Node.ELEMENT_NODE: continue
if c.nodeName == "loop":
self.buildLoop( c, theLoop, nesting+1 )
elif c.nodeName == "segment":
self.buildSegment( c, theLoop, nesting+1 )
elif c.nodeName in ( "name", "usage", "pos", "repeat", ):
pass # already consumed
else:
warnings.warn( XMLWarning("Unexpected %r" % (c,) ) )
self.log.warning( "*** Unexpected %r", c )
if len(theLoop.structure) == 0:
warnings.warn( EmptyLoopWarning("Empty Loop %r" % (theLoop,) ) )
# optimize this out of existence
else:
context.append( theLoop )
def build( self, xmlDoc, name=None ):
"""Build the overall :class:`X12.parse.Message` parser.
:param xmlDoc: a :class:`XMLParser` which has parsed the data, codes and message
structure documents.
:param name: Optional name of the Message to build; this will use
the xid attribute provided in the XML if no overriding name is provided here.
:returns: :class:`X12.parse.Message` parser.
"""
self.dataElements( xmlDoc )
self.codes( xmlDoc )
doc= xmlDoc.doc.documentElement
assert doc.nodeType == DOM.Node.ELEMENT_NODE and doc.nodeName == "transaction"
xid= doc.getAttribute('xid')
desc= self.getChildTextValue( doc, "name" )
if name is None: name= xid
self.log.info( "Message %s: xid=%s desc=%s", name, xid, desc )
self.top= Message( name, Properties(desc=desc) )
for c in doc.childNodes:
# Want to preserve the original XML order of <loop> and <segment>
if c.nodeType != DOM.Node.ELEMENT_NODE: continue
if c.nodeName == "loop":
self.buildLoop( c, self.top )
elif c.nodeName in ( "name", ):
pass # Already consumed this
else:
warnings.warn( XMLWarning("Unexpected %r" % (c,) ) )
self.log.warning( "*** Unexpected %r", c )
return self.top
def getChildTextValue( self, aNode, name ):
"""Examines all children with the given name
and extracts the text nodes for those children.
It accumulates the nodeValues for those text nodes.
:param aNode: an :mod:`xml.dom` Element.
:param name: a node name underneath the given node.
"""
childElements = [ n for n in aNode.childNodes if n.nodeType == DOM.Node.ELEMENT_NODE and n.nodeName == name ]
textNodes = [ n for c in childElements for n in c.childNodes if n.nodeType in ( DOM.Node.TEXT_NODE, DOM.Node.CDATA_SECTION_NODE ) ]
text = [ n.nodeValue for n in textNodes ]
return " ".join( text )
class XMLParser( object ):
"""Parse a related set of XML docs to get codes, data element definitions
and the overall Message structure.
The ParserBuilder will need some combination of these related XML documents
to define the :mod:`X12.parse` parser.
"""
def __init__( self ):
"""Creates an empty XMLParser instance."""
self.dataeleDoc= None
self.codesDoc= None
self.doc= None
def data( self, name_or_file ):
"""Parses the data element definition XML file.
        :param name_or_file: name of, or an open file object for, the data element file, usually :file:`dataele.xml`
"""
self.dataeleDoc= DOM.parse( name_or_file )
def codes( self, name_or_file ):
"""Parses the external codes definition XML file.
        :param name_or_file: name of, or an open file object for, the external codes file, usually :file:`codes.xml`
"""
self.codesDoc= DOM.parse( name_or_file )
def read( self, name_or_file ):
"""Parses the message structure XML file.
        :param name_or_file: name of, or an open file object for, a message structure file.
"""
self.doc= DOM.parse( name_or_file )
def dump( self, aDoc ):
"""Prints a dump of an XML document to stdout.
:param aDoc: an :mod:`xml.dom` Node (usually the top-level Document).
"""
nodeNames= set()
def walk( node, depth=0 ):
if node.nodeType in ( DOM.Node.TEXT_NODE, DOM.Node.CDATA_SECTION_NODE ):
if len(node.nodeValue.strip()) == 0:
# Disposable whitespace node
return
if node.nodeType == DOM.Node.COMMENT_NODE:
return
print( depth*' ', node.nodeName, repr(node.nodeValue) )
if node.attributes is not None and len(node.attributes.keys()) > 0:
print( depth*' ', '(', ", ".join([ "%s=%r" % (k,v) for k,v in node.attributes.items() ]), ')' )
nodeNames.add( node.nodeName )
for c in node.childNodes:
walk( c, depth+1 )
walk( aDoc )
def convertFilePath( baseDir, aFile, dataeleFile="dataele.xml",
codesFile="codes.xml" ):
"""Converts a single message file from XML to :mod:`X12.parse`."""
return convertFile(open(os.path.join(baseDir, aFile)),
open(os.path.join(baseDir, dataeleFile)),
open(os.path.join(baseDir, codesFile)))
def convertFile(xml_file, data_ele_file, codes_file):
log = logging.getLogger("tools.convertPyX12.convertFile")
bp = ParserBuilder()
try:
xml = XMLParser()
xml.data(data_ele_file)
xml.codes(codes_file)
xml.read(xml_file)
return bp.build(xml)
except Exception as e:
log.error("Failed to convert %s: %s" % (xml_file.name, e))
raise
def convertAll( baseDir ):
"""Convert all :file:`nnn*.4010.X*.xml` files in the given directory.
:param baseDir: Directory with message definition, dataele and codes files.
"""
for path,dirs,names in os.walk(baseDir):
for fn in names:
if fnmatch.fnmatch( fn, "[0-9][0-9][0-9]*.4010.X*.xml"):
convertFilePath( path, fn )
def writeFile(aFile, name, x12, structure="flat"):
"""Write the x12 python module to a file.
:param aFile: Filename of destination file.
:type aFile: String
:param name: The name of the generated class.
:type name: String
:param x12: The `X12.parse.Message` object to write.
:type x12: `X12.parse.Message`
"""
if structure == "nested":
pyMap= PythonVisitor( name )
else:
pyMap= FlatPythonVisitor( name )
x12.visit( pyMap )
pySrc= pyMap.getSource()
with open(aFile, 'w') as f:
f.write("#\n# Generated by TigerShark.tools.convertPyX12 on %s\n#\n" %
datetime.now())
f.write( pySrc )
f.write( '\n\n' )
# Raise an XML warning to an error level. This will spot extra tags.
warnings.simplefilter( "error", XMLWarning, 0 )
# Report empty loop warnings. This will spot Loops with no Segments.
warnings.simplefilter( "always", EmptyLoopWarning, 0 )
# Report unknown data elements.
warnings.simplefilter( "always", UnknownElementWarning, 0 )
# Report extensions that aren't implemented yet.
warnings.simplefilter( "once", Extension, 0 )
if __name__ == "__main__":
logging.basicConfig( stream=sys.stdout, level=logging.DEBUG )
parser = argparse.ArgumentParser(
description="Convert a PyX12 XML file to a python module.")
parser.add_argument('x12_file', help="The x12 xml file to convert")
parser.add_argument('py_file', help="The destination file.")
parser.add_argument('-b', '--base_dir', dest="base_dir", default='.',
help="Base directory containing the X12 xml, dataele.xml, and "\
"codes.xml files.")
parser.add_argument('-s', '--structure', choices=['flat', 'nested'],
default="flat", help="The structure of the resulting python "\
"class. Nested is easier to read, but may not compile "\
"due to too many object instantiations in a single "\
"call.")
parser.add_argument('-n', '--name', help="The name of the generated "\
"class. Defaults to the py_file argument, minus the filetype.")
args = parser.parse_args()
x12 = convertFilePath( args.base_dir, args.x12_file)
if args.name is not None:
name = args.name
else:
        name = os.path.basename(args.py_file)
name = name.split('.', 1)[0]
    writeFile(args.py_file, name, x12, args.structure)
|
|
from itertools import chain
from contextlib import contextmanager
from plumbum.machines.local import CommandsProvider
from plumbum.commands.base import BaseCommand
from plumbum.commands.processes import run_proc, CommandNotFound, ProcessExecutionError
class EmptyCluster(Exception):
"""Raised by :class:`Cluster <plumbum.machines.parallel.Cluster>` when actions are attempted on a cluster
that has no machines"""
pass
def make_concurrent(self, rhs):
if not isinstance(rhs, BaseCommand):
return NotImplemented
if isinstance(self, ConcurrentCommand):
if isinstance(rhs, ConcurrentCommand):
self.commands.extend(rhs.commands)
else:
self.commands.append(rhs)
return self
elif isinstance(rhs, ConcurrentCommand):
rhs.commands.insert(0, self)
return rhs
else:
return ConcurrentCommand(self, rhs)
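# Installed below as BaseCommand.__and__: ``cmd1 & cmd2`` builds a ConcurrentCommand,
# and further ``&`` operations extend that ConcurrentCommand instead of nesting new ones.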
BaseCommand.__and__ = make_concurrent
class ConcurrentPopen(object):
def __init__(self, procs):
self.procs = procs
self.stdin = None
self.stdout = None
self.stderr = None
self.encoding = None
self.returncode = None
self.machine = list(set(proc.machine for proc in procs))
@property
def argv(self):
return [getattr(proc, "argv", []) for proc in self.procs]
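    # poll() aggregates the children: None while any child is still running,
    # 0 if every child exited cleanly, otherwise the first nonzero return code found.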
def poll(self):
if self.returncode is not None:
return self.returncode
rcs = [proc.poll() for proc in self.procs]
if any(rc is None for rc in rcs):
return None
self.returncode = 0
for rc in rcs:
if rc != 0:
self.returncode = rc
break
return self.returncode
def wait(self):
for proc in self.procs:
proc.wait()
return self.poll()
def communicate(self, input=None):
if input:
raise ValueError("Cannot pass input to ConcurrentPopen.communicate")
out_err_tuples = [proc.communicate() for proc in self.procs]
self.wait()
return tuple(zip(*out_err_tuples))
def _decode(self, bytes):
return [proc._decode(b) for (proc, b) in zip(self.procs, bytes)]
class ConcurrentCommand(BaseCommand):
def __init__(self, *commands):
assert commands, EmptyConcurrentCommand()
self.commands = list(commands)
@property
def machine(self):
return list(set(cmd.machine for cmd in self.commands))
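    # formulate() renders one subshell that launches every sub-command in the
    # background: ( cmd1 & cmd2 & ... & )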
def formulate(self, level=0, args=()):
form = ["("]
for cmd in self.commands:
form.extend(cmd.formulate(level, args))
form.append("&")
return form + [")"]
def popen(self, *args, **kwargs):
return ConcurrentPopen([cmd[args].popen(**kwargs) for cmd in self.commands])
def __getitem__(self, args):
"""Creates a bound-command with the given arguments"""
if not isinstance(args, (tuple, list)):
args = [args, ]
if not args:
return self
else:
return ConcurrentCommand(*(cmd[args] for cmd in self.commands))
class Cluster(CommandsProvider):
def __init__(self, *machines):
self.machines = list(machines)
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
def close(self):
for mach in self.machines:
mach.close()
del self.machines[:]
def add_machine(self, machine):
self.machines.append(machine)
def __len__(self):
return len(self.machines)
def empty(self):
return not self
def __iter__(self):
return iter(self.machines)
def __add__(self, other):
return self.__class__(*chain(self, other))
def filter(self, pred):
return self.__class__(*filter(pred, self))
def which(self, progname):
return [mach.which(progname) for mach in self]
def list_processes(self):
return [mach.list_processes() for mach in self]
def pgrep(self, pattern):
return [mach.pgrep(pattern) for mach in self]
def path(self, *parts):
return [mach.path(*parts) for mach in self]
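    # Indexing: an int returns a single machine, a slice a sub-Cluster, and a
    # program name a ConcurrentCommand bound to that program on every machine.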
def __getitem__(self, progname):
if isinstance(progname, int):
return self.machines[progname]
if isinstance(progname, slice):
return self.__class__(*self.machines[progname])
if not isinstance(progname, str):
            raise TypeError("progname must be a string, not %r" % (type(progname),))
if not self.machines:
raise EmptyCluster("Cluster is empty")
return ConcurrentCommand(*(mach[progname] for mach in self))
def __contains__(self, cmd):
try:
self[cmd]
except CommandNotFound:
return False
else:
return True
@property
def python(self):
if not self.machines:
raise EmptyCluster()
return ConcurrentCommand(*(mach.python for mach in self))
def session(self):
if not self.machines:
raise EmptyCluster()
return ClusterSession(*(mach.session() for mach in self))
@contextmanager
def as_user(self, user=None):
with nested(*(mach.as_user(user) for mach in self)):
yield self
def as_root(self):
return self.as_user()
class ClusterSession(object):
def __init__(self, *sessions):
self.sessions = sessions
def __iter__(self):
return iter(self.sessions)
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
def __del__(self):
try:
self.close()
except Exception:
pass
def alive(self):
"""Returns ``True`` if the underlying shells are all alive, ``False`` otherwise"""
return all(session.alive for session in self)
def close(self):
"""Closes (terminates) all underlying shell sessions"""
for session in self.sessions:
session.close()
del self.sessions[:]
def popen(self, cmd):
return ConcurrentPopen([session.popen(cmd) for session in self])
def run(self, cmd, retcode=None):
return run_proc(self.popen(cmd), retcode)
if __name__ == "__main__":
from plumbum import local
from plumbum.cmd import ls, date, sleep
c = ls & date & sleep[1]
print(c())
c = ls & date & sleep[1] & sleep["-z"]
try:
c()
except ProcessExecutionError as ex:
print(ex)
else:
assert False
clst = Cluster(local, local, local)
print(clst["ls"]())
# This works fine
print(local.session().run("echo $$"))
# this does not
ret, stdout, stderr = clst.session().run("echo $$")
print(ret)
ret = [int(pid) for pid in stdout]
assert(len(set(ret))==3)
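# contextlib.nested was removed in Python 3; prefer ExitStack (Python 3.3+) when
# available and otherwise re-implement nested() here for the as_user() context manager.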
try:
from contextlib import nested
except ImportError:
try:
from contextlib import ExitStack
except ImportError:
# we're probably on python 3.2, so we'll need to redefine the deprecated 'nested' function
import sys
@contextmanager
def nested(*managers):
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit, enter = mgr.__exit__, mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
e, v, t = exc
raise v.with_traceback(t)
else:
@contextmanager
def nested(*managers):
with ExitStack() as stack:
yield [stack.enter_context(ctx) for ctx in managers]
|
|
"""Run GATK GenomicsDBImport tool."""
import os
import shutil
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
GroupField,
IntegerField,
ListField,
Process,
SchedulingClass,
StringField,
)
from resolwe.process.fields import DirField
class GenomicsDBImport(Process):
"""Import single-sample GVCFs into GenomicsDB before joint genotyping."""
slug = "gatk-genomicsdb-import"
name = "GATK GenomicsDBImport"
category = "GATK"
process_type = "data:genomicsdb"
version = "1.2.0"
scheduling_class = SchedulingClass.BATCH
requirements = {
"expression-engine": "jinja",
"executor": {
"docker": {"image": "public.ecr.aws/s4q6j6e8/resolwebio/dnaseq:6.3.1"}
},
"resources": {
"cores": 4,
"memory": 32768,
"storage": 200,
},
}
data_name = '{{ "GATK GenomicsDB (%s %s)"|format(gvcfs|length, "samples added" if use_existing else "samples" ) }}'
class Input:
"""Input fields for GenomicsDBImport."""
gvcfs = ListField(
DataField("variants:gvcf"),
label="Input data (GVCF)",
)
intervals = DataField(
"bed",
label="Intervals file (.bed)",
description="Intervals file is required if a new database will be "
"created.",
required=False,
)
use_existing = BooleanField(
label="Add new samples to an existing GenomicsDB workspace",
default=False,
)
existing_db = DataField(
"genomicsdb",
label="Select a GATK GenomicsDB object",
description="Instead of creating a new database the GVCFs are "
"added to this database and a new GenomicsDB object is created.",
required=False,
hidden="!use_existing",
)
advanced = BooleanField(
label="Show advanced options",
description="Inspect and modify parameters.",
default=False,
)
class AdvancedOptions:
"""Advanced options."""
batch_size = IntegerField(
label="Batch size",
default=0,
description="Batch size controls the number of samples "
"for which readers are open at once and therefore provides "
"a way to minimize memory consumption. However, it can "
"take longer to complete. Use the consolidate flag if more "
"than a hundred batches were used. This will improve feature "
"read time. batchSize=0 means no batching "
"(i.e. readers for all samples will be opened at once).",
)
consolidate = BooleanField(
label="Consolidate",
default=False,
description="Boolean flag to enable consolidation. If "
"importing data in batches, a new fragment is created for "
"each batch. In case thousands of fragments are created, "
"GenomicsDB feature readers will try to open ~20x as many "
"files. Also, internally GenomicsDB would consume more "
"memory to maintain bookkeeping data from all fragments. "
"Use this flag to merge all fragments into one. Merging "
"can potentially improve read performance, however overall "
"benefit might not be noticeable as the top Java layers "
"have significantly higher overheads. This flag has no "
"effect if only one batch is used.",
)
max_heap_size = IntegerField(
label="Java maximum heap size in GB (Xmx)",
default=28,
description="Set the maximum Java heap size.",
)
use_cms_gc = BooleanField(
label="Use CMS Garbage Collector in Java",
default=True,
description="The Concurrent Mark Sweep (CMS) implementation uses multiple garbage "
"collector threads for garbage collection.",
)
advanced_options = GroupField(
AdvancedOptions, label="Advanced options", hidden="!advanced"
)
class Output:
"""Output fields for GenomicsDBImport."""
database = DirField(label="GenomicsDB workspace")
intervals = FileField(label="Intervals file")
species = StringField(label="Species")
build = StringField(label="Build")
def run(self, inputs, outputs):
"""Run analysis."""
database_folder = "database"
sample_map_file = "sample_map.txt"
species = inputs.gvcfs[0].output.species
if any(gvcf.output.species != species for gvcf in inputs.gvcfs):
self.error("Not all of the input samples are of the same species.")
build = inputs.gvcfs[0].output.build
if any(gvcf.output.build != build for gvcf in inputs.gvcfs):
self.error("Not all of the input samples have the same genome build.")
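        # GenomicsDBImport's --sample-name-map expects one tab-separated
        # "<sample name><TAB><path to GVCF>" line per sample.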
with open(sample_map_file, "w") as sample_map:
for gvcf in inputs.gvcfs:
sample_map.write(f"{gvcf.entity_name}\t{gvcf.output.vcf.path}\n")
if inputs.use_existing and inputs.existing_db is None:
self.error(
"GATK GenomicsDB object has to be provided to add GVCFs to the existing "
"database."
)
elif inputs.use_existing and inputs.existing_db:
if species != inputs.existing_db.output.species:
self.error("The existing database and GVCFs species differ.")
if build != inputs.existing_db.output.build:
self.error("The existing database and GVCFs build differ.")
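            # Work on a local copy of the existing workspace so the new GVCFs are
            # added to the copy instead of mutating the input data object.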
shutil.copytree(inputs.existing_db.output.database.path, database_folder)
db_import_args = [
"--genomicsdb-update-workspace-path",
database_folder,
]
intervals = Path(inputs.existing_db.output.intervals.path)
elif inputs.intervals:
db_import_args = [
"--genomicsdb-workspace-path",
database_folder,
"-L",
inputs.intervals.output.bed.path,
]
intervals = Path(inputs.intervals.output.bed.path)
else:
self.error("Intervals file is required for creating a new database.")
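        # Cap the Java heap at the smaller of the memory allotted to the process
        # (MB converted to GB) and the user-supplied maximum heap size.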
java_memory = min(
int(self.requirements.resources.memory / 1024),
inputs.advanced_options.max_heap_size,
)
java_options = f"-Xmx{java_memory}g"
if inputs.advanced_options.use_cms_gc:
java_options += " -XX:+UseConcMarkSweepGC"
db_import_args.extend(
[
"--sample-name-map",
sample_map_file,
"--batch-size",
inputs.advanced_options.batch_size,
"--reader-threads",
min(self.requirements.resources.cores, 5),
"--verbosity",
"DEBUG",
"--tmp-dir",
os.environ.get("TMPDIR"),
"--java-options",
java_options,
]
)
if inputs.advanced_options.consolidate:
db_import_args.append("--consolidate")
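        # TEE streams the tool's stdout/stderr while also capturing it; retcode=None
        # disables plumbum's own error raising so the return code is checked below.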
return_code, stdout, stderr = Cmd["gatk"]["GenomicsDBImport"][
db_import_args
] & TEE(retcode=None)
if return_code:
print(stdout, stderr)
self.error("GATK GenomicsDBImport tool failed.")
output_bed = f"./{intervals.name}"
Path(output_bed).symlink_to(str(intervals))
outputs.intervals = output_bed
outputs.database = database_folder
outputs.species = species
outputs.build = build
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_profile_tcp
short_description: BIG-IP ltm tcp profile module
description:
- Configures a Transmission Control Protocol (TCP) profile.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
abc:
description:
- When enabled, increases the congestion window by basing the increase amount on the number of previously
unacknowledged bytes that each acknowledgement code (ACK) includes.
default: enabled
choices: ['enabled', 'disabled']
ack_on_push:
description:
- When enabled, significantly improves performance to Microsoft Windows and MacOS peers, who are writing out
on a very small send buffer.
default: enabled
choices: ['enabled', 'disabled']
app_service:
description:
- Specifies the name of the application service to which the profile belongs.
close_wait_timeout:
description:
- Specifies the number of seconds that a connection remains in a LAST-ACK (last acknowledgement code) state
before quitting.
default: 5
cmetrics_cache:
description:
- Specifies, when enabled, the default value, that the system uses a cache for storing congestion metrics.
choices: ['enabled', 'disabled']
congestion_control:
description:
- Specifies the algorithm to use to share network resources among competing users to reduce congestion.
default: high-speed
choices: [
            'cdg', 'chd', 'cubic', 'high-speed', 'illinois', 'new-reno', 'none', 'reno', 'scalable', 'vegas',
'westwood', 'woodside'
]
defaults_from:
description:
- Specifies the profile that you want to use as the parent profile.
default: tcp
deferred_accept:
description:
- Specifies, when enabled, that the system defers allocation of the connection chain context until the
system has received the payload from the client.
default: disabled
choices: ['enabled', 'disabled']
delay_window_control:
description:
- When enabled, the system uses an estimate of queueing delay as a measure of congestion, in addition to the
normal loss-based control, to control the amount of data sent.
default: disabled
choices: ['enabled', 'disabled']
delayed_acks:
description:
- Specifies, when enabled, the default value, that the traffic management system allows coalescing of
multiple acknowledgement (ACK) responses.
default: enabled
choices: ['enabled', 'disabled']
description:
description:
- User defined description.
dsack:
description:
- When enabled, specifies the use of the SACK option to acknowledge duplicate segments.
default: disabled
choices: ['enabled', 'disabled']
early_retransmit:
description:
- Specifies, when enabled, that the system uses early retransmit recovery (as specified in RFC 5827) to
reduce the recovery time for connections that are receive-buffer or user-data limited.
default: disabled
choices: ['enabled', 'disabled']
ecn:
description:
- Specifies, when enabled, that the system uses the TCP flags CWR and ECE to notify its peer of congestion
and congestion counter-measures.
default: disabled
choices: ['enabled', 'disabled']
fin_wait_timeout:
description:
- Specifies the number of seconds that a connection is in the FIN-WAIT or closing state before quitting.
default: 5
hardware_syn_cookie:
description:
- Specifies whether or not to use hardware SYN Cookie when cross system limit.
default: disabled
choices: ['enabled', 'disabled']
idle_timeout:
description:
- Specifies the number of seconds that a connection is idle before the connection is eligible for deletion.
default: 300
init_cwnd:
description:
- Specifies the initial congestion window size for connections to this destination.
default: 0
choices: range(0, 17)
init_rwnd:
description:
- Specifies the initial receive window size for connections to this destination.
default: 0
choices: range(0, 17)
ip_tos_to_client:
description:
- Specifies the Type of Service (ToS) level that the traffic management system assigns to TCP packets when
sending them to clients.
default: 0
keep_alive_interval:
description:
- Specifies the keep-alive probe interval, in seconds.
default: 1800
limited_transmit:
description:
- Specifies, when enabled, that the system uses limited transmit recovery revisions for fast retransmits to
reduce the recovery time for connections on a lossy network.
default: enabled
choices: ['enabled', 'disabled']
link_qos_to_client:
description:
- Specifies the Link Quality of Service (QoS) level that the system assigns to TCP packets when sending them
to clients.
default: 0
max_retrans:
description:
- Specifies the maximum number of retransmissions of data segments that the system allows.
default: 8
md5_signature:
description:
- Specifies, when enabled, that the system uses RFC2385 TCP-MD5 signatures to protect TCP traffic against
intermediate tampering.
default: disabled
choices: ['enabled', 'disabled']
md5_signature_passphrase:
description:
            - Specifies a plain text passphrase that is used in a shared-secret scheme to implement the spoof-prevention
parts of RFC2385.
choices: Plain text passphrase between 1 and 80 characters in length
minimum_rto:
description:
- Specifies the minimum TCP retransmission timeout in milliseconds.
default: 0
mptcp:
description:
- Specifies, when enabled, that the system will accept MPTCP connections.
default: disabled
choices: ['enabled', 'disabled']
mptcp_csum:
description:
- Specifies, when enabled, that the system will calculate the checksum for MPTCP connections.
default: disabled
choices: ['enabled', 'disabled']
mptcp_csum_verify:
description:
            - Specifies, when enabled, that the system verifies the checksum for MPTCP connections.
default: disabled
choices: ['enabled', 'disabled']
mptcp_debug:
description:
- Specifies, when enabled, that the system provides debug logs and statistics for MPTCP connections.
default: disabled
choices: ['enabled', 'disabled']
mptcp_fallback:
description:
            - Specifies the MPTCP fallback mode.
default: reset
choices: ['accept', 'active-accept', 'reset', 'retransmit']
mptcp_joinmax:
description:
            - Specifies the maximum number of MPTCP connections that can join a given one.
default: 5
mptcp_nojoindssack:
description:
- Specifies, when enabled, no DSS option is sent on the JOIN ACK.
default: disabled
choices: ['enabled', 'disabled']
mptcp_rtomax:
description:
            - Specifies the number of RTOs before declaring the subflow dead.
default: 5
mptcp_rxmitmin:
description:
- Specifies the minimum value (in msec) of the retransmission timer for these MPTCP flows.
default: 1000
mptcp_subflowmax:
description:
- Specifies the maximum number of MPTCP subflows for a single flow.
default: 6
mptcp_makeafterbreak:
description:
- Specifies, when enabled, that make-after-break functionality is supported, allowing for long-lived MPTCP
sessions.
default: disabled
choices: ['enabled', 'disabled']
mptcp_timeout:
description:
            - Specifies the timeout value used to discard long-lived sessions that do not have an active flow, in seconds.
default: 3600
mptcp_fastjoin:
description:
- Specifies, when enabled, FAST join, allowing data to be sent on the MP_JOIN SYN, which can allow a server
response to occur in parallel with the JOIN.
default: disabled
choices: ['enabled', 'disabled']
nagle:
description:
- Specifies, when enabled, that the system applies Nagle's algorithm to reduce the number of short segments
on the network.
default: disabled
choices: ['enabled', 'disabled']
name:
description:
- Specifies a unique name for the component.
required: true
partition:
description:
- Specifies the administrative partition in which the component object resides.
default: Common
pkt_loss_ignore_burst:
description:
- Specifies the probability of performing congestion control when multiple packets in a row are lost, even
if the pkt-loss-ignore-rate was not exceeded.
default: 0
choices: range(0, 33)
pkt_loss_ignore_rate:
description:
- Specifies the threshold of packets lost per million at which the system should perform congestion control.
default: 0
choices: range(0, 1000001)
proxy_buffer_high:
description:
- Specifies the highest level at which the receive window is closed.
default: 49152
proxy_buffer_low:
description:
- Specifies the lowest level at which the receive window is closed.
default: 32768
proxy_mss:
description:
- Specifies, when enabled, that the system advertises the same mss to the server as was negotiated with the
client.
default: disabled
choices: ['enabled', 'disabled']
proxy_options:
description:
- Specifies, when enabled, that the system advertises an option, such as a time-stamp to the server only if
it was negotiated with the client.
default: disabled
choices: ['enabled', 'disabled']
rate_pace:
description:
- Specifies, when enabled, that the system will rate pace TCP data transmissions.
default: disabled
choices: ['enabled', 'disabled']
receive_window_size:
description:
- Specifies the size of the receive window, in bytes.
default: 65535
reset_on_timeout:
description:
- Specifies whether to reset connections on timeout.
default: enabled
choices: ['enabled', 'disabled']
selective_acks:
description:
- Specifies, when enabled, that the system negotiates RFC2018-compliant Selective Acknowledgements with
peers.
default: enabled
choices: ['enabled', 'disabled']
selective_nack:
description:
- Specifies whether Selective Negative Acknowledgment is enabled or disabled.
default: enabled
choices: ['enabled', 'disabled']
send_buffer_size:
description:
- Specifies the size of the buffer, in bytes.
default: 65535
slow_start:
description:
- Specifies, when enabled, that the system uses larger initial window sizes (as specified in RFC 3390) to
help reduce round trip times.
default: enabled
choices: ['enabled', 'disabled']
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
syn_cookie_whitelist:
description:
- Specifies whether or not to use a SYN Cookie WhiteList when doing software SYN Cookies.
default: disabled
choices: ['enabled', 'disabled']
syn_max_retrans:
description:
- Specifies the maximum number of retransmissions of SYN segments that the system allows.
default: 3
syn_rto_base:
description:
- Specifies the initial RTO (Retransmission TimeOut) base multiplier for SYN retransmission, in
milliseconds.
default: 0
tail_loss_probe:
description:
- Specifies whether the system uses tail loss probe to reduce the number of retransmission timeouts.
default: disabled
choices: ['enabled', 'disabled']
time_wait_recycle:
description:
- Specifies whether the system recycles the connection when a SYN packet is received in a TIME-WAIT state.
default: enabled
choices: ['enabled', 'disabled']
time_wait_timeout:
description:
- Specifies the number of milliseconds that a connection is in the TIME-WAIT state before closing.
default: 2000
choices: range(0, 600001)
timestamps:
description:
- Specifies, when enabled, that the system uses the timestamps extension for TCP (as specified in RFC 1323)
to enhance high-speed network performance.
default: enabled
choices: ['enabled', 'disabled']
verified_accept:
description:
- Specifies, when enabled, that the system can actually communicate with the server before establishing a
client connection.
default: disabled
choices: ['enabled', 'disabled']
zero_window_timeout:
description:
- Specifies the timeout in milliseconds for terminating a connection with an effective zero length TCP
transmit window.
default: 2000
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM TCP Profile
f5bigip_ltm_profile_tcp:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_tcp_profile
partition: Common
init_cwnd: 10
pkt_loss_ignore_burst: 15
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import range
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
abc=dict(type='str', choices=F5_ACTIVATION_CHOICES),
ack_on_push=dict(type='str', choices=F5_ACTIVATION_CHOICES),
app_service=dict(type='str'),
close_wait_timeout=dict(type='int'),
cmetrics_cache=dict(type='str', choices=F5_ACTIVATION_CHOICES),
congestion_control=dict(type='str',
choices=['cdg', 'chd', 'cubic', 'high-speed', 'illinois', 'new-reno', 'none',
'reno', 'scalable', 'vegas', 'westwood', 'woodside']),
defaults_from=dict(type='str'),
deferred_accept=dict(type='str', choices=F5_ACTIVATION_CHOICES),
delay_window_control=dict(type='str', choices=F5_ACTIVATION_CHOICES),
delayed_acks=dict(type='str', choices=F5_ACTIVATION_CHOICES),
description=dict(type='str'),
dsack=dict(type='str', choices=F5_ACTIVATION_CHOICES),
early_retransmit=dict(type='str', choices=F5_ACTIVATION_CHOICES),
ecn=dict(type='str', choices=F5_ACTIVATION_CHOICES),
fin_wait_timeout=dict(type='int'),
hardware_syn_cookie=dict(type='str', choices=F5_ACTIVATION_CHOICES),
idle_timeout=dict(type='int'),
init_cwnd=dict(type='int', choices=range(0, 17)),
init_rwnd=dict(type='int', choices=range(0, 17)),
ip_tos_to_client=dict(type='int'),
keep_alive_interval=dict(type='int'),
limited_transmit=dict(type='str', choices=F5_ACTIVATION_CHOICES),
link_qos_to_client=dict(type='int'),
max_retrans=dict(type='int'),
md5_signature=dict(type='str', choices=F5_ACTIVATION_CHOICES),
md5_signature_passphrase=dict(type='str', no_log=True),
minimum_rto=dict(type='int'),
mptcp=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mptcp_csum=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mptcp_csum_verify=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mptcp_debug=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mptcp_fallback=dict(type='str', choices=['accept', 'active-accept', 'reset', 'retransmit']),
mptcp_joinmax=dict(type='int'),
mptcp_nojoindssack=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mptcp_rtomax=dict(type='int'),
mptcp_rxmitmin=dict(type='int'),
mptcp_subflowmax=dict(type='int'),
mptcp_makeafterbreak=dict(type='str', choices=F5_ACTIVATION_CHOICES),
mptcp_timeout=dict(type='int'),
mptcp_fastjoin=dict(type='str', choices=F5_ACTIVATION_CHOICES),
nagle=dict(type='str', choices=F5_ACTIVATION_CHOICES),
pkt_loss_ignore_burst=dict(type='int', choices=range(0, 33)),
pkt_loss_ignore_rate=dict(type='int', choices=range(0, 1000001)),
proxy_buffer_high=dict(type='int'),
proxy_buffer_low=dict(type='int'),
proxy_mss=dict(type='str', choices=F5_ACTIVATION_CHOICES),
proxy_options=dict(type='str', choices=F5_ACTIVATION_CHOICES),
rate_pace=dict(type='str', choices=F5_ACTIVATION_CHOICES),
receive_window_size=dict(type='int'),
reset_on_timeout=dict(type='str', choices=F5_ACTIVATION_CHOICES),
selective_acks=dict(type='str', choices=F5_ACTIVATION_CHOICES),
selective_nack=dict(type='str', choices=F5_ACTIVATION_CHOICES),
send_buffer_size=dict(type='int'),
slow_start=dict(type='str', choices=F5_ACTIVATION_CHOICES),
syn_cookie_whitelist=dict(type='str', choices=F5_ACTIVATION_CHOICES),
syn_max_retrans=dict(type='int'),
syn_rto_base=dict(type='int'),
tail_loss_probe=dict(type='str', choices=F5_ACTIVATION_CHOICES),
time_wait_recycle=dict(type='str', choices=F5_ACTIVATION_CHOICES),
time_wait_timeout=dict(type='int', choices=range(0, 600001)),
timestamps=dict(type='str', choices=F5_ACTIVATION_CHOICES),
verified_accept=dict(type='str', choices=F5_ACTIVATION_CHOICES),
zero_window_timeout=dict(type='int')
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpLtmProfileTcp(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
'create': self._api.tm.ltm.profile.tcps.tcp.create,
'read': self._api.tm.ltm.profile.tcps.tcp.load,
'update': self._api.tm.ltm.profile.tcps.tcp.update,
'delete': self._api.tm.ltm.profile.tcps.tcp.delete,
'exists': self._api.tm.ltm.profile.tcps.tcp.exists
}
def main():
params = ModuleParams()
module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode)
try:
obj = F5BigIpLtmProfileTcp(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == '__main__':
main()
|
|
import sys
import os
import csv
from statsd import StatsClient
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from itertools import chain
import pycrfsuite
statsd = StatsClient()
print sys.getdefaultencoding()
reload(sys)
sys.setdefaultencoding('utf-8')
#change this if you would only like to do a certain number of files, useful for testing
maxNumFiles = 1000
#base dir for all data files
data_dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'data/'))
def deleteLabel(dictionary):
del dictionary['label']
return dictionary
#divide dataset into features and labels
@statsd.timer('formatDataSet')
def formatDataSet(features):
#Y = [[s['label']] for s in features]
#X = [[deleteLabel(s)] for s in features]
Y = [[word['label'] for word in article]for article in features]
X = [[deleteLabel(word) for word in article]for article in features]
print len(X)
return X, Y
#turn features into crfsuite readable object
def word2features(token):
features = {
'label' : token[0]
}
del token[0]
for elem in token:
        separated = elem.split('=')
        nameFeat = separated[0]
        #if nameFeat == 'minDistanceVerbCue':
        # continue
        answer = separated[1]
features.update( {
nameFeat : answer
})
return features
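# Illustrative only (hypothetical feature names): a tab-separated input line such as
#   O<TAB>word=the<TAB>pos=DT<TAB>filename=wsj_0001
# is turned by word2features into a dict along the lines of
#   {'label': 'O', 'word': 'the', 'pos': 'DT', 'filename': 'wsj_0001'}
# (values are taken verbatim from the split, including any trailing newline on the last field).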
#creates a report for BIO encoded sequences
def bio_classification_report(y_true, y_pred):
"""
Classification report for a list of BIO-encoded sequences.
It computes token-level metrics and discards "O" labels.
Note that it requires scikit-learn 0.15+ (or a version from github master)
to calculate averages properly!
"""
lb = LabelBinarizer()
y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))
tagset = set(lb.classes_)
tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}
return classification_report(
y_true_combined,
y_pred_combined,
labels = [class_indices[cls] for cls in tagset],
target_names = tagset,
)
#trains a classifier based on content token features
def trainAll(X_train, y_train):
crf = pycrfsuite.Trainer(
verbose = True,
#algorithm = 'l2sgd',
)
crf.set_params({
'max_iterations': 40, # stop earlier
'feature.minfreq': 2,
'feature.possible_transitions': False
})
for xseq, yseq in zip(X_train, y_train):
crf.append(xseq, yseq)
crf.train('ContentSpanClassifier8.crfsuite')
return crf
def trainData():
lines = []
trainingFileName = os.path.join(data_dir, 'PARCTrainContentSpans2.txt')
reader_object = open(trainingFileName, 'r')
lines = reader_object.readlines()
print 'length of training set'
print len(lines)
allFeatures = []
thisFileFeatures = []
print 'extracting features'
lastfilename = None
i = 0
for line in lines:
i = i + 1
row = line.split('\t')
features = word2features(row)
filename = features['filename']
if filename == lastfilename or lastfilename == None:
thisFileFeatures.append(features)
lastfilename = filename
else:
allFeatures.append(thisFileFeatures)
thisFileFeatures = []
thisFileFeatures.append(features)
lastfilename = filename
print len(allFeatures)
print 'features extracted'
print 'formatting data set'
x_train, y_train = formatDataSet(allFeatures)
prevPred = ['O']
for pred in y_train:
if pred == ['I'] and prevPred == ['O']:
print 'foundTRAIN'
prevPred = pred
print 'trainingData'
#classifier = TRAIN(x_train, y_train, x_test, y_test)
classifier = trainAll(x_train, y_train)
#tests the results of a classifier against a labelled dataset
def test(X_test, y_test):
tagger = pycrfsuite.Tagger()
#tagger.open('ContentSpanClassifier.crfsuite')
tagger.open('ContentSpanClassifier8.crfsuite')
print 'new'
y_pred2 = [tagger.tag(xseq) for xseq in X_test]
prevPred = 'O'
for pred in y_pred2:
if pred == 'I' and prevPred == 'O':
print 'foundTEST'
prevPred = pred
print(bio_classification_report(y_test, y_pred2))
y_test2 = [item for sublist in y_test for item in sublist]
y_pred3 = [item for sublist in y_pred2 for item in sublist]
print accuracy_score(y_test2, y_pred3)
#tests the classifier that is created against some data
def testData():
testingFileName = data_dir + '/PARCTestContentSpans1.txt'
reader_object = open(testingFileName, 'r')
lines = reader_object.readlines()
print 'length of test set'
print len(lines)
allFeatures = []
thisFileFeatures = []
print 'extracting features'
lastfilename = None
i = 0
for line in lines:
i = i + 1
row = line.split('\t')
features = word2features(row)
filename = features['filename']
if filename == lastfilename or lastfilename == None:
thisFileFeatures.append(features)
lastfilename = filename
else:
allFeatures.append(thisFileFeatures)
thisFileFeatures = []
thisFileFeatures.append(features)
lastfilename = filename
print len(allFeatures)
print 'features extracted'
print 'formatting data set'
x_test, y_test= formatDataSet(allFeatures)
test(x_test, y_test)
def main():
print sys.argv
if sys.argv[1] == '-test':
testData()
elif sys.argv[1] == '-train':
trainData()
else:
print 'Use of this command line is: python source/crfsuiteTests.py -test or -train'
#labelData()
if __name__ == '__main__':
main()
|
|
from __future__ import absolute_import
from collections import Counter, defaultdict
import six
from django.utils import timezone
from sentry.tsdb.base import BaseTSDB
from sentry.utils.dates import to_datetime, to_timestamp
from sentry.utils.compat import map
class InMemoryTSDB(BaseTSDB):
"""
An in-memory time-series storage.
This should not be used in production as it will leak memory.
"""
def __init__(self, *args, **kwargs):
super(InMemoryTSDB, self).__init__(*args, **kwargs)
self.flush()
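    # Every write is recorded twice: under the given environment_id and under None,
    # which acts as the all-environments aggregate.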
def incr(self, model, key, timestamp=None, count=1, environment_id=None):
self.validate_arguments([model], [environment_id])
environment_ids = set([environment_id, None])
if timestamp is None:
timestamp = timezone.now()
for rollup, max_values in six.iteritems(self.rollups):
norm_epoch = self.normalize_to_rollup(timestamp, rollup)
for environment_id in environment_ids:
self.data[model][(key, environment_id)][norm_epoch] += count
def merge(self, model, destination, sources, timestamp=None, environment_ids=None):
environment_ids = (set(environment_ids) if environment_ids is not None else set()).union(
[None]
)
self.validate_arguments([model], environment_ids)
for environment_id in environment_ids:
dest = self.data[model][(destination, environment_id)]
for source in sources:
for bucket, count in self.data[model].pop((source, environment_id), {}).items():
dest[bucket] += count
def delete(self, models, keys, start=None, end=None, timestamp=None, environment_ids=None):
environment_ids = (set(environment_ids) if environment_ids is not None else set()).union(
[None]
)
self.validate_arguments(models, environment_ids)
rollups = self.get_active_series(start, end, timestamp)
for rollup, series in rollups.items():
for model in models:
for key in keys:
for environment_id in environment_ids:
data = self.data[model][(key, environment_id)]
for timestamp in series:
data.pop(self.normalize_to_rollup(timestamp, rollup), 0)
def get_range(self, model, keys, start, end, rollup=None, environment_ids=None):
self.validate_arguments([model], environment_ids if environment_ids is not None else [None])
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
results = []
for timestamp in map(to_datetime, series):
norm_epoch = self.normalize_to_rollup(timestamp, rollup)
for key in keys:
if not environment_ids:
value = self.data[model][(key, None)][norm_epoch]
else:
value = sum(
int(self.data[model][(key, environment_id)][norm_epoch])
for environment_id in environment_ids
)
results.append((to_timestamp(timestamp), key, value))
results_by_key = defaultdict(dict)
for epoch, key, count in results:
results_by_key[key][epoch] = int(count or 0)
for key, points in six.iteritems(results_by_key):
results_by_key[key] = sorted(points.items())
return dict(results_by_key)
def record(self, model, key, values, timestamp=None, environment_id=None):
self.validate_arguments([model], [environment_id])
environment_ids = set([environment_id, None])
if timestamp is None:
timestamp = timezone.now()
for rollup, max_values in six.iteritems(self.rollups):
r = self.normalize_to_rollup(timestamp, rollup)
for environment_id in environment_ids:
self.sets[model][(key, environment_id)][r].update(values)
def get_distinct_counts_series(
self, model, keys, start, end=None, rollup=None, environment_id=None
):
self.validate_arguments([model], [environment_id])
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
results = {}
for key in keys:
source = self.sets[model][(key, environment_id)]
counts = results[key] = []
for timestamp in series:
r = self.normalize_ts_to_rollup(timestamp, rollup)
counts.append((timestamp, len(source[r])))
return results
def get_distinct_counts_totals(
self, model, keys, start, end=None, rollup=None, environment_id=None
):
self.validate_arguments([model], [environment_id])
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
results = {}
for key in keys:
source = self.sets[model][(key, environment_id)]
values = set()
for timestamp in series:
r = self.normalize_ts_to_rollup(timestamp, rollup)
values.update(source[r])
results[key] = len(values)
return results
def get_distinct_counts_union(
self, model, keys, start, end=None, rollup=None, environment_id=None
):
self.validate_arguments([model], [environment_id])
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
values = set()
for key in keys:
source = self.sets[model][(key, environment_id)]
for timestamp in series:
r = self.normalize_ts_to_rollup(timestamp, rollup)
values.update(source[r])
return len(values)
def merge_distinct_counts(
self, model, destination, sources, timestamp=None, environment_ids=None
):
environment_ids = (set(environment_ids) if environment_ids is not None else set()).union(
[None]
)
self.validate_arguments([model], environment_ids)
for environment_id in environment_ids:
dest = self.sets[model][(destination, environment_id)]
for source in sources:
for bucket, values in self.sets[model].pop((source, environment_id), {}).items():
dest[bucket].update(values)
def delete_distinct_counts(
self, models, keys, start=None, end=None, timestamp=None, environment_ids=None
):
environment_ids = (set(environment_ids) if environment_ids is not None else set()).union(
[None]
)
self.validate_arguments(models, environment_ids)
rollups = self.get_active_series(start, end, timestamp)
for rollup, series in rollups.items():
for model in models:
for key in keys:
for environment_id in environment_ids:
                        data = self.sets[model][(key, environment_id)]
for timestamp in series:
data.pop(self.normalize_to_rollup(timestamp, rollup), set())
def flush(self):
# self.data[model][key][rollup] = count
self.data = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
# self.sets[model][key][rollup] = set of elements
self.sets = defaultdict(lambda: defaultdict(lambda: defaultdict(set)))
# self.frequencies[model][key][rollup] = Counter()
self.frequencies = defaultdict(lambda: defaultdict(lambda: defaultdict(Counter)))
def record_frequency_multi(self, requests, timestamp=None, environment_id=None):
environment_ids = set([environment_id, None])
self.validate_arguments([model for model, request in requests], [environment_id])
if timestamp is None:
timestamp = timezone.now()
for model, request in requests:
for key, items in request.items():
items = {k: float(v) for k, v in items.items()}
for environment_id in environment_ids:
source = self.frequencies[model][(key, environment_id)]
for rollup in self.rollups:
source[self.normalize_to_rollup(timestamp, rollup)].update(items)
def get_most_frequent(
self, model, keys, start, end=None, rollup=None, limit=None, environment_id=None
):
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
self.validate_arguments([model], [environment_id])
results = {}
for key in keys:
result = results[key] = Counter()
source = self.frequencies[model][(key, environment_id)]
for timestamp in series:
result.update(source[self.normalize_ts_to_rollup(timestamp, rollup)])
for key, counter in list(results.items()):
results[key] = counter.most_common(limit)
return results
def get_most_frequent_series(
self, model, keys, start, end=None, rollup=None, limit=None, environment_id=None
):
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
self.validate_arguments([model], [environment_id])
results = {}
for key in keys:
result = results[key] = []
source = self.frequencies[model][(key, environment_id)]
for timestamp in series:
data = source[self.normalize_ts_to_rollup(timestamp, rollup)]
result.append((timestamp, dict(data.most_common(limit))))
return results
def get_frequency_series(self, model, items, start, end=None, rollup=None, environment_id=None):
self.validate_arguments([model], [environment_id])
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
results = {}
for key, members in items.items():
result = results[key] = []
source = self.frequencies[model][(key, environment_id)]
for timestamp in series:
scores = source[self.normalize_ts_to_rollup(timestamp, rollup)]
result.append((timestamp, {k: scores.get(k, 0.0) for k in members}))
return results
def get_frequency_totals(self, model, items, start, end=None, rollup=None, environment_id=None):
self.validate_arguments([model], [environment_id])
results = {}
for key, series in six.iteritems(
self.get_frequency_series(model, items, start, end, rollup, environment_id)
):
result = results[key] = {}
for timestamp, scores in series:
for member, score in scores.items():
result[member] = result.get(member, 0.0) + score
return results
def merge_frequencies(self, model, destination, sources, timestamp=None, environment_ids=None):
environment_ids = (set(environment_ids) if environment_ids is not None else set()).union(
[None]
)
self.validate_arguments([model], environment_ids)
for environment_id in environment_ids:
dest = self.frequencies[model][(destination, environment_id)]
for source in sources:
                for bucket, counter in self.frequencies[model].pop((source, environment_id), {}).items():
dest[bucket].update(counter)
def delete_frequencies(
self, models, keys, start=None, end=None, timestamp=None, environment_ids=None
):
environment_ids = (set(environment_ids) if environment_ids is not None else set()).union(
[None]
)
self.validate_arguments(models, environment_ids)
rollups = self.get_active_series(start, end, timestamp)
for rollup, series in rollups.items():
for model in models:
for key in keys:
for environment_id in environment_ids:
data = self.frequencies[model][(key, environment_id)]
for timestamp in series:
data.pop(self.normalize_to_rollup(timestamp, rollup), Counter())
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 - 2012 -- Lars Heuer <heuer[at]semagia.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the project name nor the names of the contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""\
Event handler to create a cable corpus.
"""
from __future__ import absolute_import
from cablemap.core.handler import NoopCableHandler, DelegatingCableHandler
from .corpus import WordCorpus, CableCorpus
class NLPFilter(DelegatingCableHandler):
"""\
A configurable filter to swallow unwanted events/texts.
By default, the "handle_content" event is passed to the underlying handler and
all other events are swallowed by this filter.
"""
def __init__(self, handler, want_tags=False, want_content=True, want_summary=False,
want_comment=False, want_header=False, want_subject=False):
"""\
Initializes the filter.
`handler`
The handler which should receive the (valid) events
`want_tags`
Indicates if `handle_tag` should be passed to the underlying handler (default: ``False``)
`want_content`
Indicates if `handle_content` should be passed to the underlying handler (default: ``True``)
`want_summary`
Indicates if `handle_summary` should be passed to the underlying handler (default: ``False``)
`want_comment`
Indicates if `handle_comment` should be passed to the underlying handler (default: ``False``)
`want_header`
Indicates if `handle_header` should be passed to the underlying handler (default: ``False``)
`want_subject`
Indicates if `handle_subject` should be passed to the underlying handler (default: ``False``)
"""
super(NLPFilter, self).__init__(handler)
self.want_tags = want_tags
self.want_content = want_content
self.want_summary = want_summary
        self.want_comment = want_comment
self.want_header = want_header
self.want_subject = want_subject
def handle_subject(self, s):
if self.want_subject:
self._handler.handle_subject(s)
def handle_header(self, s):
if self.want_header:
self._handler.handle_header(s)
def handle_tag(self, s):
if self.want_tags:
self._handler.handle_tag(s)
def handle_content(self, s):
if self.want_content:
self._handler.handle_content(s)
def handle_summary(self, s):
if self.want_summary:
self._handler.handle_summary(s)
def handle_comment(self, s):
if self.want_comment:
self._handler.handle_comment(s)
class NLPCableHandler(NoopCableHandler):
"""\
`cablemap.core.interfaces.ICableHandler` implementation which collects
    texts and adds them to a corpus.
    By default, the handler uses the information from
* summary
* comment
* header
* content
* TAGs
The information can be reduced if the events are filtered in advance, i.e.::
from cablemap.core.handler import DelegatingCableHandler, handle_source
from cablemap.core import constants as consts
from cablemap.core.utils import tag_kind
from cablemap.nlp.handler import CorpusHandler, NLPFilter
class MyFilter(NLPFilter):
'''\
Filters all TAGs which are not person TAGs
'''
def handle_tag(self, tag):
# Let only person TAGs pass
if self.want_tags and tag_kind(tag) == consts.TAG_KIND_PERSON:
self._handler.handle_tag(tag)
writer = CorpusHandler('/my/path')
handler = MyFilter(writer)
handle_source('cables.csv', handler)
As result, the corpus will not contain information from the cable headers and all TAGs
which are not person TAGs won't be part of the corpus, too.
Without filtering, the writer will add duplicate information to the corpus since it adds
the comment and the summary section (which are part of the cable's content) to the corpus
in addition to the cable's content.
"""
def __init__(self, corpus, before_close=None):
"""\
`corpus`
            An object which has an ``add_texts(reference_id, iterable_of_strings)`` and
a ``close`` method.
`before_close`
An optional function which is called with the underlying corpus before it is
closed.
"""
self._corpus = corpus
self._reference_id = None
self._buff = []
self.before_close = before_close
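    # Texts are buffered per cable and handed to the corpus in a single
    # add_texts() call when end_cable() fires.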
def end(self):
if self.before_close:
self.before_close(self._corpus)
self._corpus.close()
def start_cable(self, reference_id, canonical_id):
self._reference_id = reference_id
def end_cable(self):
self._corpus.add_texts(self._reference_id, self._buff)
self._buff = []
def handle_subject(self, s):
self._buff.append(s)
def handle_summary(self, s):
self._buff.append(s)
def handle_comment(self, s):
self._buff.append(s)
def handle_header(self, s):
self._buff.append(s)
def handle_content(self, s):
self._buff.append(s)
def handle_tag(self, s):
self._buff.append(s)
class DictionaryHandler(NLPCableHandler):
"""\
`NLPCableHandler` implementation which works on a `WordCorpus`.
Note: This handler won't attempt to persist the Dictionary. The caller
should handle over an existing Dictionary or should save it in the `before_close`
callback::
from gensim.corpora.dictionary import Dictionary
from cablemap.core import handle_source
dct = Dictionary()
handler = DictionaryHandler(dct)
handle_source('cables.csv', handler)
# Now save the dict:
dct.save_as_text('/path/wordids.txt')
"""
def __init__(self, dct=None, tokenizer=None, before_close=None):
"""\
`dct`
An existing `gensim.corpora.dictionary.Dictionary`
If it's ``None`` (default) a dictionary will be created.
`tokenizer`
A function to tokenize/normalize/clean-up strings.
If it's ``None`` (default), a default function will be used to tokenize
texts.
`before_close`
An optional function which is called with the underlying corpus before it is
closed.
"""
super(DictionaryHandler, self).__init__(WordCorpus(dct, tokenizer), before_close)
class CorpusHandler(NLPCableHandler):
"""\
Creates a `cablemap.nlp.corpus.CableCorpus` instance.
"""
def __init__(self, path, dct=None, tokenizer=None, allow_dict_updates=True, prefix=None, before_close=None):
"""\
Initializes the corpus writer which creates a new `CableCorpus`.
`path`
Directory where the generated files are stored.
`dct`
An existing `gensim.corpora.dictionary.Dictionary`
If it's ``None`` (default) a dictionary will be created.
`tokenizer`
A function to tokenize/normalize/clean-up strings.
If it's ``None`` (default), a default function will be used to tokenize
texts.
`allow_dict_updates`
            Indicates if unknown words should be added to the dictionary (default ``True``).
`prefix`
A prefix for the generated file names.
`before_close`
An optional function which is called with the underlying corpus before it is
closed. May be useful to modify the corpus or the Dictionary before changes are
written to disk.
"""
super(CorpusHandler, self).__init__(CableCorpus(path, dct, tokenizer, allow_dict_updates, prefix),
before_close)
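# Usage sketch (assumptions, not part of the original module): wire a
# ``CorpusHandler`` to ``handle_source`` and persist the Dictionary from the
# ``before_close`` callback, as the docstrings above suggest. The attribute
# used to reach the Dictionary on the underlying ``CableCorpus`` and the file
# names are assumptions and may differ in the real implementation.
#
# from cablemap.core import handle_source
#
# def save_dictionary(corpus):
#     corpus.dct.save_as_text('/my/path/wordids.txt')  # ``dct`` is assumed
#
# writer = CorpusHandler('/my/path', before_close=save_dictionary)
# handle_source('cables.csv', writer)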
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests import matchers
from nova.tests.virt.vmwareapi import test_vmwareapi_vm_util
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
CONF = cfg.CONF
class VMwareVifTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVifTestCase, self).setUp()
self.flags(vlan_interface='vmnet0', group='vmware')
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
vlan=3,
bridge_interface='eth0',
injected=True)
self.vif = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])[0]
self.session = test_vmwareapi_vm_util.fake_session()
self.cluster = None
def tearDown(self):
super(VMwareVifTestCase, self).tearDown()
def test_ensure_vlan_bridge(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(None)
network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
self.cluster).AndReturn('vmnet0')
network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
self.cluster).AndReturn(True)
network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3,
self.cluster)
network_util.get_network_with_the_name(self.session, 'fa0', None)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True)
# FlatDHCP network mode without vlan - network doesn't exist on the host
def test_ensure_vlan_bridge_without_vlan(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(None)
network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
self.cluster).AndReturn('vmnet0')
network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
self.cluster).AndReturn(True)
network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0,
self.cluster)
network_util.get_network_with_the_name(self.session, 'fa0', None)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
# FlatDHCP network mode without vlan - network exists on the host
# Get vswitch and check vlan interface should not be called
def test_ensure_vlan_bridge_with_network(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
vm_network = {'name': 'VM Network', 'type': 'Network'}
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(vm_network)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
# Flat network mode with DVS
def test_ensure_vlan_bridge_with_existing_dvs(self):
network_ref = {'dvpg': 'dvportgroup-2062',
'type': 'DistributedVirtualPortgroup'}
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(network_ref)
self.mox.ReplayAll()
ref = vif.ensure_vlan_bridge(self.session,
self.vif,
create_vlan=False)
self.assertThat(ref, matchers.DictMatches(network_ref))
def test_get_network_ref_neutron(self):
self.mox.StubOutWithMock(vif, 'get_neutron_network')
vif.get_neutron_network(self.session, 'fa0', self.cluster, self.vif)
self.mox.ReplayAll()
vif.get_network_ref(self.session, self.cluster, self.vif, True)
def test_get_network_ref_flat_dhcp(self):
self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
create_vlan=False)
self.mox.ReplayAll()
vif.get_network_ref(self.session, self.cluster, self.vif, False)
def test_get_network_ref_bridge(self):
self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
create_vlan=True)
self.mox.ReplayAll()
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
vlan=3,
bridge_interface='eth0',
injected=True,
should_create_vlan=True)
self.vif = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])[0]
vif.get_network_ref(self.session, self.cluster, self.vif, False)
def test_get_network_ref_single_bridge(self):
opaque_networks = [{'opaqueNetworkId': 'bridge_id',
'opaqueNetworkName': 'name',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id')
self.assertEqual('bridge_id', network_ref['network-id'])
def test_get_network_ref_bridges(self):
opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
'opaqueNetworkName': 'name1',
'opaqueNetworkType': 'OpaqueNetwork'},
{'opaqueNetworkId': 'bridge_id2',
'opaqueNetworkName': 'name2',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id2')
self.assertEqual('bridge_id2', network_ref['network-id'])
def test_get_network_ref_integration(self):
opaque_networks = [{'opaqueNetworkId': 'integration_bridge',
'opaqueNetworkName': 'name',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id')
self.assertEqual('integration_bridge', network_ref['network-id'])
def test_get_network_ref_bridge_none(self):
opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
'opaqueNetworkName': 'name1',
'opaqueNetworkType': 'OpaqueNetwork'},
{'opaqueNetworkId': 'bridge_id2',
'opaqueNetworkName': 'name2',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id')
self.assertIsNone(network_ref)
def test_get_network_ref_integration_multiple(self):
opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
'opaqueNetworkName': 'name1',
'opaqueNetworkType': 'OpaqueNetwork'},
{'opaqueNetworkId': 'integration_bridge',
'opaqueNetworkName': 'name2',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id')
self.assertIsNone(network_ref)
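# Behaviour of ``vif._get_network_ref_from_opaque`` as pinned down by the
# opaque-network tests above (inferred from the assertions, not from the
# implementation): the opaque network whose ``opaqueNetworkId`` equals the
# requested bridge id is returned; when nothing matches, the integration-bridge
# entry appears to be used only when it is the sole opaque network (compare
# test_get_network_ref_integration with
# test_get_network_ref_integration_multiple); otherwise ``None`` is returned.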
def test_get_neutron_network(self):
self.mox.StubOutWithMock(vm_util, 'get_host_ref')
self.mox.StubOutWithMock(self.session, '_call_method')
self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
vm_util.get_host_ref(self.session,
self.cluster).AndReturn('fake-host')
opaque = fake.DataObject()
opaque.HostOpaqueNetworkInfo = ['fake-network-info']
self.session._call_method(vim_util, "get_dynamic_property",
'fake-host', 'HostSystem',
'config.network.opaqueNetwork').AndReturn(opaque)
vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
CONF.vmware.integration_bridge,
self.vif['network']['id']).AndReturn('fake-network-ref')
self.mox.ReplayAll()
network_ref = vif.get_neutron_network(self.session,
self.vif['network']['id'],
self.cluster,
self.vif)
self.assertEqual(network_ref, 'fake-network-ref')
def test_get_neutron_network_opaque_network_not_found(self):
self.mox.StubOutWithMock(vm_util, 'get_host_ref')
self.mox.StubOutWithMock(self.session, '_call_method')
self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
vm_util.get_host_ref(self.session,
self.cluster).AndReturn('fake-host')
opaque = fake.DataObject()
opaque.HostOpaqueNetworkInfo = ['fake-network-info']
self.session._call_method(vim_util, "get_dynamic_property",
'fake-host', 'HostSystem',
'config.network.opaqueNetwork').AndReturn(opaque)
vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
CONF.vmware.integration_bridge,
self.vif['network']['id']).AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(exception.NetworkNotFoundForBridge,
vif.get_neutron_network, self.session,
self.vif['network']['id'], self.cluster, self.vif)
def test_get_neutron_network_bridge_network_not_found(self):
self.mox.StubOutWithMock(vm_util, 'get_host_ref')
self.mox.StubOutWithMock(self.session, '_call_method')
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
vm_util.get_host_ref(self.session,
self.cluster).AndReturn('fake-host')
opaque = fake.DataObject()
opaque.HostOpaqueNetworkInfo = ['fake-network-info']
self.session._call_method(vim_util, "get_dynamic_property",
'fake-host', 'HostSystem',
'config.network.opaqueNetwork').AndReturn(None)
network_util.get_network_with_the_name(self.session, 0,
self.cluster).AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(exception.NetworkNotFoundForBridge,
vif.get_neutron_network, self.session,
self.vif['network']['id'], self.cluster, self.vif)
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Page.published_languages'
db.delete_column(u'cms_page', 'published_languages')
def backwards(self, orm):
# Adding field 'Page.published_languages'
db.add_column(u'cms_page', 'published_languages',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'),)", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', 'db_table': "u'cmsplugin_placeholderreference'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
|
|
from DBUtil import *
##### junction - 202666904
def createjunction3Data(junction_id):
connection = MongoClient()
db = connection.c3stem_database
db.junction.insert({
"_id": junction_id,
"west_lane_out":"-19501516#11",
"west_lane_out_values":["-19501516#11_0", "-19501516#11_1"],
"west_lane_in":"19501516#11",
"west_lane_in_values":["19501516#11_1","19501516#11_0"],
# left, straight, right
"west_lane_in_adjascent":["-19514291#2", "19501516#12", "19514291#3"],
"east_lane_in":"-19501516#12",
"east_lane_in_values":["-19501516#12_0","-19501516#12_1"],
"east_lane_in_adjascent":["19514291#3", "-19501516#11", "-19514291#2"],
"east_lane_out":"19501516#12",
"east_lane_out_values":["19501516#12_1", "19501516#12_0"],
"north_lane_in":"19514291#2",
"north_lane_in_values":["19514291#2_0","19514291#2_1"],
"north_lane_in_adjascent":["19501516#12", "19514291#3", "-19501516#11"],
"north_lane_out":"-19514291#2",
"north_lane_out_values":["-19514291#2_1","-19514291#2_0"],
"south_lane_out":"19514291#3",
"south_lane_out_values":["19514291#3_0","19514291#3_1"],
"south_lane_in":"-19514291#3",
"south_lane_in_values":["-19514291#3_1", "-19514291#3_0"],
"south_lane_in_adjascent":["-19501516#11", "-19514291#2", "19501516#12"]
})
db.inductionloop.insert({
"_id": "-19501516#11_0_5",
"junction": junction_id,
"location": "west_lane_out",
"pos": 5
})
db.inductionloop.insert({
"_id": "-19501516#11_1_5",
"junction": junction_id,
"location": "west_lane_out",
"pos": 5
})
db.inductionloop.insert({
"_id": "19501516#11_1_-5",
"junction": junction_id,
"location": "west_lane_in",
"pos": -5
})
db.inductionloop.insert({
"_id": "19501516#11_0_-5",
"junction": junction_id,
"location": "west_lane_in",
"pos": -5
})
db.inductionloop.insert({
"_id": "-19501516#12_0_-5",
"junction": junction_id,
"location": "east_lane_in",
"pos": -5
})
db.inductionloop.insert({
"_id": "-19501516#12_1_-5",
"junction": junction_id,
"location": "east_lane_in",
"pos": -5
})
db.inductionloop.insert({
"_id": "19501516#12_1_5",
"junction": junction_id,
"location": "east_lane_out",
"pos": 5
})
db.inductionloop.insert({
"_id": "19501516#12_0_5",
"junction": junction_id,
"location": "east_lane_out",
"pos": 5
})
db.inductionloop.insert({
"_id": "19514291#2_0_-5",
"junction": junction_id,
"location": "north_lane_in",
"pos": -5
})
db.inductionloop.insert({
"_id": "19514291#2_1_-5",
"junction": junction_id,
"location": "north_lane_in",
"pos": -5
})
db.inductionloop.insert({
"_id": "-19514291#2_1_5",
"junction": junction_id,
"location": "north_lane_out",
"pos": 5
})
db.inductionloop.insert({
"_id": "-19514291#2_0_5",
"junction": junction_id,
"location": "north_lane_out",
"pos": 5
})
db.inductionloop.insert({
"_id": "19514291#3_0_5",
"junction": junction_id,
"location": "south_lane_out",
"pos": 5
})
db.inductionloop.insert({
"_id": "19514291#3_1_5",
"junction": junction_id,
"location": "south_lane_out",
"pos": 5
})
db.inductionloop.insert({
"_id": "-19514291#3_1_-5",
"junction": junction_id,
"location": "south_lane_in",
"pos": -5
})
db.inductionloop.insert({
"_id": "-19514291#3_0_-5",
"junction": junction_id,
"location": "south_lane_in",
"pos": -5
})
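# Note (inferred from the documents above, not from external documentation):
# each induction-loop ``_id`` follows the pattern "<lane id>_<pos>", where the
# lane id is "<edge id>_<lane index>" and ``pos`` is +5 for outgoing lanes and
# -5 for incoming lanes; the unit of ``pos`` is an assumption.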
def createJunction3TurnProbability(junction_id, simulation_id):
connection = MongoClient()
db = connection.c3stem_database
# First intersection
db.turnprobability.insert({
"simulation_id": simulation_id,
"intersection_id": junction_id,
"edge_id": "19501516#11",
"left_turn": "0.2",
"go_straight": "0.6",
"right_turn": "0.2",
"to_edge_left": "-19514291#2",
"to_edge_straight": "19501516#12",
"to_edge_right": "19514291#3"
})
db.turnprobability.insert({
"simulation_id": simulation_id,
"intersection_id": junction_id,
"edge_id": "-19501516#12",
"left_turn": "0.2",
"go_straight": "0.6",
"right_turn": "0.2",
"to_edge_left": "19514291#3",
"to_edge_straight": "-19501516#11",
"to_edge_right": "-19514291#2"
})
db.turnprobability.insert({
"simulation_id": simulation_id,
"intersection_id": junction_id,
"edge_id": "19514291#2",
"left_turn": "0.2",
"go_straight": "0.6",
"right_turn": "0.2",
"to_edge_left": "19501516#12",
"to_edge_straight": "19514291#3",
"to_edge_right": "-19501516#11"
})
db.turnprobability.insert({
"simulation_id": simulation_id,
"intersection_id": junction_id,
"edge_id": "-19514291#3",
"left_turn": "0.2",
"go_straight": "0.6",
"right_turn": "0.2",
"to_edge_left": "-19501516#11",
"to_edge_straight": "-19514291#2",
"to_edge_right": "19501516#12"
})
def createJunction3FlowData(junction_id, simulation_id):
# | B | C
# | |
# | Iwest |
# --A-- O1----------------- O2---D---
# | Ieast |
# | |
# Lsouth| Lnorth Jsouth | Jnorth
# | |
# | Keast |
# --H-- O4----------------- O3---E---
# | Kwest |
# | |
# | G | F
connection = MongoClient()
db = connection.c3stem_database
db.flows.insert({
"point_name": "Jsouth",
"simulation_id": simulation_id,
"intersection_id": junction_id,
"from_edge_id": "19514291#2",
"to_edge_id": "n/a",
"via_edge_id": "19514291#2",
"flow_rate": "600",
"latitude": "35.0339935941766",
"longitude": "-85.27231693267822",
"removable": "0"
})
db.flows.insert({
"point_name": "Jnorth",
"simulation_id": simulation_id,
"intersection_id": junction_id,
"from_edge_id": "19457616#3",
"to_edge_id": "n/a",
"via_edge_id": "19457616#3",
"flow_rate": "600",
"latitude": "36.139572",
"longitude": "-86.810",
"removable": "1"
})
db.flows.insert({
"point_name": "E",
"simulation_id": simulation_id,
"intersection_id": junction_id,
"from_edge_id": "-19501516#12",
"to_edge_id": "n/a",
"via_edge_id": "-19501516#12",
"flow_rate": "600",
"latitude": "35.03179734081907",
"longitude": "-85.27172684669495",
"removable": "0"
})
db.flows.insert({
"point_name": "F",
"simulation_id": simulation_id,
"intersection_id": junction_id,
"from_edge_id": "-19514291#3",
"to_edge_id": "n/a",
"via_edge_id": "-19514291#3",
"flow_rate": "600",
"latitude": "35.031525001289786",
"longitude": "-85.27367949485779",
"removable": "0"
})
db.flows.insert({
"point_name": "Kwest",
"simulation_id": simulation_id,
"intersection_id": junction_id,
"from_edge_id": "-19457616#4",
"to_edge_id": "n/a",
"via_edge_id": "-19457616#4",
"flow_rate": "600",
"latitude": "36.138208",
"longitude": "-86.810903",
"removable": "1"
})
db.flows.insert({
"point_name": "Keast",
"simulation_id": simulation_id,
"intersection_id": junction_id,
"from_edge_id": "19501516#11",
"to_edge_id": "n/a",
"via_edge_id": "19501516#11",
"flow_rate": "600",
"latitude": "35.034424052917046",
"longitude": "-85.27881860733032",
"removable": "0"
})
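# Usage sketch (illustration only): the three helpers above are presumably
# invoked together per junction. The junction id matches the comment at the
# top of this file; the simulation id below is a hypothetical placeholder.
if __name__ == '__main__':
    example_junction_id = "202666904"
    example_simulation_id = "sim-0001"  # hypothetical
    createjunction3Data(example_junction_id)
    createJunction3TurnProbability(example_junction_id, example_simulation_id)
    createJunction3FlowData(example_junction_id, example_simulation_id)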
|
|
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for database."""
import base64
import hashlib
import six
import mock
from oslo_config import fixture as config_fixture
from oslotest import base
import sqlalchemy.orm
from refstack import db
from refstack.api import constants as api_const
from refstack.db.sqlalchemy import api
from refstack.db.sqlalchemy import models
class DBAPITestCase(base.BaseTestCase):
"""Test case for database API."""
@mock.patch.object(api, 'store_results')
def test_store_results(self, mock_store_results):
db.store_results('fake_results')
mock_store_results.assert_called_once_with('fake_results')
@mock.patch.object(api, 'get_test')
def test_get_test(self, mock_get_test):
db.get_test(12345)
mock_get_test.assert_called_once_with(12345, allowed_keys=None)
@mock.patch.object(api, 'get_test_results')
def test_get_test_results(self, mock_get_test_results):
db.get_test_results(12345)
mock_get_test_results.assert_called_once_with(12345)
@mock.patch.object(api, 'get_test_records')
def test_get_test_records(self, mock_db):
filters = mock.Mock()
db.get_test_records(1, 2, filters)
mock_db.assert_called_once_with(1, 2, filters)
@mock.patch.object(api, 'get_test_records_count')
def test_get_test_records_count(self, mock_db):
filters = mock.Mock()
db.get_test_records_count(filters)
mock_db.assert_called_once_with(filters)
@mock.patch.object(api, 'user_get')
def test_user_get(self, mock_db):
user_openid = '[email protected]'
db.user_get(user_openid)
mock_db.assert_called_once_with(user_openid)
@mock.patch.object(api, 'user_save')
def test_user_save(self, mock_db):
user_info = '[email protected]'
db.user_save(user_info)
mock_db.assert_called_once_with(user_info)
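# The tests above only verify delegation: each ``refstack.db`` entry point is
# expected to forward its arguments unchanged to the function of the same name
# in ``refstack.db.sqlalchemy.api``.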
class DBHelpersTestCase(base.BaseTestCase):
"""Test case for database backend helpers."""
@mock.patch.object(api, '_create_facade_lazily')
def test_get_engine(self, mock_create_facade):
facade = mock_create_facade.return_value
facade.get_engine = mock.Mock(return_value='fake_engine')
result = api.get_engine()
mock_create_facade.assert_called_once_with()
facade.get_engine.assert_called_once_with()
self.assertEqual(result, 'fake_engine')
@mock.patch.object(api, '_create_facade_lazily')
def test_get_session(self, mock_create_facade):
facade = mock_create_facade.return_value
facade.get_session = mock.Mock(return_value='fake_session')
fake_kwargs = {'foo': 'bar'}
result = api.get_session(**fake_kwargs)
mock_create_facade.assert_called_once_with()
facade.get_session.assert_called_once_with(**fake_kwargs)
self.assertEqual(result, 'fake_session')
@mock.patch('oslo_db.sqlalchemy.session.EngineFacade.from_config')
def test_create_facade_lazily(self, session):
session.return_value = 'fake_session'
result = api._create_facade_lazily()
self.assertEqual(result, 'fake_session')
class DBBackendTestCase(base.BaseTestCase):
"""Test case for database backend."""
def setUp(self):
super(DBBackendTestCase, self).setUp()
self.config_fixture = config_fixture.Config()
self.CONF = self.useFixture(self.config_fixture).conf
def test_to_dict(self):
fake_query_result = mock.Mock()
fake_query_result.keys.return_value = ('fake_id',)
fake_query_result.index = 1
fake_query_result.fake_id = 12345
self.assertEqual({'fake_id': 12345}, api._to_dict(fake_query_result))
fake_query_result_list = [fake_query_result]
self.assertEqual([{'fake_id': 12345}],
api._to_dict(fake_query_result_list))
fake_query = mock.Mock(spec=sqlalchemy.orm.Query)
fake_query.all.return_value = fake_query_result
self.assertEqual({'fake_id': 12345}, api._to_dict(fake_query))
fake_model = mock.Mock(spec=models.RefStackBase)
fake_model.default_allowed_keys = ('fake_id', 'meta',
'child', 'childs')
fake_child = mock.Mock(spec=models.RefStackBase)
fake_child.iteritems.return_value = {'child_id': 42}.items()
fake_child.default_allowed_keys = ('child_id',)
fake_child.metadata_keys = {}
actual_dict = {'fake_id': 12345,
'meta': [{'meta_key': 'answer',
'value': 42}],
'child': fake_child,
'childs': [fake_child]}
fake_model.iteritems.return_value = actual_dict.items()
fake_model.metadata_keys = {'meta': {'key': 'meta_key',
'value': 'value'}}
self.assertEqual({'fake_id': 12345,
'meta': {'answer': 42},
'child': {'child_id': 42},
'childs': [{'child_id': 42}]},
api._to_dict(fake_model))
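# The assertions above characterise ``api._to_dict``: plain query results and
# lists of results become dicts keyed by their columns, ``sqlalchemy.orm.Query``
# objects are evaluated via ``.all()`` first, and ``RefStackBase`` models are
# converted using their ``default_allowed_keys``, with ``metadata_keys``
# collapsing a list of meta rows into a single ``{key: value}`` dict and nested
# models/lists converted recursively.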
@mock.patch.object(api, 'get_session')
@mock.patch('refstack.db.sqlalchemy.models.TestResults')
@mock.patch('refstack.db.sqlalchemy.models.Test')
@mock.patch('refstack.db.sqlalchemy.models.TestMeta')
@mock.patch('uuid.uuid4')
def test_store_results(self, mock_uuid, mock_test_meta, mock_test,
mock_test_result, mock_get_session):
fake_tests_result = {
'cpid': 'foo',
'duration_seconds': 10,
'results': [
{'name': 'tempest.some.test'},
{'name': 'tempest.test', 'uid': '12345678'}
],
'meta': {'answer': 42}
}
_id = 12345
mock_uuid.return_value = _id
test = mock_test.return_value
test.save = mock.Mock()
session = mock_get_session.return_value
session.begin = mock.MagicMock()
test_result = mock_test_result.return_value
test_result.save = mock.Mock()
test_id = api.store_results(fake_tests_result)
mock_test.assert_called_once_with()
mock_get_session.assert_called_once_with()
test.save.assert_called_once_with(session)
session.begin.assert_called_once_with()
self.assertEqual(test_id, six.text_type(_id))
self.assertEqual(test.cpid, fake_tests_result['cpid'])
self.assertEqual(test.duration_seconds,
fake_tests_result['duration_seconds'])
self.assertEqual(mock_test_result.call_count,
len(fake_tests_result['results']))
@mock.patch.object(api, 'get_session')
@mock.patch('refstack.db.sqlalchemy.models.Test')
@mock.patch.object(api, '_to_dict', side_effect=lambda x, *args: x)
def test_get_test(self, mock_to_dict, mock_test, mock_get_session):
session = mock_get_session.return_value
session.query = mock.Mock()
query = session.query.return_value
query.filter_by = mock.Mock()
filter_by = query.filter_by.return_value
mock_result = 'fake_test_info'
filter_by.first = mock.Mock(return_value=mock_result)
test_id = 'fake_id'
actual_result = api.get_test(test_id)
mock_get_session.assert_called_once_with()
session.query.assert_called_once_with(mock_test)
query.filter_by.assert_called_once_with(id=test_id)
filter_by.first.assert_called_once_with()
self.assertEqual(mock_result, actual_result)
session = mock_get_session.return_value
session.query = mock.Mock()
query = session.query.return_value
query.filter_by.return_value.first.return_value = None
self.assertRaises(api.NotFound, api.get_test, 'fake_id')
@mock.patch('refstack.db.sqlalchemy.api.models')
@mock.patch.object(api, 'get_session')
def test_delete_test(self, mock_get_session, mock_models):
session = mock_get_session.return_value
test_query = mock.Mock()
test_meta_query = mock.Mock()
test_results_query = mock.Mock()
session.query = mock.Mock(side_effect={
mock_models.Test: test_query,
mock_models.TestMeta: test_meta_query,
mock_models.TestResults: test_results_query
}.get)
db.delete_test('fake_id')
session.begin.assert_called_once_with()
test_query.filter_by.return_value.first\
.assert_called_once_with()
test_meta_query.filter_by.return_value.delete\
.assert_called_once_with()
test_results_query.filter_by.return_value.delete\
.assert_called_once_with()
session.delete.assert_called_once_with(
test_query.filter_by.return_value.first.return_value)
mock_get_session.return_value = mock.MagicMock()
session = mock_get_session.return_value
session.query.return_value\
.filter_by.return_value\
.first.return_value = None
self.assertRaises(api.NotFound, db.delete_test, 'fake_id')
@mock.patch('refstack.db.sqlalchemy.api.models')
@mock.patch.object(api, 'get_session')
def test_get_test_meta_key(self, mock_get_session, mock_models):
session = mock_get_session.return_value
session.query.return_value\
.filter_by.return_value\
.filter_by.return_value\
.first.return_value = mock.Mock(value=42)
self.assertEqual(42, db.get_test_meta_key('fake_id', 'fake_key'))
session.query.return_value\
.filter_by.return_value\
.filter_by.return_value\
.first.return_value = None
self.assertEqual(24, db.get_test_meta_key('fake_id', 'fake_key', 24))
@mock.patch('refstack.db.sqlalchemy.api.models')
@mock.patch.object(api, 'get_session')
def test_save_test_meta_item(self, mock_get_session, mock_models):
session = mock_get_session.return_value
mock_meta_item = mock.Mock()
session.query.return_value\
.filter_by.return_value\
.filter_by.return_value\
.first.return_value = mock_meta_item
db.save_test_meta_item('fake_id', 'fake_key', 42)
self.assertEqual('fake_id', mock_meta_item.test_id)
self.assertEqual('fake_key', mock_meta_item.meta_key)
self.assertEqual(42, mock_meta_item.value)
session.begin.assert_called_once_with()
mock_meta_item.save.assert_called_once_with(session)
session.query.return_value\
.filter_by.return_value\
.filter_by.return_value\
.first.return_value = None
mock_meta_item = mock.Mock()
mock_models.TestMeta.return_value = mock_meta_item
db.save_test_meta_item('fake_id', 'fake_key', 42)
self.assertEqual('fake_id', mock_meta_item.test_id)
self.assertEqual('fake_key', mock_meta_item.meta_key)
self.assertEqual(42, mock_meta_item.value)
@mock.patch('refstack.db.sqlalchemy.api.models')
@mock.patch.object(api, 'get_session')
def test_delete_test_meta_item(self, mock_get_session, mock_models):
session = mock_get_session.return_value
mock_meta_item = mock.Mock()
session.query.return_value\
.filter_by.return_value\
.filter_by.return_value\
.first.return_value = mock_meta_item
db.delete_test_meta_item('fake_id', 'fake_key')
session.begin.assert_called_once_with()
session.delete.assert_called_once_with(mock_meta_item)
session.query.return_value\
.filter_by.return_value\
.filter_by.return_value\
.first.return_value = None
self.assertRaises(db.NotFound,
db.delete_test_meta_item, 'fake_id', 'fake_key')
@mock.patch.object(api, 'get_session')
@mock.patch('refstack.db.sqlalchemy.models.TestResults')
def test_get_test_results(self, mock_test_result, mock_get_session):
mock_test_result.name = mock.Mock()
session = mock_get_session.return_value
session.query = mock.Mock()
query = session.query.return_value
query.filter_by = mock.Mock()
filter_by = query.filter_by.return_value
mock_result = 'fake_test_results'
expected_result = ['fake_test_results']
filter_by.all = mock.Mock(return_value=[mock_result])
test_id = 'fake_id'
actual_result = api.get_test_results(test_id)
mock_get_session.assert_called_once_with()
session.query.assert_called_once_with(mock_test_result)
query.filter_by.assert_called_once_with(test_id=test_id)
filter_by.all.assert_called_once_with()
self.assertEqual(expected_result, actual_result)
@mock.patch('refstack.db.sqlalchemy.models.Test')
@mock.patch('refstack.db.sqlalchemy.models.TestMeta')
def test_apply_filters_for_query_unsigned(self, mock_meta,
mock_test):
query = mock.Mock()
mock_test.created_at = six.text_type()
mock_meta.test_id = six.text_type()
filters = {
api_const.START_DATE: 'fake1',
api_const.END_DATE: 'fake2',
api_const.CPID: 'fake3'
}
unsigned_query = (query
.filter.return_value
.filter.return_value
.filter.return_value)
unsigned_query.session.query.return_value.filter_by.side_effect = (
'signed_results_query', 'shared_results_query'
)
result = api._apply_filters_for_query(query, filters)
query.filter.assert_called_once_with(mock_test.created_at >=
filters[api_const.START_DATE])
query = query.filter.return_value
query.filter.assert_called_once_with(mock_test.created_at <=
filters[api_const.END_DATE])
query = query.filter.return_value
query.filter.assert_called_once_with(mock_test.cpid ==
filters[api_const.CPID])
unsigned_query.session.query.assert_has_calls((
mock.call(mock_meta.test_id),
mock.call().filter_by(meta_key='public_key'),
mock.call(mock_meta.test_id),
mock.call().filter_by(meta_key='shared'),
))
unsigned_query.filter.assert_has_calls((
mock.call(mock_test.id.notin_.return_value),
mock.call(mock_test.id.in_.return_value),
mock.call().union(unsigned_query.filter.return_value)
))
filtered_query = unsigned_query.filter.return_value.union.return_value
self.assertEqual(result, filtered_query)
@mock.patch('refstack.db.sqlalchemy.models.Test')
@mock.patch('refstack.db.sqlalchemy.models.TestMeta')
def test_apply_filters_for_query_signed(self, mock_meta,
mock_test):
query = mock.Mock()
mock_test.created_at = six.text_type()
mock_meta.test_id = six.text_type()
filters = {
api_const.START_DATE: 'fake1',
api_const.END_DATE: 'fake2',
api_const.CPID: 'fake3',
api_const.USER_PUBKEYS: ['fake_pk'],
api_const.SIGNED: 'true'
}
signed_query = (query
.filter.return_value
.filter.return_value
.filter.return_value)
result = api._apply_filters_for_query(query, filters)
signed_query.join.assert_called_once_with(mock_test.meta)
signed_query = signed_query.join.return_value
signed_query.filter.assert_called_once_with(
mock_meta.meta_key == api_const.PUBLIC_KEY
)
signed_query = signed_query.filter.return_value
mock_meta.value.in_.assert_called_once_with(
filters[api_const.USER_PUBKEYS])
signed_query.filter.assert_called_once_with(
mock_meta.value.in_.return_value)
filtered_query = signed_query.filter.return_value
self.assertEqual(result, filtered_query)
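# Taken together, the two tests above describe ``_apply_filters_for_query``:
# every query is narrowed by start date, end date and cpid; when ``SIGNED`` and
# ``USER_PUBKEYS`` are present the query joins the test metadata and keeps only
# results signed with one of the user's public keys, while the unsigned path
# keeps results without a ``public_key`` meta entry unioned with results
# explicitly marked as shared.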
@mock.patch.object(api, '_apply_filters_for_query')
@mock.patch.object(api, 'get_session')
@mock.patch('refstack.db.sqlalchemy.models.Test')
def test_get_test_records(self, mock_model,
mock_get_session,
mock_apply):
per_page = 9000
filters = {
api_const.START_DATE: 'fake1',
api_const.END_DATE: 'fake2',
api_const.CPID: 'fake3'
}
session = mock_get_session.return_value
first_query = session.query.return_value
second_query = mock_apply.return_value
ordered_query = second_query.order_by.return_value
query_with_offset = ordered_query.offset.return_value
query_with_offset.limit.return_value.all.return_value = 'fake_uploads'
result = api.get_test_records(2, per_page, filters)
mock_get_session.assert_called_once_with()
session.query.assert_called_once_with(mock_model)
mock_apply.assert_called_once_with(first_query, filters)
second_query.order_by.\
assert_called_once_with(mock_model.created_at.desc())
self.assertEqual(result, 'fake_uploads')
ordered_query.offset.assert_called_once_with(per_page)
query_with_offset.limit.assert_called_once_with(per_page)
@mock.patch.object(api, '_apply_filters_for_query')
@mock.patch.object(api, 'get_session')
@mock.patch('refstack.db.sqlalchemy.models.Test')
def test_get_test_records_count(self, mock_model,
mock_get_session,
mock_apply):
filters = mock.Mock()
session = mock_get_session.return_value
query = session.query.return_value
apply_result = mock_apply.return_value
apply_result.count.return_value = 999
result = api.get_test_records_count(filters)
self.assertEqual(result, 999)
session.query.assert_called_once_with(mock_model.id)
mock_apply.assert_called_once_with(query, filters)
apply_result.count.assert_called_once_with()
@mock.patch.object(api, 'get_session',
return_value=mock.Mock(name='session'),)
@mock.patch('refstack.db.sqlalchemy.models.User')
def test_user_get(self, mock_model, mock_get_session):
user_openid = '[email protected]'
session = mock_get_session.return_value
query = session.query.return_value
filtered = query.filter_by.return_value
user = filtered.first.return_value
result = api.user_get(user_openid)
self.assertEqual(result, user)
session.query.assert_called_once_with(mock_model)
query.filter_by.assert_called_once_with(openid=user_openid)
filtered.first.assert_called_once_with()
@mock.patch.object(api, 'get_session',
return_value=mock.Mock(name='session'),)
@mock.patch('refstack.db.sqlalchemy.models.User')
def test_user_get_none(self, mock_model, mock_get_session):
user_openid = '[email protected]'
session = mock_get_session.return_value
query = session.query.return_value
filtered = query.filter_by.return_value
filtered.first.return_value = None
self.assertRaises(api.NotFound, api.user_get, user_openid)
@mock.patch.object(api, 'get_session')
@mock.patch('refstack.db.sqlalchemy.models.User')
@mock.patch.object(api, 'user_get', side_effect=api.NotFound('User'))
def test_user_update_or_create(self, mock_get_user, mock_model,
mock_get_session):
user_info = {'openid': '[email protected]'}
session = mock_get_session.return_value
user = mock_model.return_value
result = api.user_save(user_info)
self.assertEqual(result, user)
mock_model.assert_called_once_with()
mock_get_session.assert_called_once_with()
user.save.assert_called_once_with(session=session)
user.update.assert_called_once_with(user_info)
session.begin.assert_called_once_with()
@mock.patch.object(api, 'get_session')
@mock.patch('refstack.db.sqlalchemy.api.models')
def test_store_pubkey(self, mock_models, mock_get_session):
session = mock_get_session.return_value
pubkey_info = {
'openid': 'fake_id',
'format': 'ssh-rsa',
'pubkey': 'cHV0aW4gaHVpbG8=',
'comment': 'comment'
}
mock_pubkey = mock.Mock()
mock_pubkey.id = 42
mock_models.PubKey.return_value = mock_pubkey
session.query.return_value\
.filter_by.return_value\
.filter_by.return_value\
.all.return_value = None
self.assertEqual(42, db.store_pubkey(pubkey_info))
self.assertEqual('fake_id', mock_pubkey.openid)
self.assertEqual('ssh-rsa', mock_pubkey.format)
self.assertEqual('cHV0aW4gaHVpbG8=', mock_pubkey.pubkey)
self.assertEqual(
hashlib.md5(
base64.b64decode('cHV0aW4gaHVpbG8='.encode('ascii'))
).hexdigest(),
'3b30cd2bdac1eeb7e92dfc983bf5f943'
)
mock_pubkey.save.assert_called_once_with(session)
session.query.return_value\
.filter_by.return_value\
.filter_by.return_value\
.all.return_value = mock_pubkey
self.assertRaises(db.Duplication,
db.store_pubkey, pubkey_info)
@mock.patch.object(api, 'get_session')
@mock.patch('refstack.db.sqlalchemy.api.models')
def test_delete_pubkey(self, mock_models, mock_get_session):
session = mock_get_session.return_value
db.delete_pubkey('key_id')
key = session\
.query.return_value\
.filter_by.return_value\
.first.return_value
session.query.assert_called_once_with(mock_models.PubKey)
session.query.return_value.filter_by.assert_called_once_with(
id='key_id')
session.delete.assert_called_once_with(key)
session.begin.assert_called_once_with()
@mock.patch.object(api, 'get_session')
@mock.patch('refstack.db.sqlalchemy.api.models')
@mock.patch.object(api, '_to_dict', side_effect=lambda x: x)
def test_get_user_pubkeys(self, mock_to_dict, mock_models,
mock_get_session):
session = mock_get_session.return_value
actual_keys = db.get_user_pubkeys('user_id')
keys = session \
.query.return_value \
.filter_by.return_value \
.all.return_value
session.query.assert_called_once_with(mock_models.PubKey)
session.query.return_value.filter_by.assert_called_once_with(
openid='user_id')
self.assertEqual(keys, actual_keys)
|